From 3a87bdf0e5e557d6c780a1acf3d22e4946bd429d Mon Sep 17 00:00:00 2001 From: Jakub Both Date: Sat, 26 Feb 2022 19:43:39 +0100 Subject: [PATCH 01/83] MAINT: Replace Fields by equivalent standard list. --- src/porepy/viz/exporter.py | 44 +++----------------------------------- 1 file changed, 3 insertions(+), 41 deletions(-) diff --git a/src/porepy/viz/exporter.py b/src/porepy/viz/exporter.py index b50be87add..cb223d7ce8 100644 --- a/src/porepy/viz/exporter.py +++ b/src/porepy/viz/exporter.py @@ -54,44 +54,6 @@ def _check_values(self) -> None: if self.values is None: raise ValueError("Field " + str(self.name) + " values not valid") - -class Fields: - """ - Internal class to store a list of field. - """ - - def __init__(self) -> None: - self._fields: List[Field] = [] - - def __iter__(self) -> Generator[Field, None, None]: - """ - Iterator on all the fields. - """ - for f in self._fields: - yield f - - def __repr__(self) -> str: - """ - Repr function - """ - return "\n".join([repr(f) for f in self]) - - def extend(self, new_fields: List[Field]) -> None: - """ - Extend the list of fields with additional fields. - """ - if isinstance(new_fields, list): - self._fields.extend(new_fields) - else: - raise ValueError - - def names(self) -> List[str]: - """ - Return the list of name of the fields. - """ - return [f.name for f in self] - - class Exporter: def __init__( self, @@ -315,7 +277,7 @@ def _export_g(self, data: Dict[str, np.ndarray], time_step): if data is None: data = dict() - fields = Fields() + fields: List[Field] = [] if len(data) > 0: fields.extend([Field(n, v) for n, v in data.items()]) @@ -358,7 +320,7 @@ def _export_gb(self, data: List[str], time_step: float) -> None: break # Transfer data to fields. - node_fields = Fields() + node_fields: List[Field] = [] if len(node_data) > 0: node_fields.extend([Field(d) for d in node_data]) @@ -410,7 +372,7 @@ def _export_gb(self, data: List[str], time_step: float) -> None: break # Transfer data to fields. - edge_fields = Fields() + edge_fields: List[Field] = [] if len(edge_data) > 0: edge_fields.extend([Field(d) for d in edge_data]) From 62d186e3ddee19d6edabaff631a8025602cd6cfd Mon Sep 17 00:00:00 2001 From: Jakub Both Date: Sat, 26 Feb 2022 19:48:30 +0100 Subject: [PATCH 02/83] MAINT: Remove redundant code. 
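The removed helper became redundant once fields are kept in a plain list
(previous patch); the bookkeeping reduces to ordinary list handling. A minimal
sketch of the pattern, using a stand-in Field class rather than the one in
exporter.py:

    import numpy as np

    class Field:
        # Stand-in with the same two attributes as exporter.Field.
        def __init__(self, name, values=None):
            self.name = name
            self.values = values

    data = {"pressure": np.zeros(4), "flux": np.ones(4)}
    fields = [Field(n, v) for n, v in data.items()]  # replaces Fields.extend
    names = [f.name for f in fields]                 # replaces Fields.names()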
--- src/porepy/viz/exporter.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/src/porepy/viz/exporter.py b/src/porepy/viz/exporter.py index cb223d7ce8..41ff2d5e97 100644 --- a/src/porepy/viz/exporter.py +++ b/src/porepy/viz/exporter.py @@ -50,10 +50,6 @@ def check(self, values: Optional[np.ndarray], g: pp.Grid) -> None: if np.atleast_2d(values).shape[1] != g.num_cells: raise ValueError("Field " + str(self.name) + " has wrong dimension.") - def _check_values(self) -> None: - if self.values is None: - raise ValueError("Field " + str(self.name) + " values not valid") - class Exporter: def __init__( self, From e0730adb79303b41d89c55bdf24c880d44633144 Mon Sep 17 00:00:00 2001 From: Jakub Both Date: Sun, 27 Feb 2022 23:10:33 +0100 Subject: [PATCH 03/83] MAINT: Convert grids to grid buckets in init --- src/porepy/viz/exporter.py | 93 ++++++++++++++++++++------------------ 1 file changed, 49 insertions(+), 44 deletions(-) diff --git a/src/porepy/viz/exporter.py b/src/porepy/viz/exporter.py index 41ff2d5e97..db069f4340 100644 --- a/src/porepy/viz/exporter.py +++ b/src/porepy/viz/exporter.py @@ -53,7 +53,7 @@ def check(self, values: Optional[np.ndarray], g: pp.Grid) -> None: class Exporter: def __init__( self, - grid: Union[pp.Grid, pp.GridBucket], + gb: Union[pp.Grid, pp.GridBucket], file_name: str, folder_name: Optional[str] = None, **kwargs, @@ -62,7 +62,7 @@ def __init__( Class for exporting data to vtu files. Parameters: - grid: the grid or grid bucket + gb: grid bucket (if grid is provided, it will be converted to a grid bucket) file_name: the root of file name without any extension. folder_name: (optional) the name of the folder to save the file. If the folder does not exist it will be created. @@ -101,13 +101,12 @@ def __init__( """ - if isinstance(grid, pp.GridBucket): - self.is_GridBucket = True - self.gb: pp.GridBucket = grid - + # Exporter is operating on grid buckets. If a grid is provided, convert to a grid bucket. + if isinstance(gb, pp.Grid): + self.gb = pp.GridBucket() + self.gb.add_nodes(gb) else: - self.is_GridBucket = False - self.grid: pp.Grid = grid # type: ignore + self.gb = gb self.file_name = file_name self.folder_name = folder_name @@ -119,23 +118,25 @@ def __init__( self.cell_id_key = "cell_id" - if self.is_GridBucket: + if self.gb.num_graph_nodes() > 0: # Fixed-dimensional grids to be included in the export. We include # all but the 0-d grids self.dims = np.setdiff1d(self.gb.all_dims(), [0]) num_dims = self.dims.size self.meshio_geom = dict(zip(self.dims, [tuple()] * num_dims)) # type: ignore + if self.gb.num_graph_edges() > 0: # mortar grid variables self.m_dims = np.unique([d["mortar_grid"].dim for _, d in self.gb.edges()]) num_m_dims = self.m_dims.size self.m_meshio_geom = dict(zip(self.m_dims, [tuple()] * num_m_dims)) # type: ignore else: - self.meshio_geom = tuple() # type: ignore + self.m_dims = np.zeros(0, dtype=int) # Assume numba is available self.has_numba: bool = True + # TODO what is behind that? this takes time so we should explain. self._update_meshio_geom() # Counter for time step. Will be used to identify files of individual time step, @@ -154,12 +155,13 @@ def change_name(self, file_name: str) -> None: """ self.file_name = file_name + # TODO do we need dicts here? 
do we want to support both for grid buckets (this does not make sense)
    def write_vtu(
        self,
        data: Optional[Union[Dict, List[str]]] = None,
        time_dependent: bool = False,
        time_step: int = None,
-        grid: Optional[Union[pp.Grid, pp.GridBucket]] = None,
+        gb: Optional[Union[pp.Grid, pp.GridBucket]] = None,
    ) -> None:
        """
        Interface function to export the grid and additional data with meshio.

            grid: (optional) in case of changing grid set a new one.

        """
-        if self.fixed_grid and grid is not None:
+        if self.fixed_grid and gb is not None:
            raise ValueError("Inconsistency in exporter setting")

-        elif not self.fixed_grid and grid is not None:
-            if self.is_GridBucket:
-                self.gb = grid  # type: ignore
+        elif not self.fixed_grid and gb is not None:
+            # Convert to grid bucket if a grid is provided.
+            if isinstance(gb, pp.Grid):
+                self.gb = pp.GridBucket()
+                self.gb.add_nodes(gb)
            else:
-                self.grid = grid  # type: ignore
+                self.gb = gb

+            # Update meshio format
            self._update_meshio_geom()

+        # TODO is this option actively used?
        # If the problem is time dependent, but no time step is set, we set one
+        # using the updated, internal counter.
        if time_dependent and time_step is None:
            time_step = self._time_step_counter
            self._time_step_counter += 1

        if time_step is not None:
            self._exported_time_step_file_names.append(time_step)

-        if self.is_GridBucket:
+        # NOTE grid buckets are exported with list data, while single
+        # grids (converted to grid buckets) are exported with dict data.
+        if isinstance(data, list):
            self._export_gb(data, time_step)  # type: ignore
+        elif isinstance(data, dict):
+            self._export_g(data, time_step)
        else:
-            self._export_g(data, time_step)  # type: ignore
+            raise NotImplementedError("Only data of type list or dict is supported.")

+    # TODO does not contain all keywords as in write_vtu. make consistent?
    def write_pvd(
        self,
        timestep: np.ndarray,

        o_file.write(header)
        fm = '\t<DataSet group="" part="" timestep="%f" file="%s"/>\n'

-        if self.is_GridBucket:
-            for time, fn in zip(timestep, file_extension):
-                for dim in self.dims:
-                    o_file.write(
-                        fm % (time, self._make_file_name(self.file_name, fn, dim))
-                    )
-        else:
-            for time, fn in zip(timestep, file_extension):
-                o_file.write(fm % (time, self._make_file_name(self.file_name, fn)))
+        for time, fn in zip(timestep, file_extension):
+            for dim in self.dims:
+                o_file.write(
+                    fm % (time, self._make_file_name(self.file_name, fn, dim))
+                )

        o_file.write("</Collection>\n" + "</VTKFile>")
        o_file.close()

    def _export_g(self, data: Dict[str, np.ndarray], time_step):
        """
        Export a single grid (not grid bucket) to a vtu file.
        """
        if data is None:
            data = dict()

        fields: List[Field] = []
        if len(data) > 0:
            fields.extend([Field(n, v) for n, v in data.items()])

-        grid_dim = self.grid.dim * np.ones(self.grid.num_cells, dtype=int)
+        grid = [g for g, _ in self.gb.nodes()][0]
+
+        grid_dim = grid.dim * np.ones(grid.num_cells, dtype=int)

        fields.extend([Field("grid_dim", grid_dim)])

-        self._write(fields, name, self.meshio_geom)
+        self._write(fields, name, self.meshio_geom[grid.dim])

    def _export_gb(self, data: List[str], time_step: float) -> None:
        """Export the entire GridBucket and additional data to vtu.
@@ -775,21 +785,16 @@ def _write(self, fields: Iterable[Field], file_name: str, meshio_geom) -> None: meshio.write(file_name, meshio_grid_to_export, binary=self.binary) def _update_meshio_geom(self): - if self.is_GridBucket: - for dim in self.dims: - g = self.gb.get_grids(lambda g: g.dim == dim) - self.meshio_geom[dim] = self._export_grid(g, dim) - - for dim in self.m_dims: - # extract the mortar grids for dimension dim - mgs = self.gb.get_mortar_grids(lambda g: g.dim == dim) - # it contains the mortar grids "unrolled" by sides - mg = np.array([g for m in mgs for _, g in m.side_grids.items()]) - self.m_meshio_geom[dim] = self._export_grid(mg, dim) - else: - self.meshio_geom = self._export_grid( - np.atleast_1d(self.grid), self.grid.dim - ) + for dim in self.dims: + g = self.gb.get_grids(lambda g: g.dim == dim) + self.meshio_geom[dim] = self._export_grid(g, dim) + + for dim in self.m_dims: + # extract the mortar grids for dimension dim + mgs = self.gb.get_mortar_grids(lambda g: g.dim == dim) + # it contains the mortar grids "unrolled" by sides + mg = np.array([g for m in mgs for _, g in m.side_grids.items()]) + self.m_meshio_geom[dim] = self._export_grid(mg, dim) def _make_folder(self, folder_name: Optional[str] = None, name: str = "") -> str: if folder_name is None: From 97292c5cc20d20eb7a3ce0640d4a16f608d60cb3 Mon Sep 17 00:00:00 2001 From: Jakub Both Date: Sun, 27 Feb 2022 23:25:32 +0100 Subject: [PATCH 04/83] MAINT: Refactor __init__. --- src/porepy/viz/exporter.py | 36 ++++++++++++++++++++---------------- 1 file changed, 20 insertions(+), 16 deletions(-) diff --git a/src/porepy/viz/exporter.py b/src/porepy/viz/exporter.py index db069f4340..0641d5e227 100644 --- a/src/porepy/viz/exporter.py +++ b/src/porepy/viz/exporter.py @@ -105,38 +105,42 @@ def __init__( if isinstance(gb, pp.Grid): self.gb = pp.GridBucket() self.gb.add_nodes(gb) - else: + elif isinstance(gb, pp.GridBucket): self.gb = gb + else: + raise TypeError("Exporter only supports single grids and grid buckets.") + # Store target location for storing vtu and pvd files. self.file_name = file_name self.folder_name = folder_name + + # Check for optional keywords self.fixed_grid: bool = kwargs.pop("fixed_grid", True) self.binary: bool = kwargs.pop("binary", True) if kwargs: msg = "Exporter() got unexpected keyword argument '{}'" raise TypeError(msg.format(kwargs.popitem()[0])) + # Hard code keywords self.cell_id_key = "cell_id" - if self.gb.num_graph_nodes() > 0: - # Fixed-dimensional grids to be included in the export. We include - # all but the 0-d grids - self.dims = np.setdiff1d(self.gb.all_dims(), [0]) - num_dims = self.dims.size - self.meshio_geom = dict(zip(self.dims, [tuple()] * num_dims)) # type: ignore - - if self.gb.num_graph_edges() > 0: - # mortar grid variables - self.m_dims = np.unique([d["mortar_grid"].dim for _, d in self.gb.edges()]) - num_m_dims = self.m_dims.size - self.m_meshio_geom = dict(zip(self.m_dims, [tuple()] * num_m_dims)) # type: ignore - else: - self.m_dims = np.zeros(0, dtype=int) + # Generate infrastructure for storing fixed-dimensional grids in + # meshio format. Include all but the 0-d grids + self.dims = np.setdiff1d(self.gb.all_dims(), [0]) + num_dims = self.dims.size + self.meshio_geom = dict(zip(self.dims, [tuple()] * num_dims)) # type: ignore + + # Generate infrastructure for storing fixed-dimensional mortar grids + # in meshio format. 
+        self.m_dims = np.unique([d["mortar_grid"].dim for _, d in self.gb.edges()])
+        num_m_dims = self.m_dims.size
+        self.m_meshio_geom = dict(zip(self.m_dims, [tuple()] * num_m_dims))  # type: ignore

        # Assume numba is available
+        # TODO rm hardcoded assumption, and move somewhere else
        self.has_numba: bool = True

-        # TODO what is behind that? this takes time so we should explain.
+        # Generate geometrical information in meshio format
        self._update_meshio_geom()

        # Counter for time step. Will be used to identify files of individual time step,

From d7a385168d33dbaa44e35ffa6af42580fd15358b Mon Sep 17 00:00:00 2001
From: Jakub Both
Date: Mon, 28 Feb 2022 00:01:03 +0100
Subject: [PATCH 05/83] MAINT: Rename private routines.

---
 src/porepy/viz/exporter.py | 28 +++++++++++++++++++---------
 1 file changed, 19 insertions(+), 9 deletions(-)

diff --git a/src/porepy/viz/exporter.py b/src/porepy/viz/exporter.py
index 0641d5e227..e98db7285d 100644
--- a/src/porepy/viz/exporter.py
+++ b/src/porepy/viz/exporter.py
        else:
            self.gb = gb

-        # Update meshio format
+        # Update geometrical info in meshio format
        self._update_meshio_geom()

-        # TODO is this option actively used?
        # If the problem is time dependent, but no time step is set, we set one
        # using the updated, internal counter.
        if time_dependent and time_step is None:
            time_step = self._time_step_counter
            self._time_step_counter += 1

        # NOTE grid buckets are exported with list data, while single
        # grids (converted to grid buckets) are exported with dict data.
        if isinstance(data, list):
-            self._export_gb(data, time_step)  # type: ignore
+            self._export_list_data(data, time_step)  # type: ignore
        elif isinstance(data, dict):
-            self._export_g(data, time_step)
+            self._export_dict_data(data, time_step)
        else:
            raise NotImplementedError("Only data of type list or dict is supported.")

-    def _export_g(self, data: Dict[str, np.ndarray], time_step):
+    def _export_dict_data(self, data: Dict[str, np.ndarray], time_step):
        """
-        Export a single grid (not grid bucket) to a vtu file.
+        Export single grid to a vtu file with dictionary data.
+
+        Parameters:
+            data (Dict[str, np.ndarray]): Data to be exported.
+            time_step (float): Time step, to be appended to the vtu output file.
        """
+        # Currently this method only supports single grids.
+        if self.dims.shape[0] > 1:
+            raise NotImplementedError("Grid buckets with more than one grid are not supported.")
+
        # No need of special naming, create the folder
        name = self._make_folder(self.folder_name, self.file_name)
        name = self._make_file_name(name, time_step)

        # Provide an empty dict if data is None
        if data is None:
            data = dict()

+        # Store the provided data as list of Fields
        fields: List[Field] = []
        if len(data) > 0:
            fields.extend([Field(n, v) for n, v in data.items()])

+        # Extract grid (it is assumed it is the only one)
        grid = [g for g, _ in self.gb.nodes()][0]

+        # Add grid dimension to the data
        grid_dim = grid.dim * np.ones(grid.num_cells, dtype=int)
-
        fields.extend([Field("grid_dim", grid_dim)])

+        # Write data to file
        self._write(fields, name, self.meshio_geom[grid.dim])

-    def _export_gb(self, data: List[str], time_step: float) -> None:
-        """Export the entire GridBucket and additional data to vtu.
+    def _export_list_data(self, data: List[str], time_step: float) -> None:
+        """Export the entire GridBucket and additional list data to vtu.

        Parameters:
            data (List[str]): Data to be exported in addition to default GridBucket data.
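To illustrate the calling convention fixed by this rename, a hypothetical
usage sketch (grid construction and file names are made up for this example;
dict data reaches _export_dict_data, a list of keywords reaches
_export_list_data):

    import numpy as np
    import porepy as pp

    g = pp.CartGrid([2, 2])
    g.compute_geometry()

    # Single grid: dictionary data, keyed by field name.
    exporter = pp.Exporter(g, "single_grid", folder_name="viz")
    exporter.write_vtu({"pressure": np.zeros(g.num_cells)})

    # Grid bucket: list of keywords referring to data stored on the bucket,
    # e.g. (assuming "pressure" is available on each node of some bucket gb):
    # exporter_gb = pp.Exporter(gb, "bucket", folder_name="viz")
    # exporter_gb.write_vtu(["pressure"])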
From 79ff58174ab5dbf56d7727f3ca394a40132ab366 Mon Sep 17 00:00:00 2001 From: Jakub Both Date: Mon, 28 Feb 2022 00:18:08 +0100 Subject: [PATCH 06/83] MAINT: New _export_2d. --- src/porepy/viz/exporter.py | 383 ++++++++++++++++++++++++++++++------- 1 file changed, 318 insertions(+), 65 deletions(-) diff --git a/src/porepy/viz/exporter.py b/src/porepy/viz/exporter.py index e98db7285d..b71f90c227 100644 --- a/src/porepy/viz/exporter.py +++ b/src/porepy/viz/exporter.py @@ -21,6 +21,7 @@ # Module-wide logger logger = logging.getLogger(__name__) +import time class Field: """ @@ -136,9 +137,8 @@ def __init__( num_m_dims = self.m_dims.size self.m_meshio_geom = dict(zip(self.m_dims, [tuple()] * num_m_dims)) # type: ignore - # Assume numba is available - # TODO rm hardcoded assumption, and move somewhere else - self.has_numba: bool = True + # Check if numba is installed + self._has_numba: bool = "numba" in sys.modules # Generate geometrical information in meshio format self._update_meshio_geom() @@ -518,19 +518,20 @@ def _export_1d( cell_to_nodes = {cell_type: np.empty((num_cells, 2))} # type: ignore # cell id map cell_id = {cell_type: np.empty(num_cells, dtype=int)} # type: ignore - cell_pos = 0 + cell_offset = 0 # points num_pts = np.sum([g.num_nodes for g in gs]) meshio_pts = np.empty((num_pts, 3)) # type: ignore - pts_pos = 0 + nodes_offset = 0 # loop on all the 1d grids for g in gs: # save the points information - sl = slice(pts_pos, pts_pos + g.num_nodes) + sl = slice(nodes_offset, nodes_offset + g.num_nodes) meshio_pts[sl, :] = g.nodes.T + # TODO vectorize, and use directly indices and indptr without sorting # Cell-node relations g_cell_nodes = g.cell_nodes() g_nodes_cells, g_cells, _ = sps.find(g_cell_nodes) @@ -541,11 +542,11 @@ def _export_1d( for c in np.arange(g.num_cells): loc = slice(g_cell_nodes.indptr[c], g_cell_nodes.indptr[c + 1]) # get the local nodes and save them - cell_to_nodes[cell_type][cell_pos, :] = g_nodes_cells[loc] + pts_pos - cell_id[cell_type][cell_pos] = cell_pos - cell_pos += 1 + cell_to_nodes[cell_type][cell_offset, :] = g_nodes_cells[loc] + nodes_offset + cell_id[cell_type][cell_offset] = cell_offset + cell_offset += 1 - pts_pos += g.num_nodes + nodes_offset += g.num_nodes # construct the meshio data structure num_block = len(cell_to_nodes) @@ -558,85 +559,335 @@ def _export_1d( return meshio_pts, meshio_cells, meshio_cell_id + # Light-weight alternative to sort_point_pairs from utils. + # NOTE it returns equivalent (in terms of graph theory) but not equal results. + # TODO not used here, see if this replaces sort_point_pairs in utils. + def _sort_point_pairs(nds): + # TODO proper doc + """Expects an array of size 2 x npts. Each column represents an edge in a cyclic graph, + containing the connected node ids. The algorithm returns a 1D array of size npts with + node ids ordered such that they represent a cycle. + """ + + # First flip point pairs until each point is represented merely once in each row. + for i in range(1,nds.shape[1]): + if min([len(set(nds[j][:i+1])) for j in range(2)]) < i + 1: + nds[0][i], nds[1][i] = nds[1][i], nds[0][i] + + # Safety check. Expect no double appearance. + if min([len(set(nds[j])) for j in range(2)]) != nds.shape[1]: + raise ValueError("Input does not allow sorting.") + + # Convert to list of tuples with tuples and its entries representing + # edges and nodes, resp. 
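+        # E.g., nds = np.array([[0, 2, 1], [1, 0, 2]]) becomes the tuples
+        # [(0, 1), (2, 0), (1, 2)] below (hypothetical input).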
+ tuple_representation = [(nds[0][i], nds[1][i]) for i in range(nds.shape[1])] + + # Domino sort + sorted_tuple_representation = sorted(tuple_representation) + + # Extract sorted nodes and convert to array + return np.array([n for (n,m) in sorted_tuple_representation], dtype=int) + def _export_2d( self, gs: Iterable[pp.Grid] ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: """ Export the geometrical data (point coordinates) and connectivity information from the 2d PorePy grids to meshio. - """ - # use standard name for simple object type - polygon_map = {"polygon3": "triangle", "polygon4": "quad"} + Parameters: + gs (Iterable[pp.Grid]): Subdomains. - # cell->nodes connectivity information + Returns: + Tuple[np.ndarray, np.ndarray, np.ndarray]: Points, 2d cells, and cell ids + in correct meshio format. + """ + + # Dictionary storing cell->nodes connectivity information for all + # cell types. For this, the nodes have to be sorted such that + # they form a circular chain, describing the boundary of the cell. cell_to_nodes: Dict[str, np.ndarray] = {} - # cell id map + # Dictionary collecting all cell ids for each cell type. cell_id: Dict[str, List[int]] = {} - # points + # Data structure for storing node coordinates of all 2d grids. num_pts = np.sum([g.num_nodes for g in gs]) meshio_pts = np.empty((num_pts, 3)) # type: ignore - pts_pos = 0 - cell_pos = 0 - # loop on all the 2d grids + # Initialize offsets. Required taking into account multiple 2d grids. + nodes_offset = 0 + cell_offset = 0 + + # Loop over all 2d grids for g in gs: - # save the points information - sl = slice(pts_pos, pts_pos + g.num_nodes) - meshio_pts[sl, :] = g.nodes.T - # Cell-face and face-node relations - g_faces_cells, g_cells, _ = sps.find(g.cell_faces) - # Ensure ordering of the cells - g_faces_cells = g_faces_cells[np.argsort(g_cells)] - g_nodes_faces, _, _ = sps.find(g.face_nodes) + # Store node coordinates + sl = slice(nodes_offset, nodes_offset + g.num_nodes) + meshio_pts[sl, :] = g.nodes.T - # loop on all the grid cells - for c in np.arange(g.num_cells): - loc = slice(g.cell_faces.indptr[c], g.cell_faces.indptr[c + 1]) - # get the nodes for the current cell - nodes = np.array( - [ - g_nodes_faces[ - g.face_nodes.indptr[f] : g.face_nodes.indptr[f + 1] + # TODO vectorize over cell_type? Could keep the previous concept + # of calling each element polygonN with N the number of points per cell. + # This would allow for using optimized numpy operations. + # Finally, polygon3 and polygon4 could be renamed to triangle and quad, + # whereas all other polygonN are merged to a common polygon. + + # Determine cell types based on number of nodes per cell. 
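+            # E.g., a grid of two triangles and one quad yields
+            # num_nodes_per_cell = [3, 3, 4] (hypothetical values).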
+            num_nodes_per_cell = g.cell_nodes().getnnz(axis=0)
+
+            # Cells with three nodes are identified as triangle cells
+            triangle_cells = np.nonzero(num_nodes_per_cell == 3)[0]
+            num_triangle_cells = triangle_cells.shape[0]
+            if num_triangle_cells > 0:
+                # Initialize data structures for connectivity and cell ids
+                if "triangle" not in cell_to_nodes:
+                    cell_to_nodes["triangle"] = np.empty((0,3), dtype=int)
+                    cell_id["triangle"] = []
+                # Store triangle cells and add offset taking into account previous grids
+                cell_id["triangle"] += (triangle_cells + cell_offset).tolist()
+
+            # Cells with four nodes are identified as quad cells
+            quad_cells = np.nonzero(num_nodes_per_cell == 4)[0]
+            num_quad_cells = quad_cells.shape[0]
+            if num_quad_cells > 0:
+                # Initialize data structures for connectivity and cell ids
+                if "quad" not in cell_to_nodes:
+                    cell_to_nodes["quad"] = np.empty((0,4), dtype=int)
+                    cell_id["quad"] = []
+                # Store quad cells and add offset taking into account previous grids
+                cell_id["quad"] += (quad_cells + cell_offset).tolist()
+
+            # Cells with more than four nodes are identified as polygons
+            polygon_cells = np.nonzero(num_nodes_per_cell > 4)[0]
+            num_polygon_cells = polygon_cells.shape[0]
+            if num_polygon_cells > 0:
+                # Initialize data structures for connectivity and cell ids
+                if "polygon" not in cell_to_nodes:
+                    #cell_to_nodes["polygon"] = np.empty((0,5), dtype=int) # TODO num cols?
+                    cell_id["polygon"] = []
+                # Store polygon cells and add offset taking into account previous grids
+                cell_id["polygon"] += (polygon_cells + cell_offset).tolist()
+
+            # Determine cell-node connectivity - treat triangle, quad and polygonal cells
+            # differently, aiming for optimized performance.
+
+            # 1. Triangles. Proceed in case the grid contains triangles.
+            if num_triangle_cells > 0:
+
+                # Triangles have a trivial connectivity since all nodes are connected.
+
+                # Extract indptr for triangle cells. Here, we in general lose information about
+                # the next cell, i.e., the end of the indices corresponding to the selected cells.
+                triangle_cn_indptr = g.cell_nodes().indptr[triangle_cells]
+                # Expand indptr, explicitly specifying the location of all nodes for each triangle cell.
+                # Integrate the explicit knowledge that each triangle cell consists of 3 nodes.
+                expanded_triangle_cn_indptr = np.vstack([triangle_cn_indptr + i for i in range(3)]).reshape(-1, order="F")
+                # Detect all corresponding nodes by applying the expanded mask to the indices
+                expanded_triangle_cn_indices = g.cell_nodes().indices[expanded_triangle_cn_indptr]
+                # Bring to number of triangle cells x 3 format by reshaping
+                triangle_cn = np.reshape(expanded_triangle_cn_indices, (-1,3), order="C")
+                # Add offset
+                cell_to_nodes["triangle"] = triangle_cn + nodes_offset
+
+            # 2. Quads. Proceed in case the grid contains quads.
+            if num_quad_cells > 0:
+                # For quads, g.cell_nodes cannot be blindly used as for triangles, since the
+                # ordering of the nodes may define a cell of the type
+                #   x--x   and not   x--x
+                #    \/              |  |
+                #    /\              |  |
+                #   x--x             x--x .
+                # Therefore, use both g.cell_faces and g.face_nodes to make use of face information,
+                # and sort those correctly to retrieve the correct connectivity.
+
+                # Collect all cell nodes including their connectivity in a matrix of double size.
+                # The goal will be to gather starting and end points for all faces of each cell,
+                # sort those faces, such that they form a circular graph, and then choose the
+                # resulting starting points of all faces to define the connectivity.
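+                # E.g., faces given as node pairs (0, 1), (3, 0), (1, 2), (2, 3)
+                # are sorted into the chain (0, 1), (1, 2), (2, 3), (3, 0), whose
+                # start points 0, 1, 2, 3 define the quad connectivity
+                # (hypothetical example).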
+                # First determine all faces of all quad cells. Use an analogous approach as
+                # used to determine all cell nodes for triangle cells. And use the hardcoded
+                # info that each quad has four faces.
+                quad_cf_indptr = g.cell_faces.indptr[quad_cells]
+                expanded_quad_cf_indptr = np.vstack([quad_cf_indptr + i for i in range(4)]).reshape(-1, order="F")
+                quad_cf_indices = g.cell_faces.indices[expanded_quad_cf_indptr]
+
+                # Determine the associated (two) nodes of all faces for each cell. Use again an
+                # analogous approach as for quad cell faces. In particular, include the hardcoded
+                # info that each face has two nodes.
+                quad_fn_indptr = g.face_nodes.indptr[quad_cf_indices]
+                # Extract nodes for first and second node of each face; reshape such
+                # that all first nodes of all faces for each cell are stored in one row,
+                # i.e., end up with an array of size num_quad_cells x 4.
+                quad_cfn_indices = [g.face_nodes.indices[quad_fn_indptr + i].reshape(-1,4) for i in range(2)]
+                # Group first and second nodes, with alternating order of rows.
+                # By this, each cell is represented by two rows. The first and second
+                # rows contain first and second nodes of faces. And each column stands
+                # for one face.
+                quad_cfn = np.ravel(quad_cfn_indices, order="F").reshape(4,-1).T
+
+                # Sort faces for each cell such that they form a chain. Use a function
+                # compiled with Numba. Currently, no alternative is provided when Numba
+                # is not installed. This step is the bottleneck of this routine.
+                if not self._has_numba:
+                    raise NotImplementedError("The sorting algorithm requires numba to be installed.")
+                quad_cfn = self._sort_point_pairs_numba(quad_cfn).astype(int)
+
+                # For each cell pick the sorted nodes such that they form a chain and thereby
+                # define the connectivity.
+                quad_cn = quad_cfn[np.arange(0,2*num_quad_cells,2),:]
+
+                # Add offset to account for multiple grids
+                cell_to_nodes["quad"] = quad_cn + nodes_offset
+
+            # 3. General polygons. Proceed in case the grid contains polygons.
+            if num_polygon_cells > 0:
+
+                # Cell-face and face-node relations
+                g_faces_cells, g_cells, _ = sps.find(g.cell_faces)
+                # Ensure ordering of the cells
+                g_faces_cells = g_faces_cells[np.argsort(g_cells)]
+                g_nodes_faces, _, _ = sps.find(g.face_nodes)
+
+                # loop on all the grid cells
+                for c in polygon_cells:
+                    loc = slice(g.cell_faces.indptr[c], g.cell_faces.indptr[c + 1])
+
+                    # get the nodes for the current cell
+                    nodes = np.array(
+                        [
+                            g_nodes_faces[
+                                g.face_nodes.indptr[f] : g.face_nodes.indptr[f + 1]
+                            ]
+                            for f in g_faces_cells[loc]
+                        ]
+                    ).T
+
+                    # sort the nodes
+                    nodes_loc, *_ = pp.utils.sort_points.sort_point_pairs(nodes)
+
+                    # TODO rm.
+                    ## define the type of cell we are currently saving
+                    #cell_type_raw = "polygon" + str(nodes_loc.shape[1])
+
+                    #default_type = "polygon"
+                    ## Map to triangle/quad if relevant, or simple polygon for general cells.
+                    #cell_type = polygon_map.get(cell_type_raw, default_type)
+                    cell_type = "polygon"
+
+                    # Gather cells of same type, and store both cell ids and connectivity patterns
+                    # for each (stored through sorted cell nodes).
+                    if cell_type not in cell_to_nodes:
+                        # Initialize data structures if cell type not present
+                        cell_to_nodes[cell_type] = np.atleast_2d(nodes_loc[0] + nodes_offset)
+                    else:
+                        # Extend existing data structures
+                        cell_to_nodes[cell_type] = np.vstack(
+                            (cell_to_nodes[cell_type], nodes_loc[0] + nodes_offset)
+                        )
+
+            # Update offset
+            nodes_offset += g.num_nodes
+            cell_offset += g.num_cells
+
+        # Construct the meshio data structure
+        num_block = len(cell_to_nodes)
+        meshio_cells = np.empty(num_block, dtype=object)
+        meshio_cell_id = np.empty(num_block, dtype=object)
+
+        # For each cell_type store the connectivity pattern cell_to_nodes for
+        # the corresponding cells with ids from cell_id.
+        for block, (cell_type, cell_block) in enumerate(cell_to_nodes.items()):
+            meshio_cells[block] = meshio.CellBlock(cell_type, cell_block.astype(int))
+            meshio_cell_id[block] = np.array(cell_id[cell_type])
+
+        return meshio_pts, meshio_cells, meshio_cell_id
+
+    def _sort_point_pairs_numba(self, lines):
+        """This is a simplified copy of pp.utils.sort_points.sort_point_pairs.
+
+        Essentially the same functionality as sort_point_pairs, but stripped down
+        to the special case of circular chains. Different from sort_point_pairs, this
+        variant sorts an arbitrary number of independent point pairs. The chains
+        are restricted by the assumption that each contains equally many line segments.
+        Finally, this routine uses numba.
+
+        Parameters:
+            lines (np.ndarray): Array of size 2 * num_chains x num_lines_per_chain,
+                containing node indices. For each pair of two rows, each column
+                represents a line segment connecting the two nodes in the two entries
+                of this column.
+
+        Returns:
+            np.ndarray: Sorted version of lines, where for each chain, the collection
+                of line segments has been potentially flipped and sorted.
+        """
+
+        import numba
+
+        @numba.jit(nopython=True)
+        def _function_to_compile(lines):
+            """Copy of pp.utils.sort_points.sort_point_pairs. This version is extended
+            to multiple chains. Each chain is implicitly assumed to be circular."""
+
+            # Retrieve number of chains and lines per chain from the shape.
+            # Implicitly expect that all chains have the same length
+            num_chains, chain_length = lines.shape
+            # Since for each chain lines includes two rows, take the half
+            num_chains = int(num_chains/2)
+
+            # Initialize array of sorted lines to be the final output
+            sorted_lines = np.zeros((2*num_chains,chain_length))
+            # Fix the first line segment for each chain and identify
+            # it as in place regarding the sorting.
+            sorted_lines[:, 0] = lines[:, 0]
+            # Keep track of which lines have been fixed and which are still candidates
+            found = np.zeros(chain_length)
+            found[0] = 1
+
+            # Loop over chains and consider each chain separately.
+            for c in range(num_chains):
+                # Initialize found, making any line segment aside from the first a candidate
+                found[1:] = 0
+
+                # Define the end point of the previous and starting point for the next line segment
+                prev = sorted_lines[2*c+1, 0]
+
+                # The sorting algorithm: Loop over all positions in the chain to be set next.
+ # Find the right candidate to be moved to this position and possibly flipped + # if needed. A candidate is identified as fitting if it contains one point + # equal to the current starting point. This algorithm uses a double loop, + # which is the most naive approach. However, assume chain_length is in + # general small. + for i in range(1, chain_length): # The first line has already been found + for j in range(1, chain_length): # The first line has already been found + # A candidate line segment with matching start and end point + # in the first component of the point pair. + if np.abs(found[j]) < 1e-6 and lines[2*c, j] == prev: + # Copy the segment to the right place + sorted_lines[2*c:2*c+2, i] = lines[2*c:2*c+2, j] + # Mark as used + found[j] = 1 + # Define the starting point for the next line segment + prev = lines[2*c+1, j] + break + # A candidate line segment with matching start and end point + # in the second component of the point pair. + elif np.abs(found[j])<1e-6 and lines[2*c+1, j] == prev: + # Flip and copy the segment to the right place + sorted_lines[2*c, i] = lines[2*c+1, j] + sorted_lines[2*c+1, i] = lines[2*c, j] + # Mark as used + found[j] = 1 + # Define the starting point for the next line segment + prev = lines[2*c, j] + break + + # Return the sorted lines defining chains. + return sorted_lines + + # Run numba compiled function + return _function_to_compile(lines) + def _export_3d( self, gs: Iterable[pp.Grid] ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: @@ -658,20 +909,22 @@ def _export_3d( # points num_pts = np.sum([g.num_nodes for g in gs]) meshio_pts = np.empty((num_pts, 3)) # type: ignore - pts_pos = 0 - cell_pos = 0 + + # offsets + nodes_offset = 0 + cell_pos = 0 # TODO rm and include offset # loop on all the 3d grids for g in gs: # save the points information - sl = slice(pts_pos, pts_pos + g.num_nodes) + sl = slice(nodes_offset, nodes_offset + g.num_nodes) meshio_pts[sl, :] = g.nodes.T + # TODO rm # Cell-face and face-node relations g_faces_cells, g_cells, _ = sps.find(g.cell_faces) # Ensure ordering of the cells g_faces_cells = g_faces_cells[np.argsort(g_cells)] - g_nodes_faces, _, _ = sps.find(g.face_nodes) cptr = g.cell_faces.indptr @@ -690,7 +943,7 @@ def _export_3d( # case the pure python version probably is faster than combined compile # and runtime for numba # The number 1000 here is somewhat random. - if self.has_numba and g.num_cells > 1000: + if self._has_numba and g.num_cells > 1000: logger.info("Construct 3d grid information using numba") cell_nodes = self._point_ind_numba( cptr, @@ -739,18 +992,18 @@ def _export_3d( # if the cell type is not present, then add it if cell_type not in cell_to_nodes: - cell_to_nodes[cell_type] = np.atleast_2d(nodes_loc + pts_pos) + cell_to_nodes[cell_type] = np.atleast_2d(nodes_loc + nodes_offset) cell_to_faces[cell_type] = [faces_loc] cell_id[cell_type] = [cell_pos] else: cell_to_nodes[cell_type] = np.vstack( - (cell_to_nodes[cell_type], nodes_loc + pts_pos) + (cell_to_nodes[cell_type], nodes_loc + nodes_offset) ) cell_to_faces[cell_type] += [faces_loc] cell_id[cell_type] += [cell_pos] cell_pos += 1 - pts_pos += g.num_nodes + nodes_offset += g.num_nodes # NOTE: only cell->faces relation will be kept, so far we export only polyhedron # otherwise in the next lines we should consider also the cell->nodes map From 1255e06545da7e9bbec1230ca23b4fd5439db5a7 Mon Sep 17 00:00:00 2001 From: Jakub Both Date: Thu, 3 Mar 2022 15:25:20 +0100 Subject: [PATCH 07/83] MAINT: New _export_3d. 
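The core of the new tetra handling is a vectorized pull of four nodes per cell
straight from the CSC storage of cell_nodes(). A minimal numpy illustration,
assuming a made-up matrix for two tetrahedra sharing a face (the matrix and
names are invented for this sketch):

    import numpy as np
    import scipy.sparse as sps

    # nodes x cells matrix; column c flags the nodes of cell c.
    cn = sps.csc_matrix(
        (np.ones(8), ([0, 1, 2, 3, 1, 2, 3, 4], [0, 0, 0, 0, 1, 1, 1, 1])),
        shape=(5, 2),
    )
    cells = np.array([0, 1])
    cn_indptr = cn.indptr[cells]
    # Expand each cell pointer into the four node slots of that cell.
    expanded = np.vstack([cn_indptr + i for i in range(4)]).reshape(-1, order="F")
    cn_indices = cn.indices[expanded].reshape(-1, 4)
    # cn_indices -> [[0, 1, 2, 3], [1, 2, 3, 4]]: one row of nodes per tetra.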
--- src/porepy/viz/exporter.py | 444 +++++++++++-------------------------- 1 file changed, 129 insertions(+), 315 deletions(-) diff --git a/src/porepy/viz/exporter.py b/src/porepy/viz/exporter.py index b71f90c227..f0502b10fc 100644 --- a/src/porepy/viz/exporter.py +++ b/src/porepy/viz/exporter.py @@ -549,9 +549,9 @@ def _export_1d( nodes_offset += g.num_nodes # construct the meshio data structure - num_block = len(cell_to_nodes) - meshio_cells = np.empty(num_block, dtype=object) - meshio_cell_id = np.empty(num_block, dtype=object) + num_blocks = len(cell_to_nodes) + meshio_cells = np.empty(num_blocks, dtype=object) + meshio_cell_id = np.empty(num_blocks, dtype=object) for block, (cell_type, cell_block) in enumerate(cell_to_nodes.items()): meshio_cells[block] = meshio.CellBlock(cell_type, cell_block.astype(int)) @@ -559,35 +559,6 @@ def _export_1d( return meshio_pts, meshio_cells, meshio_cell_id - # Light-weight alternative to sort_point_pairs from utils. - # NOTE it returns equivalent (in terms of graph theory) but not equal results. - # TODO not used here, see if this replaces sort_point_pairs in utils. - def _sort_point_pairs(nds): - # TODO proper doc - """Expects an array of size 2 x npts. Each column represents an edge in a cyclic graph, - containing the connected node ids. The algorithm returns a 1D array of size npts with - node ids ordered such that they represent a cycle. - """ - - # First flip point pairs until each point is represented merely once in each row. - for i in range(1,nds.shape[1]): - if min([len(set(nds[j][:i+1])) for j in range(2)]) < i + 1: - nds[0][i], nds[1][i] = nds[1][i], nds[0][i] - - # Safety check. Expect no double appearance. - if min([len(set(nds[j])) for j in range(2)]) != nds.shape[1]: - raise ValueError("Input does not allow sorting.") - - # Convert to list of tuples with tuples and its entries representing - # edges and nodes, resp. - tuple_representation = [(nds[0][i], nds[1][i]) for i in range(nds.shape[1])] - - # Domino sort - sorted_tuple_representation = sorted(tuple_representation) - - # Extract sorted nodes and convert to array - return np.array([n for (n,m) in sorted_tuple_representation], dtype=int) - def _export_2d( self, gs: Iterable[pp.Grid] ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: @@ -599,8 +570,8 @@ def _export_2d( gs (Iterable[pp.Grid]): Subdomains. Returns: - Tuple[np.ndarray, np.ndarray, np.ndarray]: Points, 2d cells, and cell ids - in correct meshio format. + Tuple[np.ndarray, np.ndarray, np.ndarray]: Points, 2d cells (storing the + connectivity), and cell ids in correct meshio format. """ # Dictionary storing cell->nodes connectivity information for all @@ -631,6 +602,8 @@ def _export_2d( # Finally, polygon3 and polygon4 could be renamed to triangle and quad, # whereas all other polygonN are merged to a common polygon. + # See export_3d. + # Determine cell types based on number of nodes per cell. num_nodes_per_cell = g.cell_nodes().getnnz(axis=0) @@ -740,6 +713,7 @@ def _export_2d( cell_to_nodes["quad"] = quad_cn + nodes_offset # 3. General polygons. Proceed in case the grid contains polygons. + # TODO rm. 
if num_polygon_cells > 0: # Cell-face and face-node relations @@ -790,9 +764,9 @@ def _export_2d( cell_offset += g.num_cells # Construct the meshio data structure - num_block = len(cell_to_nodes) - meshio_cells = np.empty(num_block, dtype=object) - meshio_cell_id = np.empty(num_block, dtype=object) + num_blocks = len(cell_to_nodes) + meshio_cells = np.empty(num_blocks, dtype=object) + meshio_cell_id = np.empty(num_blocks, dtype=object) # For each cell_type store the connectivity pattern cell_to_nodes for # the corresponding cells with ids from cell_id. @@ -894,129 +868,151 @@ def _export_3d( """ Export the geometrical data (point coordinates) and connectivity information from the 3d PorePy grids to meshio. + + Parameters: + gs (Iterable[pp.Grid]): Subdomains. + + Returns: + Tuple[np.ndarray, np.ndarray, np.ndarray]: Points, 3d cells (storing the + connectivity), and cell ids in correct meshio format. """ - # use standard name for simple object type - # NOTE: this part is not developed - # polygon_map = {"polyhedron4": "tetra", "polyhedron8": "hexahedron"} + # Use standard name for simple object types: in this routine only tetra cells + # are treated in a special manner. + polyhedron_map = {"polyhedron4": "tetra"} - # cell-faces and cell nodes connectivity information + # Dictionaries storing cell->faces and cell->nodes connectivity information + # for all cell types. For this, the nodes have to be sorted such that + # they form a circular chain, describing the boundary of the cell. cell_to_faces: Dict[str, List[List[int]]] = {} cell_to_nodes: Dict[str, np.ndarray] = {} - # cell id map + # Dictionary collecting all cell ids for each cell type. cell_id: Dict[str, List[int]] = {} - # points + # Data structure for storing node coordinates of all 2d grids. num_pts = np.sum([g.num_nodes for g in gs]) meshio_pts = np.empty((num_pts, 3)) # type: ignore - # offsets + # Initialize offsets. Required taking into account multiple 2d grids. nodes_offset = 0 - cell_pos = 0 # TODO rm and include offset + cell_offset = 0 - # loop on all the 3d grids + # Loop over all 3d grids for g in gs: - # save the points information + + # Store node coordinates sl = slice(nodes_offset, nodes_offset + g.num_nodes) meshio_pts[sl, :] = g.nodes.T - # TODO rm - # Cell-face and face-node relations - g_faces_cells, g_cells, _ = sps.find(g.cell_faces) - # Ensure ordering of the cells - g_faces_cells = g_faces_cells[np.argsort(g_cells)] - g_nodes_faces, _, _ = sps.find(g.face_nodes) - - cptr = g.cell_faces.indptr - fptr = g.face_nodes.indptr - face_per_cell = np.diff(cptr) - nodes_per_face = np.diff(fptr) - - # Total number of nodes to be written in the face-node relation - num_cell_nodes = np.array([nodes_per_face[i] for i in g.cell_faces.indices]) - - n = g.nodes - fc = g.face_centers - normal_vec = g.face_normals / g.face_areas - - # Use numba if available, unless the problem is very small, in which - # case the pure python version probably is faster than combined compile - # and runtime for numba - # The number 1000 here is somewhat random. 
- if self._has_numba and g.num_cells > 1000: - logger.info("Construct 3d grid information using numba") - cell_nodes = self._point_ind_numba( - cptr, - fptr, - g_faces_cells, - g_nodes_faces, - n, - fc, - normal_vec, - num_cell_nodes, - ) - else: - logger.info("Construct 3d grid information using pure python") - cell_nodes = self._point_ind( - cptr, - fptr, - g_faces_cells, - g_nodes_faces, - n, - fc, - normal_vec, - num_cell_nodes, - ) - - # implementation note: I did not even try feeding this to numba, my - # guess is that it will not like the vtk specific stuff. - nc = 0 - f_counter = 0 + # Determine cell types based on number of nodes per cell. + num_faces_per_cell = g.cell_faces.getnnz(axis=0) + + # Loop over all available cell types and group cells of one type. + # Init first data corresponding structure. + g_cell_map = dict() + for n in np.unique(num_faces_per_cell): + # Define cell type; check if it coincides with a predefined cell type + cell_type = polyhedron_map.get(f"polyhedron{n}", f"polyhedron{n}") + # Find all cells with n faces, and store for later use + cells = np.nonzero(num_faces_per_cell == n)[0] + g_cell_map[cell_type] = cells + # Store cell ids in global container; init if entry not yet established + if cell_type not in cell_id: + cell_id[cell_type] = [] + # Add offset taking into account previous grids + cell_id[cell_type] += (cells + cell_offset).tolist() + + # Determine connectivity. Loop over available cell types, and treat tetrahedra + # and general polyhedra differently aiming for optimized performance. + + for n in np.unique(num_faces_per_cell): + + # Define the cell type + cell_type = polyhedron_map.get(f"polyhedron{n}", f"polyhedron{n}") + + # Special case: Tetrahedra + if cell_type == "tetra": + # Since tetra is a meshio-known cell type, cell_to_nodes connectivity information + # is provided, i.e., for each cell the nodes in the meshio-defined local numbering + # of nodes is generated. Since tetra is a simplex, the nodes do not need to be + # ordered in any specific type, as the geometrical object is invariant under + # permutations. + + # Determine all tetra cells + cells = g_cell_map[cell_type] + # Extract indptr for all tetra cells. + cn_indptr = g.cell_nodes().indptr[cells] + # Expand indptr, explicitly specifying the location of all nodes for each tetra cell. + # Integrate the explicit knowledge that each tetra cell consists of 4 nodes. 
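+                    # E.g., cn_indptr = [0, 4] expands to [0, 1, 2, 3, 4, 5, 6, 7]
+                    # below, i.e., four node slots per tetra cell (hypothetical values).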
+ expanded_cn_indptr = np.vstack([cn_indptr + i for i in range(4)]).reshape(-1, order="F") + # Detect all corresponding nodes by applying the expanded mask to the indices + cn_indices = g.cell_nodes().indices[expanded_cn_indptr] + # Bring to number of num tetra cells x 4 format by reshaping + cn_indices = np.reshape(cn_indices, (-1,4), order="C") + # Initialize data structure if not available yet + if cell_type not in cell_to_nodes: + cell_to_nodes[cell_type] = np.empty((0,4), dtype=int) + # Store cell-node connectivity, and add offset taking into account previous grids + cell_to_nodes[cell_type] = cn_indices + nodes_offset - # loop on all the grid cells - for c in np.arange(g.num_cells): - faces_loc: List[int] = [] - # loop on all the cell faces - for _ in np.arange(face_per_cell[c]): - fi = g.cell_faces.indices[f_counter] - faces_loc += [cell_nodes[nc : (nc + nodes_per_face[fi])]] - nc += nodes_per_face[fi] - f_counter += 1 - - # collect all the nodes for the cell - nodes_loc = np.unique(np.hstack(faces_loc)).astype(int) - - # define the type of cell we are currently saving - cell_type = "polyhedron" + str(nodes_loc.size) - # cell_type = polygon_map.get(cell_type, cell_type) - - # if the cell type is not present, then add it - if cell_type not in cell_to_nodes: - cell_to_nodes[cell_type] = np.atleast_2d(nodes_loc + nodes_offset) - cell_to_faces[cell_type] = [faces_loc] - cell_id[cell_type] = [cell_pos] + # Identify remaining cells as polyhedra else: - cell_to_nodes[cell_type] = np.vstack( - (cell_to_nodes[cell_type], nodes_loc + nodes_offset) - ) - cell_to_faces[cell_type] += [faces_loc] - cell_id[cell_type] += [cell_pos] - cell_pos += 1 + # The general strategy is to define the connectivity as cell-face information, + # where the faces are defined by nodes. Hence, this information is significantly + # larger than the info provided for tetra cells. Here, we make use of the fact + # that g.face_nodes provides nodes ordered wrt. the right-hand rule. + + # Determine all cells with n faces + cells = g_cell_map[cell_type] + + # Store short cuts to cell-face and face-node information + cf_indptr = g.cell_faces.indptr + cf_indices = g.cell_faces.indices + fn_indptr = g.face_nodes.indptr + fn_indices = g.face_nodes.indices + + # Determine the cell-face connectivity (with faces described by their + # nodes ordered such that they form a chain and are identified by the + # face boundary. The final data format is a List[List[np.ndarray]]. + # The outer list, loops over all cells. Each cell entry contains a + # list over faces, and each face entry is given by the face nodes. + if cell_type not in cell_to_faces: + cell_to_faces[cell_type] = [] + cell_to_faces[cell_type] = [ + [ + fn_indices[fn_indptr[f] : fn_indptr[f+1]] # nodes + for f in cf_indices[cf_indptr[c]:cf_indptr[c+1]] # faces + ] for c in cells # cells + ] + # Update offset nodes_offset += g.num_nodes + cell_offset += g.num_cells - # NOTE: only cell->faces relation will be kept, so far we export only polyhedron - # otherwise in the next lines we should consider also the cell->nodes map + # Determine the total number of blocks. Recall that tetra and general + # polyhedron{n} cells are stored differently. 
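+        # (cell_to_nodes holds the tetra blocks, while cell_to_faces holds the
+        # general polyhedra, e.g., "polyhedron6" for hexahedral cells.)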
+ num_tetra_blocks = len(cell_to_nodes) + num_polyhedron_blocks = len(cell_to_faces) + num_blocks = num_tetra_blocks + num_polyhedron_blocks - # construct the meshio data structure - num_block = len(cell_to_nodes) - meshio_cells = np.empty(num_block, dtype=object) - meshio_cell_id = np.empty(num_block, dtype=object) + # Initialize the meshio data structure for the connectivity and cell ids. + meshio_cells = np.empty(num_blocks, dtype=object) + meshio_cell_id = np.empty(num_blocks, dtype=object) + # Store tetra cells. Use cell_to_nodes to store the connectivity info. + for block, (cell_type, cell_block) in enumerate(cell_to_nodes.items()): + meshio_cells[block] = meshio.CellBlock(cell_type, cell_block.astype(int)) + meshio_cell_id[block] = np.array(cell_id[cell_type]) + + # Store tetra cells first. Use the more general cell_to_faces to store + # the connectivity info. for block, (cell_type, cell_block) in enumerate(cell_to_faces.items()): + # Adapt the block number taking into account of previous cell types. + block += num_tetra_blocks meshio_cells[block] = meshio.CellBlock(cell_type, cell_block) meshio_cell_id[block] = np.array(cell_id[cell_type]) + # Return final meshio data: points, cell (connectivity), cell ids return meshio_pts, meshio_cells, meshio_cell_id def _write(self, fields: Iterable[Field], file_name: str, meshio_geom) -> None: @@ -1108,185 +1104,3 @@ def _make_file_name_mortar( else: time = str(time_step).zfill(padding) return file_name + str(dim) + "_" + time + extension - - ### Below follows utility functions for sorting points in 3d - - def _point_ind( - self, - cell_ptr, - face_ptr, - face_cells, - nodes_faces, - nodes, - fc, - normals, - num_cell_nodes, - ): - cell_nodes = np.zeros(num_cell_nodes.sum(), dtype=int) - counter = 0 - for ci in range(cell_ptr.size - 1): - loc_c = slice(cell_ptr[ci], cell_ptr[ci + 1]) - - for fi in face_cells[loc_c]: - loc_f = slice(face_ptr[fi], face_ptr[fi + 1]) - ptsId = nodes_faces[loc_f] - num_p_loc = ptsId.size - nodes_loc = nodes[:, ptsId] - # Sort points. 
Cut-down version of - # sort_points.sort_points_plane() and subfunctions - reference = np.array([0.0, 0.0, 1]) - angle = np.arccos(np.dot(normals[:, fi], reference)) - vect = np.cross(normals[:, fi], reference) - # Cut-down version of cg.rot() - W = np.array( - [ - [0.0, -vect[2], vect[1]], - [vect[2], 0.0, -vect[0]], - [-vect[1], vect[0], 0.0], - ] - ) - R = ( - np.identity(3) - + np.sin(angle) * W - + (1.0 - np.cos(angle)) * np.linalg.matrix_power(W, 2) - ) - # pts is now a npt x 3 matrix - pts = np.array( - [R.dot(nodes_loc[:, i]) for i in range(nodes_loc.shape[1])] - ) - center = R.dot(fc[:, fi]) - # Distance from projected points to center - delta = np.array([pts[i] - center for i in range(pts.shape[0])])[:, :2] - nrm = np.sqrt(delta[:, 0] ** 2 + delta[:, 1] ** 2) - delta = delta / nrm[:, np.newaxis] - - argsort = np.argsort(np.arctan2(delta[:, 0], delta[:, 1])) - cell_nodes[counter : (counter + num_p_loc)] = ptsId[argsort] - counter += num_p_loc - - return cell_nodes - - def _point_ind_numba( - self, - cell_ptr, - face_ptr, - faces_cells, - nodes_faces, - nodes, - fc, - normals, - num_cell_nodes, - ): - import numba - - @numba.jit( - "i4[:](i4[:],i4[:],i4[:],i4[:],f8[:,:],f8[:,:],f8[:,:],i4[:])", - nopython=True, - nogil=False, - ) - def _function_to_compile( - cell_ptr, - face_ptr, - faces_cells, - nodes_faces, - nodes, - fc, - normals, - num_cell_nodes, - ): - """Implementation note: This turned out to be less than pretty, and quite - a bit more explicit than the corresponding pure python implementation. - The process was basically to circumvent whatever statements numba did not - like. Not sure about why this ended so, but there you go. - """ - cell_nodes = np.zeros(num_cell_nodes.sum(), dtype=np.int32) - counter = 0 - fc.astype(np.float64) - for ci in range(cell_ptr.size - 1): - loc_c = slice(cell_ptr[ci], cell_ptr[ci + 1]) - for fi in faces_cells[loc_c]: - loc_f = np.arange(face_ptr[fi], face_ptr[fi + 1]) - ptsId = nodes_faces[loc_f] - num_p_loc = ptsId.size - nodes_loc = np.zeros((3, num_p_loc)) - for iter1 in range(num_p_loc): - nodes_loc[:, iter1] = nodes[:, ptsId[iter1]] - # # Sort points. 
Cut-down version of - # # sort_points.sort_points_plane() and subfunctions - reference = np.array([0.0, 0.0, 1]) - angle = np.arccos(np.dot(normals[:, fi], reference)) - # Hand code cross product, not supported by current numba version - vect = np.array( - [ - normals[1, fi] * reference[2] - - normals[2, fi] * reference[1], - normals[2, fi] * reference[0] - - normals[0, fi] * reference[2], - normals[0, fi] * reference[1] - - normals[1, fi] * reference[0], - ], - dtype=np.float64, - ) - ## # Cut-down version of cg.rot() - W = np.array( - [ - 0.0, - -vect[2], - vect[1], - vect[2], - 0.0, - -vect[0], - -vect[1], - vect[0], - 0.0, - ] - ).reshape((3, 3)) - R = ( - np.identity(3) - + np.sin(angle) * W.reshape((3, 3)) - + ( - (1.0 - np.cos(angle)) * np.linalg.matrix_power(W, 2).ravel() - ).reshape((3, 3)) - ) - ## # pts is now a npt x 3 matrix - num_p = nodes_loc.shape[1] - pts = np.zeros((3, num_p)) - fc_loc = fc[:, fi] - center = np.zeros(3) - for i in range(3): - center[i] = ( - R[i, 0] * fc_loc[0] - + R[i, 1] * fc_loc[1] - + R[i, 2] * fc_loc[2] - ) - for i in range(num_p): - for j in range(3): - pts[j, i] = ( - R[j, 0] * nodes_loc[0, i] - + R[j, 1] * nodes_loc[1, i] - + R[j, 2] * nodes_loc[2, i] - ) - ## # Distance from projected points to center - delta = 0 * pts - for i in range(num_p): - delta[:, i] = pts[:, i] - center - nrm = np.sqrt(delta[0] ** 2 + delta[1] ** 2) - for i in range(num_p): - delta[:, i] = delta[:, i] / nrm[i] - ## - argsort = np.argsort(np.arctan2(delta[0], delta[1])) - cell_nodes[counter : (counter + num_p_loc)] = ptsId[argsort] - counter += num_p_loc - - return cell_nodes - - return _function_to_compile( - cell_ptr, - face_ptr, - faces_cells, - nodes_faces, - nodes, - fc, - normals, - num_cell_nodes, - ) From 125bc15a14cfe56d57564ef03a89354be840d9fb Mon Sep 17 00:00:00 2001 From: Jakub Both Date: Thu, 3 Mar 2022 22:59:46 +0100 Subject: [PATCH 08/83] MAINT: Apply same concepts and styles from _export_3d to _export_2d. --- src/porepy/viz/exporter.py | 277 +++++++++++++++---------------------- 1 file changed, 113 insertions(+), 164 deletions(-) diff --git a/src/porepy/viz/exporter.py b/src/porepy/viz/exporter.py index f0502b10fc..28b3276270 100644 --- a/src/porepy/viz/exporter.py +++ b/src/porepy/viz/exporter.py @@ -574,6 +574,10 @@ def _export_2d( connectivity), and cell ids in correct meshio format. """ + # Use standard names for simple object types: in this routine only triangle + # and quad cells are treated in a special manner. + polygon_map = {"polygon3": "triangle", "polygon4": "quad"} + # Dictionary storing cell->nodes connectivity information for all # cell types. For this, the nodes have to be sorted such that # they form a circular chain, describing the boundary of the cell. @@ -596,168 +600,105 @@ def _export_2d( sl = slice(nodes_offset, nodes_offset + g.num_nodes) meshio_pts[sl, :] = g.nodes.T - # TODO vectorize over cell_type? Could keep the previous concept - # of calling each element polygonN with N the number of points per cell. - # This would allow for using optimized numpy operations. - # Finally, polygon3 and polygon4 could be renamed to triangle and quad, - # whereas all other polygonN are merged to a common polygon. + # Determine cell types based on number of faces=nodes per cell. + num_faces_per_cell = g.cell_faces.getnnz(axis=0) - # See export_3d. + # Loop over all available cell types and group cells of one type. 
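+            # E.g., num_faces_per_cell = [3, 3, 4] yields the groups
+            # {"triangle": [0, 1], "quad": [2]} (hypothetical values).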
+            g_cell_map = dict()
+            for n in np.unique(num_faces_per_cell):
+                # Define cell type; check if it coincides with a predefined cell type
+                cell_type = polygon_map.get(f"polygon{n}", f"polygon{n}")
+                # Find all cells with n faces, and store for later use
+                cells = np.nonzero(num_faces_per_cell == n)[0]
+                g_cell_map[cell_type] = cells
+                # Store cell ids in global container; init if entry not yet established
+                if cell_type not in cell_id:
+                    cell_id[cell_type] = []
+                # Add offset taking into account previous grids
+                cell_id[cell_type] += (cells + cell_offset).tolist()
+
+            # Determine cell-node connectivity for each cell type and all cells.
+            # Treat triangle, quad and polygonal cells differently,
+            # aiming for optimized performance.
+            for n in np.unique(num_faces_per_cell):
+
+                # Define the cell type
+                cell_type = polygon_map.get(f"polygon{n}", f"polygon{n}")
+
+                # Check if cell_type already defined in cell_to_nodes, otherwise construct
+                if cell_type not in cell_to_nodes:
+                    cell_to_nodes[cell_type] = np.empty((0,n), dtype=int)
+
+                # Special case: Triangle cells, i.e., n=3.
+                if cell_type == "triangle":
+
+                    # Triangles have a trivial connectivity since all nodes are connected.
+
+                    # Fetch triangle cells
+                    cells = g_cell_map[cell_type]
+                    # Extract indptr for triangle cells. Here, we in general lose information about
+                    # the next cell, i.e., the end of the indices corresponding to the selected cells.
+                    cn_indptr = g.cell_nodes().indptr[cells]
+                    # Expand indptr, explicitly specifying the location of all nodes for each triangle cell.
+                    # Integrate the explicit knowledge that each triangle cell consists of 3 nodes.
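+                    # E.g., for two triangle cells with cn_indptr = [0, 3],
+                    # the expansion below gives [0, 1, 2, 3, 4, 5] (hypothetical values).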
- expanded_triangle_cn_indptr = np.vstack([triangle_cn_indptr + i for i in range(3)]).reshape(-1, order="F") - # Detect all corresponding nodes by applying the expanded mask to the indices - expanded_triangle_cn_indices = g.cell_nodes().indices[expanded_triangle_cn_indptr] - # Bring to number of triangle cells x 3 format by reshaping - triangle_cn = np.reshape(expanded_triangle_cn_indices, (-1,3), order="C") - # Add offset - cell_to_nodes["triangle"] = triangle_cn + nodes_offset - - # 2. Quads. Proceed in case the grid contains quads. - if num_quad_cells > 0: - # For quads, g.cell_nodes cannot be blindly used as for triangles, since the - # ordering of the nodes may define a cell of the type - # x--x and not x--x - # \/ | | - # /\ | | - # x--x x--x . - # Therefore, use both g.cell_faces and g.face_nodes to make use of face information - # and sort those correctly to retrieve the correct connectivity., - - # Collect all cell nodes including their connectivity in a matrix of double size. - # The goal will be to gather starting and end points for all faces of each cell, - # sort those faces, such that they form a circlular graph, and then choose the - # resulting starting points of all faces to define the connectivity. - - # First determine all faces of all quad cells. Use an analogous approach as - # used to determine all cell nodes for triangle cells. And use the hardcoded - # info that each quad has four faces. - quad_cf_indptr = g.cell_faces.indptr[quad_cells] - expanded_quad_cf_indptr = np.vstack([quad_cf_indptr + i for i in range(4)]).reshape(-1, order="F") - quad_cf_indices = g.cell_faces.indices[expanded_quad_cf_indptr] - - # Determine the associated (two) nodes of all faces for each cell. Use again an - # analogous approach as for quad cell faces. In particular, include the hardcoded - # info that each face has two nodes. - quad_fn_indptr = g.face_nodes.indptr[quad_cf_indices] - # Extract nodes for first and second node of each face; reshape such - # that all first nodes of all faces for each cell are stored in one row, - # i.e., end up with an array of size num_quad_cells x 4. - quad_cfn_indices = [g.face_nodes.indices[quad_fn_indptr + i].reshape(-1,4) for i in range(2)] - # Group first and second nodes, with alternating order of rows. - # By this, each cell is respresented by two rows. The first and second - # rows contain first and second nodes of faces. And each column stands - # for one face. - quad_cfn = np.ravel(quad_cfn_indices, order="F").reshape(4,-1).T - - # Sort faces for each cell such that they form a chain. Use a function - # compiled with Numba. Currently, no alternative is provided when Numba - # is not installed. This step is the bottleneck of this routine. - if not self._has_numba: - raise NotImplementedError("The sorting algorithm requires numba to be installed.") - quad_cfn = self._sort_point_pairs_numba(quad_cfn).astype(int) - - # For each cell pick the sorted nodes such that they form a chain and thereby - # define the connectivity. - quad_cn = quad_cfn[np.arange(0,2*num_quad_cells,2),:] - - # Add offset to account for multiple grids - cell_to_nodes["quad"] = quad_cn + nodes_offset - - # 3. General polygons. Proceed in case the grid contains polygons. - # TODO rm. 
-            if num_polygon_cells > 0:
-
-                # Cell-face and face-node relations
-                g_faces_cells, g_cells, _ = sps.find(g.cell_faces)
-                # Ensure ordering of the cells
-                g_faces_cells = g_faces_cells[np.argsort(g_cells)]
-                g_nodes_faces, _, _ = sps.find(g.face_nodes)
-
-                # loop on all the grid cells
-                for c in polygon_cells:
-                    loc = slice(g.cell_faces.indptr[c], g.cell_faces.indptr[c + 1])
-
-                    # get the nodes for the current cell
-                    nodes = np.array(
-                        [
-                            g_nodes_faces[
-                                g.face_nodes.indptr[f] : g.face_nodes.indptr[f + 1]
-                            ]
-                            for f in g_faces_cells[loc]
-                        ]
-                    ).T
-
-                    # sort the nodes
-                    nodes_loc, *_ = pp.utils.sort_points.sort_point_pairs(nodes)
-
-                    # TODO rm.
-                    ## define the type of cell we are currently saving
-                    #cell_type_raw = "polygon" + str(nodes_loc.shape[1])
-
-                    #default_type = "polygon"
-                    ## Map to triangle/quad if relevant, or simple polygon for general cells.
-                    #cell_type = polygon_map.get(cell_type_raw, default_type)
-                    cell_type = "polygon"
-
-                    # Gather cells of same type, and store both cell ids and connectivity patterns
-                    # for each (stored through sorted cell nodes).
-                    if cell_type not in cell_to_nodes:
-                        # Initialize data structures if cell type not present
-                        cell_to_nodes[cell_type] = np.atleast_2d(nodes_loc[0] + nodes_offset)
-                    else:
-                        # Extend existing data structures
-                        cell_to_nodes[cell_type] = np.vstack(
-                            (cell_to_nodes[cell_type], nodes_loc[0] + nodes_offset)
-                        )
+            # Define the cell type
+            cell_type = polygon_map.get(f"polygon{n}", f"polygon{n}")
+
+            # Check if cell_type already defined in cell_to_nodes, otherwise construct
+            if cell_type not in cell_to_nodes:
+                cell_to_nodes[cell_type] = np.empty((0,n), dtype=int)
+
+            # Special case: Triangle cells, i.e., n=3.
+            if cell_type == "triangle":
+
+                # Triangles have a trivial connectivity since all nodes are connected.
+
+                # Fetch triangle cells
+                cells = g_cell_map[cell_type]
+                # Extract indptr for triangle cells. Here, we in general lose information about
+                # the next cell, i.e., the end of the indices corresponding to the selected cells.
+                cn_indptr = g.cell_nodes().indptr[cells]
+                # Expand indptr, explicitly specifying the location of all nodes for each triangle cell.
+                # Integrate the explicit knowledge that each triangle cell consists of 3 nodes.
+                expanded_cn_indptr = np.vstack([cn_indptr + i for i in range(n)]).reshape(-1, order="F")
+                # Detect all corresponding nodes by applying the expanded mask to the indices
+                expanded_cn_indices = g.cell_nodes().indices[expanded_cn_indptr]
+                # Bring to number of triangle cells x 3 format by reshaping
+                cn = np.reshape(expanded_cn_indices, (-1,n), order="C")
+
+            # Quad and polygon cells.
+            else:
+                # For quads/polygons, g.cell_nodes cannot be blindly used as for triangles, since the
+                # ordering of the nodes may define a cell of the type (here specific for quads)
+                # x--x and not   x--x
+                #  \/            |  |
+                #  /\            |  |
+                # x--x           x--x .
+                # Therefore, use both g.cell_faces and g.face_nodes to make use of face information
+                # and sort those correctly to retrieve the correct connectivity.
+
+                # Strategy: Collect all cell nodes including their connectivity in a matrix of
+                # double num cell size. The goal will be to gather starting and end points for
+                # all faces of each cell, sort those faces, such that they form a circular graph,
+                # and then choose the resulting starting points of all faces to define the connectivity.
+
+                # Fetch corresponding cells
+                cells = g_cell_map[cell_type]
+                # Determine all faces of all cells. Use an analogous approach as used to determine
+                # all cell nodes for triangle cells. And use the fact that a polygon with
+                # n nodes also has n faces.
+                cf_indptr = g.cell_faces.indptr[cells]
+                expanded_cf_indptr = np.vstack([cf_indptr + i for i in range(n)]).reshape(-1, order="F")
+                cf_indices = g.cell_faces.indices[expanded_cf_indptr]
+
+                # Determine the associated (two) nodes of all faces for each cell.
+                fn_indptr = g.face_nodes.indptr[cf_indices]
+                # Extract nodes for first and second node of each face; reshape such
+                # that all first nodes of all faces for each cell are stored in one row,
+                # i.e., end up with an array of size num_cells (for this cell type) x n.
+                cfn_indices = [g.face_nodes.indices[fn_indptr + i].reshape(-1,n) for i in range(2)]
+                # Group first and second nodes, with alternating order of rows.
+                # By this, each cell is represented by two rows. The first and second
+                # rows contain first and second nodes of faces. And each column stands
+                # for one face.
+                cfn = np.ravel(cfn_indices, order="F").reshape(n,-1).T
+
+                # Sort faces for each cell such that they form a chain. Use a function
+                # compiled with Numba. Currently, no alternative is provided when Numba
+                # is not installed. This step is the bottleneck of this routine.
+                if not self._has_numba:
+                    raise NotImplementedError("The sorting algorithm requires numba to be installed.")
+                cfn = self._sort_point_pairs_numba(cfn).astype(int)
+
+                # For each cell pick the sorted nodes such that they form a chain and thereby
+                # define the connectivity, i.e., skip every second row.
+                cn = cfn[::2,:]
+
+            # Add offset to account for previous grids, and store
+            cell_to_nodes[cell_type] = np.vstack(
+                (cell_to_nodes[cell_type], cn + nodes_offset)
+            )
 
             # Update offset
             nodes_offset += g.num_nodes
             cell_offset += g.num_cells
 
@@ -771,9 +712,16 @@ def _export_2d(
         # For each cell_type store the connectivity pattern cell_to_nodes for
         # the corresponding cells with ids from cell_id.
         for block, (cell_type, cell_block) in enumerate(cell_to_nodes.items()):
-            meshio_cells[block] = meshio.CellBlock(cell_type, cell_block.astype(int))
+            # Meshio does not accept polygon{n} cell types. Remove {n} if
+            # cell type not special geometric object.
+            cell_type_meshio_format = "polygon" if cell_type.startswith("polygon") else cell_type
+            meshio_cells[block] = meshio.CellBlock(
+                cell_type_meshio_format,
+                cell_block.astype(int)
+            )
             meshio_cell_id[block] = np.array(cell_id[cell_type])
 
+        # Return final meshio data: points, cell (connectivity), cell ids
         return meshio_pts, meshio_cells, meshio_cell_id
 
     def _sort_point_pairs_numba(self, lines):
@@ -908,7 +856,6 @@ def _export_3d(
         num_faces_per_cell = g.cell_faces.getnnz(axis=0)
 
         # Loop over all available cell types and group cells of one type.
-        # Init first data corresponding structure.
         g_cell_map = dict()
         for n in np.unique(num_faces_per_cell):
             # Define cell type; check if it coincides with a predefined cell type
@@ -938,7 +885,7 @@ def _export_3d(
             # ordered in any specific type, as the geometrical object is invariant under
             # permutations.
 
-            # Determine all tetra cells
+            # Fetch tetra cells
             cells = g_cell_map[cell_type]
             # Extract indptr for all tetra cells.
cn_indptr = g.cell_nodes().indptr[cells] @@ -953,7 +900,9 @@ def _export_3d( if cell_type not in cell_to_nodes: cell_to_nodes[cell_type] = np.empty((0,4), dtype=int) # Store cell-node connectivity, and add offset taking into account previous grids - cell_to_nodes[cell_type] = cn_indices + nodes_offset + cell_to_nodes[cell_type] = np.vstack( + (cell_to_nodes[cell_type], cn_indices + nodes_offset) + ) # Identify remaining cells as polyhedra else: @@ -962,7 +911,7 @@ def _export_3d( # larger than the info provided for tetra cells. Here, we make use of the fact # that g.face_nodes provides nodes ordered wrt. the right-hand rule. - # Determine all cells with n faces + # Fetch cells with n faces cells = g_cell_map[cell_type] # Store short cuts to cell-face and face-node information @@ -978,7 +927,7 @@ def _export_3d( # list over faces, and each face entry is given by the face nodes. if cell_type not in cell_to_faces: cell_to_faces[cell_type] = [] - cell_to_faces[cell_type] = [ + cell_to_faces[cell_type] += [ [ fn_indices[fn_indptr[f] : fn_indptr[f+1]] # nodes for f in cf_indices[cf_indptr[c]:cf_indptr[c+1]] # faces From da7656215be2c12030b6623d356184a45012b249 Mon Sep 17 00:00:00 2001 From: Jakub Both Date: Thu, 3 Mar 2022 23:59:22 +0100 Subject: [PATCH 09/83] MAINT: Unify treatment of simplices and apply to export 1d, 2d, 3d. --- src/porepy/viz/exporter.py | 122 ++++++++++++++++++++++--------------- 1 file changed, 73 insertions(+), 49 deletions(-) diff --git a/src/porepy/viz/exporter.py b/src/porepy/viz/exporter.py index 28b3276270..ed02623fa5 100644 --- a/src/porepy/viz/exporter.py +++ b/src/porepy/viz/exporter.py @@ -508,57 +508,95 @@ def _export_1d( """ Export the geometrical data (point coordinates) and connectivity information from the 1d PorePy grids to meshio. + + Parameters: + gs (Iterable[pp.Grid]): Subdomains. + + Returns: + Tuple[np.ndarray, np.ndarray, np.ndarray]: Points, 1d cells (storing the + connectivity), and cell ids in correct meshio format. """ - # in 1d we have only one cell type + # In 1d each cell is a line cell_type = "line" - # cell connectivity information - num_cells = np.sum(np.array([g.num_cells for g in gs])) - cell_to_nodes = {cell_type: np.empty((num_cells, 2))} # type: ignore - # cell id map - cell_id = {cell_type: np.empty(num_cells, dtype=int)} # type: ignore - cell_offset = 0 + # Dictionary storing cell->nodes connectivity information + cell_to_nodes: Dict[str, np.ndarray] = { + cell_type: np.empty((0, 2), dtype=int) + } + + # Dictionary collecting all cell ids for each cell type. + # Since each cell is a line, the list of cell ids is trivial + total_num_cells = np.sum(np.array([g.num_cells for g in gs])) + cell_id: Dict[str, List[int]] = { + cell_type: np.arange(total_num_cells, dtype=int).tolist() + } - # points + # Data structure for storing node coordinates of all 1d grids. num_pts = np.sum([g.num_nodes for g in gs]) meshio_pts = np.empty((num_pts, 3)) # type: ignore + + # Initialize offsets. Required taking into account multiple 1d grids. 
         nodes_offset = 0
+        cell_offset = 0
 
-        # loop on all the 1d grids
+        # Loop over all 2d grids
         for g in gs:
-            # save the points information
+
+            # Store node coordinates
             sl = slice(nodes_offset, nodes_offset + g.num_nodes)
             meshio_pts[sl, :] = g.nodes.T
 
-            # TODO vectorize, and use directly indices and indptr without sorting
-            # Cell-node relations
-            g_cell_nodes = g.cell_nodes()
-            g_nodes_cells, g_cells, _ = sps.find(g_cell_nodes)
-            # Ensure ordering of the cells
-            g_nodes_cells = g_nodes_cells[np.argsort(g_cells)]
-
-            # loop on all the grid cells
-            for c in np.arange(g.num_cells):
-                loc = slice(g_cell_nodes.indptr[c], g_cell_nodes.indptr[c + 1])
-                # get the local nodes and save them
-                cell_to_nodes[cell_type][cell_offset, :] = g_nodes_cells[loc] + nodes_offset
-                cell_id[cell_type][cell_offset] = cell_offset
-                cell_offset += 1
+            # Lines are simplices, and have a trivial connectivity.
+            cn_indices = self._simplex_cell_to_nodes(2, g)
+
+            # Add to previous connectivity information
+            cell_to_nodes[cell_type] = np.vstack(
+                (cell_to_nodes[cell_type], cn_indices + cell_offset)
+            )
 
+            # Update offsets
             nodes_offset += g.num_nodes
+            cell_offset += g.num_cells
 
-        # construct the meshio data structure
+        # Construct the meshio data structure
         num_blocks = len(cell_to_nodes)
         meshio_cells = np.empty(num_blocks, dtype=object)
         meshio_cell_id = np.empty(num_blocks, dtype=object)
 
+        # For each cell_type store the connectivity pattern cell_to_nodes for
+        # the corresponding cells with ids from cell_id.
         for block, (cell_type, cell_block) in enumerate(cell_to_nodes.items()):
             meshio_cells[block] = meshio.CellBlock(cell_type, cell_block.astype(int))
             meshio_cell_id[block] = np.array(cell_id[cell_type])
 
+        # Return final meshio data: points, cell (connectivity), cell ids
         return meshio_pts, meshio_cells, meshio_cell_id
 
+    def _simplex_cell_to_nodes(self, n: int, g: pp.Grid, cells: Optional[np.ndarray] = None) -> np.ndarray:
+        """Determine cell to node connectivity for an n-simplex.
+
+        Parameters:
+            n (int): order of the simplices in the grid.
+            g (pp.Grid): grid containing cells and nodes.
+            cells (np.ndarray, optional): all simplex cells.
+
+        Returns:
+            np.ndarray: cell to node connectivity array, in which for each row
+                all nodes are given.
+        """
+
+        # Determine cell-node ptr
+        cn_indptr = g.cell_nodes().indptr[:-1] if cells is None else g.cell_nodes().indptr[cells]
+        # Each cell contains n nodes
+        expanded_cn_indptr = np.vstack([cn_indptr + i for i in range(n)]).reshape(-1, order="F")
+        # Detect all corresponding nodes by applying the expanded mask to the indices
+        expanded_cn_indices = g.cell_nodes().indices[expanded_cn_indptr]
+        # Bring in correct form.
+        cn_indices = np.reshape(expanded_cn_indices, (-1,n), order="C")
+
+        return cn_indices
+
     def _export_2d(
         self, gs: Iterable[pp.Grid]
     ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
@@ -632,20 +670,12 @@ def _export_2d(
 
             # Special case: Triangle cells, i.e., n=3.
             if cell_type == "triangle":
 
-                # Triangles have a trivial connectivity since all nodes are connected.
+                # Triangles are simplices and have a trivial connectivity.
 
                 # Fetch triangle cells
                 cells = g_cell_map[cell_type]
-                # Extract indptr for triangle cells. Here, we in general lose information about
-                # the next cell, i.e., the end of the indices corresponding to the selected cells.
-                cn_indptr = g.cell_nodes().indptr[cells]
-                # Expand indptr, explicitly specifying the location of all nodes for each triangle cell.
-                # Integrate the explicit knowledge that each triangle cell consists of 3 nodes.
-                expanded_cn_indptr = np.vstack([cn_indptr + i for i in range(n)]).reshape(-1, order="F")
-                # Detect all corresponding nodes by applying the expanded mask to the indices
-                expanded_cn_indices = g.cell_nodes().indices[expanded_cn_indptr]
-                # Bring to number of triangle cells x 3 format by reshaping
-                cn = np.reshape(expanded_cn_indices, (-1,n), order="C")
+                # Determine the trivial connectivity.
+                cn_indices = self._simplex_cell_to_nodes(3, g, cells)
 
             # Quad and polygon cells.
             else:
@@ -693,14 +723,14 @@ def _export_2d(
 
                 # For each cell pick the sorted nodes such that they form a chain and thereby
                 # define the connectivity, i.e., skip every second row.
-                cn = cfn[::2,:]
+                cn_indices = cfn[::2,:]
 
                 # Add offset to account for previous grids, and store
                 cell_to_nodes[cell_type] = np.vstack(
-                    (cell_to_nodes[cell_type], cn + nodes_offset)
+                    (cell_to_nodes[cell_type], cn_indices + nodes_offset)
                 )
 
-            # Update offset
+            # Update offsets
             nodes_offset += g.num_nodes
             cell_offset += g.num_cells
 
@@ -885,17 +915,11 @@ def _export_3d(
             # ordered in any specific type, as the geometrical object is invariant under
             # permutations.
 
             # Fetch tetra cells
             cells = g_cell_map[cell_type]
-            # Extract indptr for all tetra cells.
-            cn_indptr = g.cell_nodes().indptr[cells]
-            # Expand indptr, explicitly specifying the location of all nodes for each tetra cell.
-            # Integrate the explicit knowledge that each tetra cell consists of 4 nodes.
-            expanded_cn_indptr = np.vstack([cn_indptr + i for i in range(4)]).reshape(-1, order="F")
-            # Detect all corresponding nodes by applying the expanded mask to the indices
-            cn_indices = g.cell_nodes().indices[expanded_cn_indptr]
-            # Bring to number of num tetra cells x 4 format by reshaping
-            cn_indices = np.reshape(cn_indices, (-1,4), order="C")
+            # Tetrahedra are simplices and have a trivial connectivity.
+            cn_indices = self._simplex_cell_to_nodes(4, g, cells)
+
             # Initialize data structure if not available yet
             if cell_type not in cell_to_nodes:
                 cell_to_nodes[cell_type] = np.empty((0,4), dtype=int)

From d7f7849a5a8b9c782c3eb5627f7a2f0e209e6ff2 Mon Sep 17 00:00:00 2001
From: Jakub Both
Date: Fri, 4 Mar 2022 22:30:25 +0100
Subject: [PATCH 10/83] MAINT/TST: Update tests for Exporter.

Due to invasive changes in the export of geometric information in the
updated Exporter, most of the previous tests have failed. The following
changes have been observed, and could be expected based on the current
modifications of the Exporter:

* New cell type: in 3d, tetrahedra are now specifically identified
  as such. Before, they were identified as general polyhedra.

* Connectivity (nodes and faces) due to new sorting algorithms.
  In 1d, connectivity of nodes is consecutive, as should be expected.
  This has not been the case for mortar grids in the previous tests.
  There, the connectivity instead suggested non-connected line
  segments (for 1d mortars). In 3d, for special geometric objects
  incl. tetrahedra, no face connectivity is required, making the
  vtu files lighter.

* Order of cell_id. For 3d polyhedra, cells are grouped
  according to the number of faces per cell. This grouping
  results in a non-consecutive ordering of cells, when
  printing the output.

These changes are significant and should be double-checked using
simulations.
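
For illustration, a minimal sketch of the grouping effect on cell ids
(toy face counts, not taken from the test suite):

    import numpy as np

    # Hypothetical number of faces per cell in a small 3d polyhedral grid.
    num_faces_per_cell = np.array([6, 4, 6, 5, 4])

    # Group cells by their number of faces, as the updated Exporter does.
    cell_id = []
    for n in np.unique(num_faces_per_cell):
        cell_id += np.nonzero(num_faces_per_cell == n)[0].tolist()

    print(cell_id)  # [1, 4, 3, 0, 2] - grouped, hence non-consecutive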
--- tests/unit/test_vtk.py | 5616 ++++++++++------------------------------ 1 file changed, 1347 insertions(+), 4269 deletions(-) diff --git a/tests/unit/test_vtk.py b/tests/unit/test_vtk.py index 3a969da31b..6b5472f511 100644 --- a/tests/unit/test_vtk.py +++ b/tests/unit/test_vtk.py @@ -401,21 +401,21 @@ def _single_grid_2d_simplex_grid_vtu(self): 1 5 6 +2 3 7 2 -2 6 7 4 5 9 +4 8 9 -4 5 -10 6 +10 5 9 10 @@ -425,12 +425,12 @@ def _single_grid_2d_simplex_grid_vtu(self): 6 10 11 +8 9 13 8 12 13 -8 9 10 14 @@ -438,11 +438,11 @@ def _single_grid_2d_simplex_grid_vtu(self): 13 14 10 -15 11 -10 15 +10 14 +15 @@ -715,10 +715,10 @@ def _single_grid_2d_cart_grid_vtu(self): 7 8 3 +3 8 9 4 -3 5 10 11 @@ -727,14 +727,14 @@ def _single_grid_2d_cart_grid_vtu(self): 11 12 7 +7 12 13 8 -7 8 -9 -14 13 +14 +9 10 15 16 @@ -747,26 +747,26 @@ def _single_grid_2d_cart_grid_vtu(self): 17 18 13 +13 18 19 14 -13 15 20 21 16 +16 21 22 17 -16 17 22 23 18 18 -19 -24 23 +24 +19 @@ -2053,3090 +2053,168 @@ def _single_grid_3d_simplex_grid_vtu(self): -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 - - - -4 -3 -0 -4 -1 -3 -0 -16 -1 -3 -16 -4 -0 -3 -16 -4 -1 -4 -3 -16 -4 -1 -3 -20 -4 -1 -3 -1 -20 -16 -3 -4 -20 -16 -4 -3 -16 -20 -17 -3 -1 -20 -17 -3 -16 -1 -17 -3 -1 -20 -16 -4 -3 -4 -5 -1 -3 -20 -4 -1 -3 -20 -5 -1 -3 -4 -20 -5 -4 -3 -17 -5 -1 -3 -20 -5 -1 -3 -1 -20 -17 -3 -5 -20 -17 -4 -3 -20 -21 -17 -3 -20 -5 -21 -3 -5 -20 -17 -3 -5 -21 -17 -4 -3 -1 -5 -2 -3 -1 -17 -2 -3 -17 -5 -1 -3 -17 -5 -2 -4 -3 -5 -21 -17 -3 -17 -5 -2 -3 -21 -5 -2 -3 -2 -21 -17 -4 -3 -17 -21 -18 -3 -2 -21 -18 -3 -17 -2 -18 -3 -2 -21 -17 -4 -3 -21 -6 -2 -3 -5 -6 -2 -3 -21 -5 -2 -3 -5 -21 -6 -4 -3 -6 -21 -18 -3 -2 -21 -18 -3 -18 -6 -2 -3 -21 -6 -2 -4 -3 -6 -21 -18 -3 -6 -22 -18 -3 -21 -6 -22 -3 -21 -22 -18 -4 -3 -2 -6 -3 -3 -2 -18 -3 -3 -18 -6 -2 -3 -18 -6 -3 -4 -3 -6 -22 -18 -3 -3 -22 -18 -3 -18 -6 -3 -3 -22 -6 -3 -4 -3 -18 -3 -19 -3 -3 -22 -18 -3 -3 -22 -19 -3 -18 -22 -19 -4 -3 -6 -22 -7 -3 -22 -7 -3 -3 -22 -6 -3 -3 -6 -7 -3 -4 -3 -19 -7 -3 -3 -22 -7 -3 -3 -3 -22 -19 -3 -7 -22 -19 -4 -3 -22 -7 -23 -3 -22 -23 -19 -3 -7 -22 -19 -3 -7 -23 -19 -4 -3 -4 -8 -5 -3 -4 -20 -5 -3 -20 -8 -4 -3 -20 -8 -5 -4 -3 -5 -24 -20 -3 -20 -8 -5 -3 -24 -8 -5 -3 -8 -24 -20 -4 -3 -20 -24 -21 -3 -5 -24 -21 -3 -20 -5 -21 -3 -5 -24 -20 -4 -3 -8 -9 -5 -3 -24 -8 -5 -3 -24 -9 -5 -3 -8 -24 -9 -4 -3 -5 -24 -21 -3 -9 -24 -21 -3 -21 -9 -5 -3 -24 -9 -5 -4 -3 -9 -24 -21 -3 -9 -25 -21 -3 -24 -9 -25 -3 -24 -25 -21 -4 -3 -21 -9 -6 -3 -5 -9 -6 -3 -5 -21 -6 -3 -21 -9 -5 -4 -3 -9 -25 -21 -3 -6 -25 -21 -3 -21 -9 -6 -3 -25 -9 -6 -4 -3 -21 -6 -22 -3 -6 -25 -21 -3 -6 -25 -22 -3 -21 -25 -22 -4 -3 -9 -25 -10 -3 -25 -10 -6 -3 -9 -10 -6 -3 -25 -9 -6 -4 -3 -22 -10 -6 -3 -25 -10 -6 -3 -6 -25 -22 -3 -10 -25 -22 -4 -3 -25 -10 -26 -3 -10 -25 -22 -3 -10 -26 -22 -3 -25 -26 -22 -4 -3 -22 -10 -7 -3 -22 -10 -6 -3 -6 -22 -7 -3 -6 -10 -7 -4 -3 -22 -10 -7 -3 -26 -10 -7 -3 -7 -26 -22 -3 -10 -26 -22 -4 -3 -22 -7 -23 -3 -7 -26 -22 
-3 -7 -26 -23 -3 -22 -26 -23 -4 -3 -10 -26 -11 -3 -26 -11 -7 -3 -10 -11 -7 -3 -26 -10 -7 -4 -3 -23 -11 -7 -3 -26 -11 -7 -3 -7 -26 -23 -3 -11 -26 -23 -4 -3 -26 -27 -23 -3 -26 -11 -27 -3 -11 -27 -23 -3 -11 -26 -23 -4 -3 -8 -12 -9 -3 -8 -24 -9 -3 -24 -12 -8 -3 -24 -12 -9 -4 -3 -9 -28 -24 -3 -12 -28 -24 -3 -24 -12 -9 -3 -28 -12 -9 -4 -3 -24 -9 -25 -3 -9 -28 -24 -3 -9 -28 -25 -3 -24 -28 -25 -4 -3 -28 -13 -9 -3 -12 -13 -9 -3 -28 -12 -9 -3 -12 -28 -13 -4 -3 -13 -28 -25 -3 -9 -28 -25 -3 -25 -13 -9 -3 -28 -13 -9 -4 -3 -13 -28 -25 -3 -13 -29 -25 -3 -28 -13 -29 -3 -28 -29 -25 -4 -3 -9 -13 -10 -3 -9 -25 -10 -3 -25 -13 -9 -3 -25 -13 -10 -4 -3 -13 -29 -25 -3 -10 -29 -25 -3 -25 -13 -10 -3 -29 -13 -10 -4 -3 -25 -10 -26 -3 -10 -29 -25 -3 -10 -29 -26 -3 -25 -29 -26 -4 -3 -13 -29 -14 -3 -29 -14 -10 -3 -29 -13 -10 -3 -13 -14 -10 -4 -3 -26 -14 -10 -3 -29 -14 -10 -3 -10 -29 -26 -3 -14 -29 -26 -4 -3 -29 -14 -30 -3 -29 -30 -26 -3 -14 -29 -26 -3 -14 -30 -26 -4 -3 -10 -14 -11 -3 -10 -26 -11 -3 -26 -14 -10 -3 -26 -14 -11 -4 -3 -14 -30 -26 -3 -26 -14 -11 -3 -30 -14 -11 -3 -11 -30 -26 -4 -3 -26 -30 -27 -3 -11 -30 -27 -3 -11 -30 -26 -3 -26 -11 -27 -4 -3 -14 -15 -11 -3 -30 -14 -11 -3 -30 -15 -11 -3 -14 -30 -15 -4 -3 -11 -30 -27 -3 -15 -30 -27 -3 -27 -15 -11 -3 -30 -15 -11 -4 -3 -15 -30 -27 -3 -15 -31 -27 -3 -30 -15 -31 -3 -30 -31 -27 -4 -3 -16 -20 -17 -3 -16 -32 -17 -3 -32 -20 -16 -3 -32 -20 -17 -4 -3 -17 -36 -32 -3 -20 -36 -32 -3 -36 -20 -17 -3 -32 -20 -17 -4 -3 -32 -17 -33 -3 -17 -36 -32 -3 -17 -36 -33 -3 -32 -36 -33 -4 -3 -20 -21 -17 -3 -36 -20 -17 -3 -36 -21 -17 -3 -20 -36 -21 -4 -3 -21 -36 -33 -3 -33 -21 -17 -3 -36 -21 -17 -3 -17 -36 -33 -4 -3 -36 -21 -37 -3 -36 -37 -33 -3 -21 -36 -33 -3 -21 -37 -33 -4 -3 -33 -21 -18 -3 -33 -21 -17 -3 -17 -33 -18 -3 -17 -21 -18 -4 -3 -33 -21 -18 -3 -37 -21 -18 -3 -18 -37 -33 -3 -21 -37 -33 -4 -3 -33 -18 -34 -3 -18 -37 -33 -3 -18 -37 -34 -3 -33 -37 -34 -4 -3 -21 -22 -18 -3 -37 -21 -18 -3 -37 -22 -18 -3 -21 -37 -22 -4 -3 -18 -37 -34 -3 -22 -37 -34 -3 -34 -22 -18 -3 -37 -22 -18 -4 -3 -37 -38 -34 -3 -37 -22 -38 -3 -22 -37 -34 -3 -22 -38 -34 -4 -3 -18 -22 -19 -3 -18 -34 -19 -3 -34 -22 -18 -3 -34 -22 -19 -4 -3 -34 -22 -19 -3 -38 -22 -19 -3 -19 -38 -34 -3 -22 -38 -34 -4 -3 -19 -38 -35 -3 -34 -19 -35 -3 -19 -38 -34 -3 -34 -38 -35 -4 -3 -22 -38 -23 -3 -38 -23 -19 -3 -38 -22 -19 -3 -22 -23 -19 -4 -3 -35 -23 -19 -3 -38 -23 -19 -3 -19 -38 -35 -3 -23 -38 -35 -4 -3 -23 -38 -35 -3 -23 -39 -35 -3 -38 -23 -39 -3 -38 -39 -35 -4 -3 -20 -24 -21 -3 -20 -36 -21 -3 -36 -24 -20 -3 -36 -24 -21 -4 -3 -21 -40 -36 -3 -24 -40 -36 -3 -36 -24 -21 -3 -40 -24 -21 -4 -3 -36 -21 -37 -3 -21 -40 -36 -3 -21 -40 -37 -3 -36 -40 -37 -4 -3 -40 -25 -21 -3 -24 -40 -25 -3 -24 -25 -21 -3 -40 -24 -21 -4 -3 -37 -25 -21 -3 -40 -25 -21 -3 -21 -40 -37 -3 -25 -40 -37 -4 -3 -40 -41 -37 -3 -25 -40 -37 -3 -25 -41 -37 -3 -40 -25 -41 -4 -3 -37 -25 -21 -3 -37 -25 -22 -3 -21 -25 -22 -3 -21 -37 -22 -4 -3 -22 -41 -37 -3 -25 -41 -37 -3 -37 -25 -22 -3 -41 -25 -22 -4 -3 -22 -41 -38 -3 -37 -41 -38 -3 -37 -22 -38 -3 -22 -41 -37 -4 -3 -25 -26 -22 -3 -41 -25 -22 -3 -41 -26 -22 -3 -25 -41 -26 -4 -3 -22 -41 -38 -3 -26 -41 -38 -3 -38 -26 -22 -3 -41 -26 -22 -4 -3 -26 -41 -38 -3 -26 -42 -38 -3 -41 -26 -42 -3 -41 -42 -38 -4 -3 -22 -26 -23 -3 -22 -38 -23 -3 -38 -26 -22 -3 -38 -26 -23 -4 -3 -26 -42 -38 -3 -23 -42 -38 -3 -42 -26 -23 -3 -38 -26 -23 -4 -3 -38 -23 -39 -3 -23 -42 -38 -3 -23 -42 -39 -3 -38 -42 -39 -4 -3 -26 -27 -23 -3 -42 -26 -23 -3 -42 -27 -23 -3 -26 -42 -27 -4 -3 -27 -42 -39 -3 -23 -42 -39 -3 -39 -27 -23 -3 -42 -27 -23 -4 -3 -27 -42 -39 -3 -27 
-43 -39 -3 -42 -27 -43 -3 -42 -43 -39 -4 -3 -40 -28 -25 -3 -40 -28 -24 -3 -24 -40 -25 -3 -24 -28 -25 -4 -3 -40 -28 -25 -3 -44 -28 -25 -3 -25 -44 -40 -3 -28 -44 -40 -4 -3 -40 -25 -41 -3 -25 -44 -40 -3 -25 -44 -41 -3 -40 -44 -41 -4 -3 -28 -44 -29 -3 -44 -29 -25 -3 -28 -29 -25 -3 -44 -28 -25 -4 -3 -41 -29 -25 -3 -44 -29 -25 -3 -25 -44 -41 -3 -29 -44 -41 -4 -3 -44 -45 -41 -3 -44 -29 -45 -3 -29 -45 -41 -3 -29 -44 -41 -4 -3 -25 -29 -26 -3 -25 -41 -26 -3 -41 -29 -25 -3 -41 -29 -26 -4 -3 -26 -45 -41 -3 -29 -45 -41 -3 -41 -29 -26 -3 -45 -29 -26 -4 -3 -41 -26 -42 -3 -26 -45 -41 -3 -26 -45 -42 -3 -41 -45 -42 -4 -3 -29 -45 -30 -3 -29 -30 -26 -3 -45 -29 -26 -3 -45 -30 -26 -4 -3 -26 -45 -42 -3 -30 -45 -42 -3 -42 -30 -26 -3 -45 -30 -26 -4 -3 -30 -45 -42 -3 -30 -46 -42 -3 -45 -30 -46 -3 -45 -46 -42 -4 -3 -42 -30 -26 -3 -42 -30 -27 -3 -26 -30 -27 -3 -26 -42 -27 -4 -3 -42 -30 -27 -3 -46 -30 -27 -3 -27 -46 -42 -3 -30 -46 -42 -4 -3 -42 -27 -43 -3 -27 -46 -42 -3 -27 -46 -43 -3 -42 -46 -43 -4 -3 -30 -46 -31 -3 -46 -31 -27 -3 -46 -30 -27 -3 -30 -31 -27 -4 -3 -43 -31 -27 -3 -46 -31 -27 -3 -27 -46 -43 -3 -31 -46 -43 -4 -3 -46 -31 -47 -3 -46 -47 -43 -3 -31 -46 -43 -3 -31 -47 -43 -4 -3 -32 -36 -33 -3 -32 -48 -33 -3 -48 -36 -32 -3 -48 -36 -33 -4 -3 -36 -52 -48 -3 -48 -36 -33 -3 -52 -36 -33 -3 -33 -52 -48 -4 -3 -33 -52 -49 -3 -48 -52 -49 -3 -48 -33 -49 -3 -33 -52 -48 -4 -3 -36 -37 -33 -3 -52 -36 -33 -3 -52 -37 -33 -3 -36 -52 -37 -4 -3 -33 -52 -49 -3 -37 -52 -49 -3 -49 -37 -33 -3 -52 -37 -33 -4 -3 -37 -52 -49 -3 -37 -53 -49 -3 -52 -37 -53 -3 -52 -53 -49 -4 -3 -49 -37 -34 -3 -33 -37 -34 -3 -33 -49 -34 -3 -49 -37 -33 -4 -3 -34 -53 -49 -3 -37 -53 -49 -3 -49 -37 -34 -3 -53 -37 -34 -4 -3 -49 -34 -50 -3 -34 -53 -49 -3 -34 -53 -50 -3 -49 -53 -50 -4 -3 -37 -38 -34 -3 -53 -37 -34 -3 -53 -38 -34 -3 -37 -53 -38 -4 -3 -38 -53 -50 -3 -34 -53 -50 -3 -53 -38 -34 -3 -50 -38 -34 -4 -3 -38 -53 -50 -3 -38 -54 -50 -3 -53 -38 -54 -3 -53 -54 -50 -4 -3 -34 -38 -35 -3 -34 -50 -35 -3 -50 -38 -34 -3 -50 -38 -35 -4 -3 -38 -54 -50 -3 -35 -54 -50 -3 -50 -38 -35 -3 -54 -38 -35 -4 -3 -50 -35 -51 -3 -35 -54 -50 -3 -35 -54 -51 -3 -50 -54 -51 -4 -3 -54 -39 -35 -3 -38 -54 -39 -3 -38 -39 -35 -3 -54 -38 -35 -4 -3 -51 -39 -35 -3 -54 -39 -35 -3 -35 -54 -51 -3 -39 -54 -51 -4 -3 -54 -55 -51 -3 -39 -54 -51 -3 -39 -55 -51 -3 -54 -39 -55 -4 -3 -52 -40 -37 -3 -52 -40 -36 -3 -36 -40 -37 -3 -36 -52 -37 -4 -3 -52 -40 -37 -3 -56 -40 -37 -3 -37 -56 -52 -3 -40 -56 -52 -4 -3 -52 -56 -53 -3 -37 -56 -53 -3 -52 -37 -53 -3 -37 -56 -52 -4 -3 -40 -41 -37 -3 -56 -40 -37 -3 -56 -41 -37 -3 -40 -56 -41 -4 -3 -37 -56 -53 -3 -53 -41 -37 -3 -56 -41 -37 -3 -41 -56 -53 -4 -3 -56 -57 -53 -3 -56 -41 -57 -3 -41 -57 -53 -3 -41 -56 -53 -4 -3 -37 -41 -38 -3 -37 -53 -38 -3 -53 -41 -37 -3 -53 -41 -38 -4 -3 -53 -41 -38 -3 -57 -41 -38 -3 -38 -57 -53 -3 -41 -57 -53 -4 -3 -53 -57 -54 -3 -38 -57 -54 -3 -53 -38 -54 -3 -38 -57 -53 -4 -3 -41 -42 -38 -3 -57 -41 -38 -3 -57 -42 -38 -3 -41 -57 -42 -4 -3 -42 -57 -54 -3 -38 -57 -54 -3 -57 -42 -38 -3 -54 -42 -38 -4 -3 -42 -57 -54 -3 -42 -58 -54 -3 -57 -42 -58 -3 -57 -58 -54 -4 -3 -54 -42 -38 -3 -54 -42 -39 -3 -38 -42 -39 -3 -38 -54 -39 -4 -3 -54 -42 -39 -3 -58 -42 -39 -3 -39 -58 -54 -3 -42 -58 -54 -4 -3 -39 -58 -55 -3 -54 -39 -55 -3 -39 -58 -54 -3 -54 -58 -55 -4 -3 -42 -58 -43 -3 -58 -43 -39 -3 -58 -42 -39 -3 -42 -43 -39 -4 -3 -55 -43 -39 -3 -58 -43 -39 -3 -39 -58 -55 -3 -43 -58 -55 -4 -3 -58 -43 -59 -3 -58 -59 -55 -3 -43 -58 -55 -3 -43 -59 -55 -4 -3 -40 -44 -41 -3 -40 -56 -41 -3 -56 -44 -40 -3 -56 -44 -41 -4 -3 -44 -60 -56 -3 -56 -44 -41 -3 -60 -44 -41 -3 
-41 -60 -56 -4 -3 -56 -60 -57 -3 -41 -60 -57 -3 -56 -41 -57 -3 -41 -60 -56 -4 -3 -44 -45 -41 -3 -60 -44 -41 -3 -60 -45 -41 -3 -44 -60 -45 -4 -3 -45 -60 -57 -3 -41 -60 -57 -3 -57 -45 -41 -3 -60 -45 -41 -4 -3 -45 -60 -57 -3 -45 -61 -57 -3 -60 -45 -61 -3 -60 -61 -57 -4 -3 -57 -45 -41 -3 -41 -45 -42 -3 -41 -57 -42 -3 -57 -45 -42 -4 -3 -45 -61 -57 -3 -42 -61 -57 -3 -61 -45 -42 -3 -57 -45 -42 -4 -3 -57 -42 -58 -3 -42 -61 -57 -3 -42 -61 -58 -3 -57 -61 -58 -4 -3 -45 -46 -42 -3 -61 -45 -42 -3 -61 -46 -42 -3 -45 -61 -46 -4 -3 -46 -61 -58 -3 -42 -61 -58 -3 -58 -46 -42 -3 -61 -46 -42 -4 -3 -46 -61 -58 -3 -46 -62 -58 -3 -61 -46 -62 -3 -61 -62 -58 -4 -3 -58 -46 -43 -3 -58 -46 -42 -3 -42 -58 -43 -3 -42 -46 -43 -4 -3 -58 -46 -43 -3 -62 -46 -43 -3 -43 -62 -58 -3 -46 -62 -58 -4 -3 -43 -62 -59 -3 -58 -62 -59 -3 -58 -43 -59 -3 -43 -62 -58 -4 -3 -46 -47 -43 -3 -62 -46 -43 -3 -62 -47 -43 -3 -46 -62 -47 -4 -3 -47 -62 -59 -3 -59 -47 -43 -3 -62 -47 -43 -3 -43 -62 -59 -4 -3 -47 -63 -59 -3 -62 -47 -63 -3 -47 -62 -59 -3 -62 -63 -59 - - - -17 -34 -51 -68 -85 -102 -119 -136 -153 -170 -187 -204 -221 -238 -255 -272 -289 -306 -323 -340 -357 -374 -391 -408 -425 -442 -459 -476 -493 -510 -527 -544 -561 -578 -595 -612 -629 -646 -663 -680 -697 -714 -731 -748 -765 -782 -799 -816 -833 -850 -867 -884 -901 -918 -935 -952 -969 -986 -1003 -1020 -1037 -1054 -1071 -1088 -1105 -1122 -1139 -1156 -1173 -1190 -1207 -1224 -1241 -1258 -1275 -1292 -1309 -1326 -1343 -1360 -1377 -1394 -1411 -1428 -1445 -1462 -1479 -1496 -1513 -1530 -1547 -1564 -1581 -1598 -1615 -1632 -1649 -1666 -1683 -1700 -1717 -1734 -1751 -1768 -1785 -1802 -1819 -1836 -1853 -1870 -1887 -1904 -1921 -1938 -1955 -1972 -1989 -2006 -2023 -2040 -2057 -2074 -2091 -2108 -2125 -2142 -2159 -2176 -2193 -2210 -2227 -2244 -2261 -2278 -2295 -2312 -2329 -2346 -2363 -2380 -2397 -2414 -2431 -2448 -2465 -2482 -2499 -2516 -2533 -2550 -2567 -2584 -2601 -2618 -2635 -2652 -2669 -2686 -2703 -2720 -2737 -2754 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 +10 @@ -7170,1988 +4248,1988 @@ def _single_grid_3d_cart_grid_vtu(self): 6 4 -25 -30 -5 0 +5 +30 +25 4 -26 -31 -6 1 +6 +31 +26 4 -25 0 -1 +25 26 +1 4 -30 5 -6 +30 31 +6 4 0 -5 -6 1 +6 +5 4 25 -30 -31 26 +31 +30 6 4 -26 +1 +6 31 +26 +4 +2 +7 32 27 4 1 -6 -7 +26 +27 2 4 -31 6 -7 -32 -4 -27 +31 32 7 -2 -4 -26 -31 -6 -1 4 -26 1 2 -27 +7 6 4 +26 27 32 -7 +31 +6 +4 2 +7 +32 +27 4 -28 -33 -8 3 +8 +33 +28 4 -27 2 -3 +27 28 +3 4 -32 7 -8 +32 33 +8 4 2 -7 -8 3 +8 +7 4 27 -32 -33 -28 -6 -4 28 33 -34 -29 +32 +6 4 3 8 -9 +33 +28 4 4 -33 -8 9 34 +29 4 +3 +28 29 +4 +4 +8 +33 34 9 4 +3 4 -28 -33 +9 8 -3 4 28 -3 -4 29 +34 +33 6 4 -30 -35 -10 5 +10 +35 +30 4 -31 -36 -11 6 +11 +36 +31 4 -30 5 -6 +30 31 +6 4 -35 10 -11 +35 36 +11 4 5 -10 -11 6 +11 +10 4 30 -35 -36 31 +36 +35 6 4 -31 6 -7 -32 -4 -31 +11 36 +31 +4 +7 +12 37 32 4 6 -11 -12 +31 +32 7 4 -32 +11 +36 37 12 +4 +6 7 +12 +11 4 31 +32 +37 36 -11 6 4 -36 -11 +7 12 37 -6 +32 4 -7 -12 -13 8 +13 +38 +33 4 -32 -37 -12 7 -4 +32 33 
+8 +4 +12 +37 38 13 -8 4 -32 7 8 -33 -4 -37 -12 13 -38 +12 4 32 -37 -38 33 +38 +37 6 4 -33 +8 +13 38 +33 +4 +9 +14 39 34 4 8 -13 -14 +33 +34 9 4 -38 13 -14 +38 39 +14 4 -33 8 9 -34 -4 -34 -39 14 -9 +13 4 33 +34 +39 38 -13 -8 6 4 -35 -40 -15 10 +15 +40 +35 4 -36 -41 -16 11 +16 +41 +36 4 -35 10 -11 +35 36 +11 4 -40 15 -16 +40 41 +16 4 10 -15 -16 11 +16 +15 4 35 -40 -41 36 +41 +40 6 4 -36 +11 +16 41 +36 +4 +12 +17 42 37 4 11 -16 -17 +36 +37 12 4 -41 16 -17 +41 42 +17 4 -36 11 12 -37 -4 -37 -42 17 -12 +16 4 36 +37 +42 41 -16 -11 6 4 -37 -42 -17 12 +17 +42 +37 4 -38 -43 -18 13 +18 +43 +38 4 -37 12 -13 +37 38 +13 4 -42 17 -18 +42 43 +18 4 12 -17 -18 13 +18 +17 4 37 -42 -43 38 +43 +42 6 4 -38 +13 +18 43 +38 +4 +14 +19 44 39 4 13 -18 -19 -14 -4 38 -13 -14 39 +14 4 -39 +18 +43 44 19 -14 4 -38 -43 -18 13 -4 -43 -18 +14 19 +18 +4 +38 +39 44 +43 6 4 -40 -45 -20 15 +20 +45 +40 4 -41 -46 -21 16 +21 +46 +41 4 -40 15 -16 +40 41 +16 4 -45 20 -21 +45 46 +21 4 15 -20 -21 16 +21 +20 4 40 +41 +46 45 +6 +4 +16 +21 46 41 -6 4 -42 -47 -22 17 +22 +47 +42 4 -41 -46 -21 16 -4 41 -16 -17 42 +17 4 -46 21 -22 +46 47 +22 4 16 -21 -22 17 +22 +21 4 41 -46 -47 42 +47 +46 6 4 -47 +17 22 -23 -48 -4 +47 42 -17 +4 18 +23 +48 43 4 17 -22 -23 +42 +43 18 4 -42 -47 22 -17 -4 -43 +47 48 23 +4 +17 18 +23 +22 4 42 -47 -48 43 +48 +47 6 4 -43 +18 +23 48 +43 +4 +19 +24 49 44 4 18 -23 -24 +43 +44 19 4 -48 23 -24 +48 49 +24 4 -43 18 19 -44 -4 -44 -49 24 -19 +23 4 43 +44 +49 48 -23 -18 6 4 -50 -55 -30 25 +30 +55 +50 4 -51 -56 -31 26 +31 +56 +51 4 -50 25 -26 +50 51 +26 4 -55 30 -31 +55 56 +31 4 25 -30 -31 26 +31 +30 4 50 -55 -56 51 +56 +55 6 4 -51 +26 +31 56 +51 +4 +27 +32 57 52 4 26 -31 -32 +51 +52 27 4 -56 31 -32 +56 57 +32 4 -51 26 27 -52 -4 -52 -57 32 -27 +31 4 51 +52 +57 56 -31 -26 6 4 -52 -57 -32 27 +32 +57 +52 4 -53 -58 -33 28 +33 +58 +53 4 -52 27 -28 +52 53 +28 4 -57 32 -33 +57 58 +33 4 27 -32 -33 28 +33 +32 4 52 -57 -58 53 +58 +57 6 4 -53 +28 +33 58 +53 +4 +29 +34 59 54 4 28 -33 -34 +53 +54 29 4 -58 33 -34 +58 59 +34 4 -53 28 29 -54 -4 -54 -59 34 -29 +33 4 53 +54 +59 58 -33 -28 6 4 -55 -60 -35 30 +35 +60 +55 4 -56 -61 -36 31 +36 +61 +56 4 -55 30 -31 +55 56 +31 4 -60 35 -36 +60 61 +36 4 30 -35 -36 31 +36 +35 4 55 -60 -61 56 +61 +60 6 4 -56 +31 +36 61 +56 +4 +32 +37 62 57 4 31 -36 -37 +56 +57 32 4 -61 36 -37 -62 -4 -57 +61 62 37 -32 4 -56 -61 -36 31 +32 +37 +36 4 56 -31 -32 57 +62 +61 6 4 -57 -62 -37 32 +37 +62 +57 4 -58 -63 -38 33 +38 +63 +58 4 -57 32 -33 +57 58 +33 4 -62 37 -38 +62 63 +38 4 32 -37 -38 33 +38 +37 4 57 -62 -63 58 +63 +62 6 4 33 38 -39 -34 -4 -58 63 +58 +4 +34 +39 64 59 4 -58 33 -34 +58 59 +34 4 -59 +38 +63 64 39 -34 4 -58 -63 -38 33 -4 -63 -38 +34 39 +38 +4 +58 +59 64 +63 6 4 -60 -65 -40 35 +40 +65 +60 4 -61 -66 -41 36 +41 +66 +61 4 -60 35 -36 +60 61 +36 4 -65 40 -41 +65 66 +41 4 35 -40 -41 36 +41 +40 4 60 -65 -66 61 +66 +65 6 4 -66 +36 41 +66 +61 +4 +37 42 67 +62 4 -61 -66 -41 36 -4 +61 62 +37 +4 +41 +66 67 42 -37 4 -61 36 37 -62 -4 -36 -41 42 -37 +41 4 61 -66 -67 62 +67 +66 6 4 37 42 -43 -38 -4 67 -42 +62 +4 +38 43 68 +63 4 +37 62 -67 -68 63 +38 4 -63 +42 +67 68 43 -38 4 -62 -67 -42 37 +38 +43 +42 4 62 -37 -38 63 +68 +67 6 4 -63 +38 +43 68 +63 +4 +39 +44 69 64 4 38 -43 -44 +63 +64 39 4 -68 43 -44 -69 -4 -64 +68 69 44 -39 4 -63 -68 -43 38 +39 +44 +43 4 63 -38 -39 64 +69 +68 6 4 -65 -70 -45 40 +45 +70 +65 4 -66 -71 -46 41 +46 +71 +66 4 -65 40 -41 +65 66 +41 4 -70 45 -46 +70 71 +46 4 40 -45 -46 41 +46 +45 4 65 -70 -71 66 +71 +70 6 4 -66 -71 -46 41 +46 +71 +66 4 -67 -72 -47 42 +47 +72 +67 4 -66 41 -42 +66 67 +42 4 -71 
46 -47 +71 72 +47 4 41 -46 -47 42 +47 +46 4 66 -71 -72 67 +72 +71 6 4 -72 +42 47 -48 -73 -4 +72 67 -42 +4 43 +48 +73 68 4 42 -47 -48 +67 +68 43 4 -67 -72 47 -42 -4 -68 +72 73 48 +4 +42 43 +48 +47 4 67 -72 -73 68 +73 +72 6 4 43 48 -49 -44 -4 73 -48 +68 +4 +44 49 74 +69 4 +43 68 -73 -74 69 +44 4 -69 +48 +73 74 49 -44 4 -68 43 44 -69 +49 +48 4 68 +69 +74 73 -48 -43 6 4 -75 -80 -55 50 +55 +80 +75 4 -76 -81 -56 51 +56 +81 +76 4 -75 50 -51 +75 76 +51 4 -80 55 -56 +80 81 +56 4 50 -55 -56 51 +56 +55 4 75 -80 -81 76 +81 +80 6 4 -76 +51 +56 81 +76 +4 +52 +57 82 77 4 51 -56 -57 +76 +77 52 4 -81 56 -57 -82 -4 -77 +81 82 57 -52 4 -76 -81 -56 51 +52 +57 +56 4 76 -51 -52 77 +82 +81 6 4 -77 -82 -57 52 +57 +82 +77 4 -78 -83 -58 53 +58 +83 +78 4 -77 52 -53 +77 78 +53 4 -82 57 -58 +82 83 +58 4 52 -57 -58 53 +58 +57 4 77 -82 -83 78 +83 +82 6 4 53 58 -59 -54 -4 -78 83 -84 -79 +78 4 -83 -58 +54 59 84 +79 4 -78 53 -54 +78 79 +54 4 -79 +58 +83 84 59 +4 +53 54 +59 +58 4 78 +79 +84 83 -58 -53 6 4 -80 -85 -60 55 +60 +85 +80 4 -81 -86 -61 56 +61 +86 +81 4 -80 55 -56 +80 81 +56 4 -85 60 -61 +85 86 +61 4 55 -60 -61 56 +61 +60 4 80 -85 -86 81 +86 +85 6 4 -81 +56 +61 86 +81 +4 +57 +62 87 82 4 56 -61 -62 +81 +82 57 4 -86 61 -62 -87 -4 -82 +86 87 62 -57 4 -81 -86 -61 56 +57 +62 +61 4 81 -56 -57 82 +87 +86 6 4 -82 -87 -62 57 +62 +87 +82 4 -83 -88 -63 58 +63 +88 +83 4 -82 57 -58 +82 83 +58 4 -87 62 -63 +87 88 +63 4 57 -62 -63 58 +63 +62 4 82 -87 -88 83 +88 +87 6 4 -88 +58 63 +88 +83 +4 +59 64 89 +84 4 58 -63 -64 +83 +84 59 4 -83 +63 88 89 -84 +64 4 -83 58 59 -84 -4 -83 -88 +64 63 -58 4 +83 84 89 -64 -59 +88 6 4 -85 -90 -65 60 +65 +90 +85 4 -86 -91 -66 61 +66 +91 +86 4 -85 60 -61 +85 86 +61 4 -90 65 -66 +90 91 +66 4 60 -65 -66 61 +66 +65 4 85 -90 -91 86 +91 +90 6 4 -86 +61 +66 91 +86 +4 +62 +67 92 87 4 -86 61 -62 +86 87 +62 4 -61 66 +91 +92 67 -62 4 -86 -91 -66 61 +62 +67 +66 4 +86 87 92 -67 -62 -4 91 -66 -67 -92 6 4 -87 -92 -67 62 +67 +92 +87 4 -88 -93 -68 63 +68 +93 +88 4 -87 62 -63 +87 88 +63 4 -92 67 -68 +92 93 +68 4 62 -67 -68 63 +68 +67 4 87 -92 -93 88 +93 +92 6 4 -93 +63 68 +93 +88 +4 +64 69 94 +89 4 +63 88 -93 -94 89 +64 4 -63 68 +93 +94 69 -64 4 -88 63 64 -89 -4 -88 -93 +69 68 -63 4 +88 89 94 -69 -64 +93 6 4 -90 -95 -70 65 +70 +95 +90 4 -91 -96 -71 66 +71 +96 +91 4 -90 65 -66 +90 91 +66 4 -95 70 -71 +95 96 +71 4 65 -70 -71 66 +71 +70 4 90 -95 -96 91 +96 +95 6 4 -91 +66 +71 96 +91 +4 +67 +72 97 92 4 66 -71 -72 +91 +92 67 4 -96 71 -72 +96 97 +72 4 -91 66 67 -92 -4 -91 -96 +72 71 -66 4 +91 92 97 -72 -67 +96 6 4 -92 -97 -72 67 +72 +97 +92 4 -93 -98 -73 68 +73 +98 +93 4 -92 67 -68 +92 93 +68 4 -97 72 -73 +97 98 +73 4 67 -72 -73 68 +73 +72 4 92 -97 -98 93 +98 +97 6 4 -93 +68 +73 98 -99 -94 +93 4 -98 -73 +69 74 99 +94 4 68 -73 -74 +93 +94 69 4 -94 +73 +98 99 74 -69 4 -93 68 69 -94 +74 +73 4 93 +94 +99 98 -73 -68 6 4 -100 -105 -80 75 +80 +105 +100 4 -101 -106 -81 76 +81 +106 +101 4 -100 75 -76 +100 101 +76 4 -105 80 -81 +105 106 +81 4 75 -80 -81 76 +81 +80 4 100 -105 -106 101 +106 +105 6 4 -106 +76 81 +106 +101 +4 +77 82 107 +102 4 +76 101 -106 -107 102 +77 4 -76 81 +106 +107 82 -77 4 -101 76 77 -102 -4 -102 -107 82 -77 +81 4 101 +102 +107 106 -81 -76 6 4 -102 -107 -82 77 +82 +107 +102 4 -103 -108 -83 78 +83 +108 +103 4 -102 77 -78 +102 103 +78 4 -107 82 -83 +107 108 +83 4 77 -82 -83 78 +83 +82 4 102 -107 -108 103 +108 +107 6 4 78 83 -84 -79 -4 108 -83 +103 +4 +79 84 109 +104 4 +78 103 +104 +79 +4 +83 108 109 -104 +84 4 -103 78 79 -104 -4 -103 -108 +84 83 -78 4 +103 104 109 -84 -79 +108 6 4 -105 -110 -85 80 +85 +110 +105 4 
-106 -111 -86 81 +86 +111 +106 4 -105 80 -81 +105 106 +81 4 -110 85 -86 +110 111 +86 4 80 -85 -86 81 +86 +85 4 105 -110 -111 106 +111 +110 6 4 -106 +81 +86 111 +106 +4 +82 +87 112 107 4 81 -86 -87 +106 +107 82 4 -111 86 -87 +111 112 +87 4 -106 81 82 -107 -4 -106 -111 +87 86 -81 4 +106 107 112 -87 -82 +111 6 4 -107 -112 -87 82 +87 +112 +107 4 -108 -113 -88 83 +88 +113 +108 4 -107 82 -83 +107 108 +83 4 -112 87 -88 +112 113 +88 4 82 -87 -88 83 +88 +87 4 107 -112 -113 108 +113 +112 6 4 -108 +83 +88 113 -114 -109 +108 4 -113 -88 +84 89 114 +109 4 83 -88 -89 +108 +109 84 4 -109 +88 +113 114 89 -84 4 -108 -113 -88 83 +84 +89 +88 4 108 -83 -84 109 +114 +113 6 4 -110 -115 -90 85 +90 +115 +110 4 -111 -116 -91 86 +91 +116 +111 4 -110 85 -86 +110 111 +86 4 -115 90 -91 +115 116 +91 4 85 -90 -91 86 +91 +90 4 110 -115 -116 111 +116 +115 6 4 -111 +86 +91 116 +111 +4 +87 +92 117 112 4 86 -91 -92 +111 +112 87 4 -116 91 -92 +116 117 +92 4 -111 86 87 -112 -4 -111 -116 +92 91 -86 4 +111 112 117 -92 -87 +116 6 4 -112 -117 -92 87 +92 +117 +112 4 -113 -118 -93 88 +93 +118 +113 4 -112 87 -88 +112 113 +88 4 -117 92 -93 +117 118 +93 4 87 -92 -93 88 +93 +92 4 112 -117 -118 113 +118 +117 6 4 -113 +88 +93 118 -119 -114 +113 4 -118 -93 +89 94 119 +114 4 88 -93 -94 +113 +114 89 4 -114 +93 +118 119 94 -89 4 -113 -118 -93 88 +89 +94 +93 4 113 -88 -89 114 +119 +118 6 4 -115 -120 -95 90 +95 +120 +115 4 -116 -121 -96 91 +96 +121 +116 4 -115 90 -91 +115 116 +91 4 -120 95 -96 +120 121 +96 4 90 -95 -96 91 +96 +95 4 115 -120 -121 116 +121 +120 6 4 -116 +91 +96 121 -122 -117 +116 4 -121 -96 +92 97 122 +117 4 91 -96 -97 +116 +117 92 4 -117 +96 +121 122 97 -92 4 -116 -121 -96 91 +92 +97 +96 4 116 -91 -92 117 +122 +121 6 4 -117 -122 -97 92 +97 +122 +117 4 -118 -123 -98 93 +98 +123 +118 4 -117 92 -93 +117 118 +93 4 -122 97 -98 +122 123 +98 4 92 -97 -98 93 +98 +97 4 117 -122 -123 118 +123 +122 6 4 93 98 -99 +123 +118 +4 94 +99 +124 +119 4 -118 -123 -98 93 -4 +118 119 +94 +4 +98 +123 124 99 -94 4 -118 93 94 -119 -4 -123 -98 99 -124 +98 4 118 -123 -124 119 +124 +123 @@ -9782,26 +6860,14 @@ def _single_grid_3d_polytop_grid_vtu(self): -0 -1 -2 -4 -5 -6 -8 -9 -12 -13 -14 16 17 -18 20 21 -24 -25 28 29 +32 +33 2 3 5 @@ -9836,14 +6902,26 @@ def _single_grid_3d_polytop_grid_vtu(self): 33 34 35 +0 +1 +2 +4 +5 +6 +8 +9 +12 +13 +14 16 17 +18 20 21 +24 +25 28 29 -32 -33 24 25 26 @@ -9871,9 +6949,9 @@ def _single_grid_3d_polytop_grid_vtu(self): -20 -36 -54 +8 +24 +42 62 86 @@ -9887,397 +6965,397 @@ def _single_grid_3d_polytop_grid_vtu(self): -18 -4 -12 -16 -4 -0 -4 -24 -28 -29 -25 +6 4 16 20 -21 -17 -4 -13 -17 -18 -14 -4 -4 -8 -9 -5 -4 -1 -5 -6 -2 -4 +32 28 -16 +4 17 +21 +33 29 4 -24 -12 -13 -25 +16 +28 +29 +17 4 20 -8 -9 +32 +33 21 4 -0 -4 -5 -1 -4 -13 -1 -2 -14 -4 -12 -0 -1 -13 -4 -25 -29 -17 -13 -4 -24 -28 16 -12 -4 17 21 -9 -5 -4 -16 20 -8 -4 4 +28 +29 +33 +32 14 -18 -6 -2 4 -17 -5 +2 6 18 14 4 -2 -6 -7 3 -4 -18 -22 -23 +7 19 +15 4 -17 +5 +9 21 -22 -18 +17 4 -14 -18 +7 +11 +23 19 +4 +2 +14 15 +3 4 +5 +17 +18 6 +4 +9 +21 +22 +10 +4 10 +22 +23 11 +4 +2 +3 7 +6 4 5 -9 +6 10 +9 +4 6 +7 +11 +10 +4 +14 +15 +19 +18 4 +17 +18 22 -10 -11 +21 +4 +18 +19 23 +22 +16 +4 +13 +17 +29 +25 4 15 19 -7 -3 +31 +27 4 17 -5 -6 -18 +21 +33 +29 +4 +19 +23 +35 +31 +4 +13 +25 +26 +14 4 14 -2 -3 +26 +27 15 4 -19 +21 +33 +34 +22 +4 +22 +34 +35 23 -11 -7 4 +13 +14 +18 17 -21 -9 -5 4 14 +15 +19 18 -6 -2 4 -21 -9 -10 +17 +18 22 -16 +21 4 -14 18 19 -15 +23 +22 4 +25 +26 30 -34 -35 +29 +4 +26 +27 31 +30 4 29 -33 -34 30 +34 +33 4 -26 30 31 -27 +35 +34 +18 4 -25 -29 -30 -26 +0 +4 
+16 +12 4 +2 +6 18 -22 -23 -19 +14 4 -17 +4 +8 +20 +16 +4 +5 +9 21 -22 -18 +17 +4 +12 +16 +28 +24 4 13 17 -18 -14 +29 +25 4 -26 +0 +12 +13 +1 +4 +1 +13 14 -15 -27 +2 4 -33 -21 -22 -34 +5 +17 +18 +6 4 -34 -22 -23 -35 +8 +20 +21 +9 4 +12 +24 25 13 -14 -26 -4 -31 -35 -23 -19 4 +16 +28 29 -33 -21 17 4 -27 -31 -19 -15 +0 +1 +5 4 -25 -29 -17 -13 +4 +1 +2 6 +5 4 -28 -32 -33 -29 4 -32 -20 -21 -33 +5 +9 +8 4 -16 -20 -21 +13 +14 +18 17 4 -29 -33 -21 +16 17 -4 -28 -32 +21 20 -16 4 -28 -16 -17 +24 +25 29 +28 22 4 -25 -29 -30 -26 +24 +28 +40 +36 4 -38 -42 +27 +31 43 39 4 -37 -41 -42 -38 +28 +32 +44 +40 +4 +31 +35 +47 +43 4 +24 36 -40 -41 37 +25 4 -30 -34 -35 -31 +25 +37 +38 +26 4 -40 +26 +38 +39 +27 +4 +32 44 45 -41 +33 4 -29 33 +45 +46 34 -30 4 -28 -32 -33 +34 +46 +47 +35 +4 +24 +25 29 +28 4 +25 26 30 -31 +29 +4 +26 27 +31 +30 4 -24 28 29 -25 -4 -36 -24 -25 -37 -4 -45 33 -34 -46 -4 -44 32 -33 -45 4 -38 -26 -27 -39 -4 -37 -25 -26 -38 +29 +30 +34 +33 4 -43 -47 -35 +30 31 +35 +34 4 +36 +37 +41 40 -44 -32 -28 4 +37 +38 +42 +41 +4 +38 39 43 -31 -27 +42 4 -36 40 -28 -24 -4 41 45 -46 -42 +44 4 +41 +42 46 -34 -35 -47 +45 4 42 -46 -47 43 +47 +46 -91 -162 -243 +31 +102 +183 274 385 @@ -10319,10 +7397,10 @@ def _single_grid_3d_polytop_grid_vtu(self): -0 +3 1 2 -3 +0 4 @@ -10564,10 +7642,10 @@ def _gb_1_grid_2_vtu(self): 7 8 3 +3 8 9 4 -3 5 10 12 @@ -10576,14 +7654,14 @@ def _gb_1_grid_2_vtu(self): 12 14 7 +7 14 16 8 -7 8 -9 -18 16 +18 +9 11 20 21 @@ -10596,26 +7674,26 @@ def _gb_1_grid_2_vtu(self): 22 23 17 +17 23 24 19 -17 20 25 26 21 +21 26 27 22 -21 22 27 28 23 23 -24 -29 28 +29 +24 @@ -10880,14 +7958,14 @@ def _gb_1_mortar_grid_vtu(self): 3 3 4 +4 +5 5 6 6 7 7 8 -8 -9 @@ -11026,10 +8104,10 @@ def _gb_2_grid_1_vtu(self): 4 4 5 +4 +5 6 7 -8 -9 @@ -11254,10 +8332,10 @@ def _gb_2_grid_2_vtu(self): 7 8 3 +3 8 9 4 -3 5 10 12 @@ -11266,14 +8344,14 @@ def _gb_2_grid_2_vtu(self): 12 14 7 +7 15 18 8 -7 8 -9 -20 18 +20 +9 11 22 23 @@ -11286,26 +8364,26 @@ def _gb_2_grid_2_vtu(self): 24 25 19 +19 25 26 21 -19 22 27 28 23 +23 28 29 24 -23 24 29 30 25 25 -26 -31 30 +31 +26 @@ -11600,22 +8678,22 @@ def _gb_2_mortar_grid_1_vtu(self): 4 4 5 +4 +5 +5 6 7 -7 +8 +8 +9 8 9 10 +11 10 11 12 13 -14 -15 -16 -17 -18 -19 From da4ad7e9486b78836b45e4e1883df309fab90c37 Mon Sep 17 00:00:00 2001 From: Jakub Both Date: Fri, 4 Mar 2022 22:50:33 +0100 Subject: [PATCH 11/83] DOC: Enhance documentation of routine. --- src/porepy/viz/exporter.py | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/src/porepy/viz/exporter.py b/src/porepy/viz/exporter.py index ed02623fa5..c78e59bfe0 100644 --- a/src/porepy/viz/exporter.py +++ b/src/porepy/viz/exporter.py @@ -155,7 +155,8 @@ def change_name(self, file_name: str) -> None: considered but on the same grid. Parameters: - file_name: the new root name of the files. + file_name (str): the new root name of the files. + """ self.file_name = file_name @@ -178,7 +179,7 @@ def write_vtu( data: if g is a single grid then data is a dictionary (see example) if g is a grid bucket then list of names for optional data, they are the keys in the grid bucket (see example). - time_dependent: (bolean, optional) If False, file names will not be appended with + time_dependent: (boolean, optional) If False, file names will not be appended with an index that markes the time step. Can be overridden by giving a value to time_step. 
        time_step: (optional) in a time dependent problem defines the part of the file
@@ -490,6 +491,14 @@ def _export_grid(
         """
         Wrapper function to export grids of dimension dim. Calls the
         appropriate dimension specific export function.
+
+        Parameters:
+            gs (Iterable[pp.Grid]): Subdomains of same dimension.
+            dim (int): Dimension of the subdomains.
+
+        Returns:
+            Tuple[np.ndarray, np.ndarray, np.ndarray]: Points, cells (storing the
+                connectivity), and cell ids in correct meshio format.
         """
         if dim == 0:
             return None
@@ -510,7 +519,7 @@ def _export_1d(
         information from the 1d PorePy grids to meshio.
 
         Parameters:
-            gs (Iterable[pp.Grid]): Subdomains.
+            gs (Iterable[pp.Grid]): 1d subdomains.
 
         Returns:
             Tuple[np.ndarray, np.ndarray, np.ndarray]: Points, 1d cells (storing the
@@ -605,7 +614,7 @@ def _export_2d(
         information from the 2d PorePy grids to meshio.
 
         Parameters:
-            gs (Iterable[pp.Grid]): Subdomains.
+            gs (Iterable[pp.Grid]): 2d subdomains.
 
        Returns:
             Tuple[np.ndarray, np.ndarray, np.ndarray]: Points, 2d cells (storing the
@@ -848,7 +857,7 @@ def _export_3d(
         information from the 3d PorePy grids to meshio.
 
         Parameters:
-            gs (Iterable[pp.Grid]): Subdomains.
+            gs (Iterable[pp.Grid]): 3d subdomains.
 
         Returns:
             Tuple[np.ndarray, np.ndarray, np.ndarray]: Points, 3d cells (storing the

From 8a650645b58cc9ed2a07c096dee0251b6514f47f Mon Sep 17 00:00:00 2001
From: Jakub Both
Date: Sun, 6 Mar 2022 00:01:26 +0100
Subject: [PATCH 12/83] ENH/SPEEDUP: Export only data varying in time.

By adding the keyword reuse_data=True (default is False) in Exporter,
the previously exported time step is used to copy all mesh-related
data, incl. point coordinates, connectivity information, grid
dimensions, mortar info, etc. This speeds up writing data to file
between time steps in which the grids remain fixed.
---
 src/porepy/viz/exporter.py | 216 ++++++++++++++++++++++++++++++++++++-
 1 file changed, 214 insertions(+), 2 deletions(-)

diff --git a/src/porepy/viz/exporter.py b/src/porepy/viz/exporter.py
index c78e59bfe0..b65071375f 100644
--- a/src/porepy/viz/exporter.py
+++ b/src/porepy/viz/exporter.py
@@ -11,6 +11,7 @@
 import os
 import sys
 from typing import Dict, Generator, Iterable, List, Optional, Tuple, Union
+from xml.etree import ElementTree as ET
 
 import meshio
 import numpy as np
@@ -118,6 +119,7 @@ def __init__(
         # Check for optional keywords
         self.fixed_grid: bool = kwargs.pop("fixed_grid", True)
         self.binary: bool = kwargs.pop("binary", True)
+        self._reuse_data: bool = kwargs.pop("reuse_data", False)  # NOTE for now, do not set True by default.
         if kwargs:
             msg = "Exporter() got unexpected keyword argument '{}'"
             raise TypeError(msg.format(kwargs.popitem()[0]))
@@ -149,6 +151,16 @@ def __init__(
         # Storage for file name extensions for time steps
         self._exported_time_step_file_names: List[int] = []
 
+        # Time step used in the latest write routine. Will optionally be used
+        # to retain constant geometrical and extra data in the next write routine.
+        # It serves as a control parameter, and needs to be reset when the
+        # general file name is replaced and/or the geometrical data is not
+        # constant over single time steps.
+        self._prev_exported_time_step: Union[int, type(None)] = None
+
+        # Parameter to be used on several occasions for adding time stamps.
+        self.padding = 6
+
     def change_name(self, file_name: str) -> None:
         """
         Change the root name of the files, useful when different keywords are
@@ -160,6 +172,11 @@ def change_name(self, file_name: str) -> None:
         """
         self.file_name = file_name
 
+        # Reset control parameter for the reuse of fixed data in the write routine.
+        # One could consider also allowing to reuse data from files with different
+        # names, but at the moment a more light-weight variant has been chosen.
+        self._prev_exported_time_step = None
+
     # TODO do we need dicts here? do we want to support both for grid buckets (this does not make sense)
     def write_vtu(
         self,
@@ -201,6 +218,9 @@ def write_vtu(
         # Update geometrical info in meshio format
         self._update_meshio_geom()
 
+        # Reset control parameter for the reuse of fixed data in the write routine.
+        self._prev_exported_time_step = None
+
         # If the problem is time dependent, but no time step is set, we set one
         # using the updated, internal counter.
         if time_dependent and time_step is None:
@@ -221,6 +241,10 @@ def write_vtu(
         else:
             raise NotImplemented("No other data type than list and dict supported.")
 
+        # Update the previously exported time step, to be used in the next application.
+        if time_step is not None:
+            self._prev_exported_time_step = time_step
+
     # TODO does not contain all keywords as in write_vtu. make consistent?
     def write_pvd(
         self,
@@ -1027,8 +1051,196 @@ def _write(self, fields: Iterable[Field], file_name: str, meshio_geom) -> None:
         meshio_grid_to_export = meshio.Mesh(
             meshio_geom[0], meshio_geom[1], cell_data=cell_data
         )
+
+        # Exclude fixed geometric data such as grid points and connectivity information
+        # by removing the mesh fields and extra node/edge data from meshio_grid_to_export.
+        # Do this only if explicitly asked for in init and if a suitable previous time step
+        # has been exported already. This data will be copied to the final VTKFile
+        # from a reference file from this previous time step.
+        to_copy_prev_exported_data = self._reuse_data and self._prev_exported_time_step is not None
+        if to_copy_prev_exported_data:
+            self._remove_fixed_mesh_data(meshio_grid_to_export)
+
+        # Write mesh information and data to VTK format.
         meshio.write(file_name, meshio_grid_to_export, binary=self.binary)
 
+        # Include the fixed mesh data and the extra node/edge data by copying,
+        # if removed earlier from meshio_grid_to_export.
+        if to_copy_prev_exported_data:
+            self._copy_fixed_mesh_data(file_name)
+
+    def _remove_fixed_mesh_data(self, meshio_grid: meshio._mesh.Mesh) -> None:
+        """Remove points, cells, and cell data related to the extra node and
+        edge data from the given meshio_grid. The grid is modified in place.
+
+        Auxiliary routine in _write.
+
+        Parameters:
+            meshio_grid (meshio._mesh.Mesh): The meshio grid to be cleaned.
+
+        """
+
+        # Empty points and cells deactivate the export of geometric information
+        # in meshio. For more details, check the method 'write' in meshio.vtu._vtu.
+        meshio_grid.points = np.empty((0,3))
+        meshio_grid.cells = []
+
+        # Deactivate extra node/edge data, incl. cell_id as added in _export_list_data.
+        extra_node_edge_names = set([
+            "cell_id",
+            "grid_dim",
+            "grid_node_number",
+            "grid_edge_number",
+            "is_mortar",
+            "mortar_side"
+        ])
+        # Simply delete from the meshio grid
+        for key in extra_node_edge_names:
+            if key in meshio_grid.cell_data:
+                del meshio_grid.cell_data[key]
+
+    def _copy_fixed_mesh_data(self, file_name: str) -> None:
+        """Given two VTK files (in xml format), transfer all fixed geometric
+        info from a reference to a new file.
+
+        This includes Piece, Points, Cells, and extra node and edge data.
+
+        Parameters:
+            file_name (str): Incomplete VTK file, to be completed.
+
+        """
+
+        # Determine the path to the reference file, based on the current
+        # file_name and the previous time step.
+
+        # Find the latest occurrence of "." to identify the extension.
+        # Reverse the string in order to use index(). Add 1 to cancel
+        # the effect of the reverse.
+        dot_index = list(file_name)[::-1].index(".") + 1
+        extension = "".join(list(file_name)[-dot_index:])
+        if not extension == ".vtu":
+            raise NotImplementedError("Only support for VTKFile.")
+
+        # Extract file_name without extension and the time_step stamp.
+        # For this, assume the time stamp occurs after the latest underscore.
+        # Use a similar approach as for finding the extension.
+        latest_underscore_index = list(file_name)[::-1].index("_")
+        file_name_without_time_extension = "".join(list(file_name)[:-latest_underscore_index])
+
+        # As in _make_file_name, use padding for the time stamp
+        prev_exported_time = str(self._prev_exported_time_step).zfill(self.padding)
+
+        # Add the time stamp of the previously exported time step and the same extension.
+        ref_file_name = (
+            file_name_without_time_extension
+            + prev_exported_time
+            + extension
+        )
+
+        # Fetch the VTK file in XML format.
+        tree = ET.parse(file_name)
+        new_vtkfile = tree.getroot()
+
+        # Fetch the reference VTK file in XML format
+        ref_tree = ET.parse(ref_file_name)
+        ref_vtkfile = ref_tree.getroot()
+
+        def fetch_child(root: ET.Element, key: str) -> Union[ET.Element, type(None)]:
+            """Find the unique (nested) child in xml file root with tag key.
+            """
+            # Iterate over all matching children and count them; the counter
+            # is simply updated in each iteration.
+            count = 0
+            for count, child in enumerate(root.iter(key)):
+                count += 1
+            # Do not accept non-uniqueness
+            if count > 1:
+                raise ValueError(f"Child with tag {key} is not unique.")
+            # Return child, if it exists (and is unique), or None if not.
+            return child if count==1 else None
+
+        def fetch_data_array(
+            root: ET.Element,
+            key: str,
+            check_existence: bool = True
+        ) -> Union[ET.Element, type(None)]:
+            """Find the unique DataArray in xml file root with "Name"=key,
+            where "Name" is an element of the attributes of the DataArray.
+            """
+            # Count how many candidates have been found (hopefully in the end
+            # it is just one).
+            count = 0
+
+            # To search for nested children in xml files, iterate.
+            for child in root.iter("DataArray"):
+                # Check if the child has the correct Name
+                if child.attrib["Name"] == key:
+                    count += 1
+                    data_array = child
+
+            # Check existence only if required.
+            if check_existence:
+                assert(count > 0)
+
+            # Check uniqueness of candidates for the correct <DataArray>.
+            if count > 1:
+                raise ValueError(f"DataArray with name {key} not unique.")
+
+            # Return the DataArray if it exists, otherwise return None
+            return data_array if count > 0 else None
+
+        # Transfer the attributes of the <Piece> block. It contains
+        # the geometrical information on the number of points and cells.
+        ref_piece = fetch_child(ref_vtkfile, "Piece")
+        new_piece = fetch_child(new_vtkfile, "Piece")
+        new_piece.attrib = ref_piece.attrib
+
+        # Transfer the content of the <DataArray> with "Name"="Points"
+        # among its attributes. It contains the coordinates of the points.
+        ref_points = fetch_data_array(ref_vtkfile, "Points")
+        new_points = fetch_data_array(new_vtkfile, "Points")
+        new_points.text = ref_points.text
+
+        # Transfer the <Cells> element, which contains cell information including
+        # cell types and the connectivity. <Cells> is expected not to be included
+        # in the current VTKFile. It is therefore added as a whole. It is
+        # assumed to be a child of <Piece>.
+        ref_cells = fetch_child(ref_vtkfile, "Cells")
+        new_piece.insert(1, ref_cells)
+
+        # Transfer extra node/edge data with the keys:
+        extra_node_edge_names = set([
+            "cell_id",
+            "grid_dim",
+            "grid_node_number",
+            "grid_edge_number",
+            "is_mortar",
+            "mortar_side"
+        ])
+
+        # These are separately stored as <DataArray> under <CellData>. Hence, fetch
+        # the corresponding child and search for the <DataArray>.
+        new_cell_data = fetch_child(new_vtkfile, "CellData")
+        # If no <CellData> is available, add one to <Piece> and fill it below.
+        if new_cell_data is None:
+            new_cell_data = ET.Element("CellData")
+            new_piece.append(new_cell_data)
+
+        # Loop over all extra data to check whether it is included in the reference
+        # file. If so, copy it.
+        for key in extra_node_edge_names:
+            ref_data_array = fetch_data_array(ref_vtkfile, key, check_existence=False)
+            new_data_array = fetch_data_array(new_vtkfile, key, check_existence=False)
+            # Copy data only if it is included in the reference file, but not the current one.
+            if ref_data_array is not None and new_data_array is None:
+                new_cell_data.append(ref_data_array)
+
+        # Write updates to file
+        tree.write(file_name)
+
     def _update_meshio_geom(self):
         for dim in self.dims:
             g = self.gb.get_grids(lambda g: g.dim == dim)
@@ -1057,7 +1269,7 @@ def _make_file_name(
         extension: str = ".vtu",
     ) -> str:

-        padding = 6
+        padding = self.padding
         if dim is None:  # normal grid
             if time_step is None:
                 return file_name + extension
@@ -1080,7 +1292,7 @@ def _make_file_name_mortar(
         extension = ".vtu"
         file_name = file_name + "_mortar_"

-        padding = 6
+        padding = self.padding
         if time_step is None:
             return file_name + str(dim) + extension
         else:

From c25bf544c53ece9fd33a8eff8b5de1a4102ed7b5 Mon Sep 17 00:00:00 2001
From: Jakub Both
Date: Fri, 11 Mar 2022 09:57:27 +0100
Subject: [PATCH 13/83] BUG: Use correct offset when defining the connectivity
 for simplices.
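
When several grids of the same dimension are stacked into one meshio block,
the connectivity entries index into the concatenated node array. They must
therefore be shifted by the number of nodes, not cells, of the previously
processed grids. A minimal sketch of the intended behavior (the grid sizes
are illustrative only):

    import numpy as np

    # Connectivity of two 1d grids with 3 and 4 nodes (2 and 3 cells).
    cn_first = np.array([[0, 1], [1, 2]])
    cn_second = np.array([[0, 1], [1, 2], [2, 3]])

    # Shift the second grid by the node count of the first grid (3);
    # using its cell count (2) would corrupt the connectivity.
    nodes_offset = 3
    stacked = np.vstack((cn_first, cn_second + nodes_offset))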
--- src/porepy/viz/exporter.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/src/porepy/viz/exporter.py b/src/porepy/viz/exporter.py index b65071375f..eb45b6104c 100644 --- a/src/porepy/viz/exporter.py +++ b/src/porepy/viz/exporter.py @@ -573,7 +573,7 @@ def _export_1d( nodes_offset = 0 cell_offset = 0 - # Loop over all 2d grids + # Loop over all 1d grids for g in gs: # Store node coordinates @@ -585,13 +585,14 @@ def _export_1d( # Add to previous connectivity information cell_to_nodes[cell_type] = np.vstack( - (cell_to_nodes[cell_type], cn_indices + cell_offset) + (cell_to_nodes[cell_type], cn_indices + nodes_offset) ) # Update offsets nodes_offset += g.num_nodes cell_offset += g.num_cells + # Construct the meshio data structure num_blocks = len(cell_to_nodes) meshio_cells = np.empty(num_blocks, dtype=object) @@ -621,10 +622,13 @@ def _simplex_cell_to_nodes(self, n: int, g: pp.Grid, cells: Optional[np.ndarray] # Determine cell-node ptr cn_indptr = g.cell_nodes().indptr[:-1] if cells is None else g.cell_nodes().indptr[cells] - # Each cell contains n nodes + + # Collect the indptr to all nodes of the cell; each cell contains n nodes expanded_cn_indptr = np.vstack([cn_indptr + i for i in range(n)]).reshape(-1, order="F") + # Detect all corresponding nodes by applying the expanded mask to the indices expanded_cn_indices = g.cell_nodes().indices[expanded_cn_indptr] + # Bring in correct form. cn_indices = np.reshape(expanded_cn_indices, (-1,n), order="C") From b94adc97cf6a0d787b8a3e6ee198e32484e74261 Mon Sep 17 00:00:00 2001 From: Jakub Both Date: Sat, 12 Mar 2022 00:21:57 +0100 Subject: [PATCH 14/83] MAINT/ENH: Rewrite write_vtu in Exporter. Use different data management within Exporter which allows for various input types when specifying which data to print to file. In particular, one can specify data corresponding to a single grid while using other options for other data. --- src/porepy/viz/exporter.py | 880 ++++++++++++++++++++++++++++-------- src/porepy/viz/type_test.py | 72 +++ 2 files changed, 767 insertions(+), 185 deletions(-) create mode 100644 src/porepy/viz/type_test.py diff --git a/src/porepy/viz/exporter.py b/src/porepy/viz/exporter.py index eb45b6104c..645240ae96 100644 --- a/src/porepy/viz/exporter.py +++ b/src/porepy/viz/exporter.py @@ -19,11 +19,11 @@ import porepy as pp +from porepy.viz.type_test import * + # Module-wide logger logger = logging.getLogger(__name__) -import time - class Field: """ Internal class to store information for the data to export. @@ -177,48 +177,57 @@ def change_name(self, file_name: str) -> None: # names, but at the moment a more light-weight variant has been chosen. self._prev_exported_time_step = None - # TODO do we need dicts here? do we want to support both for grid buckets (this does not make sense) def write_vtu( self, - data: Optional[Union[Dict, List[str]]] = None, - time_dependent: bool = False, - time_step: int = None, - gb: Optional[Union[pp.Grid, pp.GridBucket]] = None, + data: Optional[Union[DataInput, List[DataInput]]] = None, + time_dependent: Optional[bool] = False, # TODO in use? e.g., when not each time step is printed? or is time_step usually used? + time_step: Optional[int] = None, + gb: Optional[Union[pp.Grid, pp.GridBucket]] = None, # TODO what if it is none? ) -> None: """ Interface function to export the grid and additional data with meshio. - In 1d the cells are represented as lines, 2d the cells as polygon or triangle/quad, - while in 3d as polyhedra/tetrahedra/hexahedra. 
+        In 1d the cells are represented as lines, in 2d as polygons or
+        triangles/quads, while in 3d as polyhedra/tetrahedra/hexahedra.
         In all the dimensions the geometry of the mesh needs to be computed.

         Parameters:
-            data: if g is a single grid then data is a dictionary (see example)
-                if g is a grid bucket then list of names for optional data,
-                they are the keys in the grid bucket (see example).
-            time_dependent: (boolean, optional) If False, file names will not be appended with
-                an index that markes the time step. Can be overridden by giving
-                a value to time_step.
-            time_step: (optional) in a time dependent problem defines the part of the file
-                name associated with this time step. If not provided, subsequent
-                time steps will have file names ending with 0, 1, etc.
-            grid: (optional) in case of changing grid set a new one.
+            data (Union[DataInput, List[DataInput]], optional): node and
+                edge data, prescribed through strings, or tuples of
+                grids/edges, keys and values. If not provided, only
+                geometrical info is exported.
+            time_dependent (boolean, optional): If False, file names will
+                not be appended with an index that marks the time step.
+                Can be overwritten by giving a value to time_step; if not,
+                the file names will subsequently end with 0, 1, etc.
+            time_step (int, optional): will be used as appendix to define
+                the file corresponding to this specific time step.
+            gb (Union[pp.Grid, pp.GridBucket], optional): grid or grid bucket
+                if it is not fixed and should be updated.

         """

+        # Update the grid (bucket) but only if allowed, i.e., the initial grid
+        # (bucket) has not been characterized as fixed.
         if self.fixed_grid and gb is not None:
             raise ValueError("Inconsistency in exporter setting")

         elif not self.fixed_grid and gb is not None:

-            # Convert to grid bucket if a grid is provided.
+            # Require a grid bucket. Thus, convert grid -> grid bucket.
             if isinstance(gb, pp.Grid):
+                # Create a new grid bucket solely with a single grid as nodes.
                 self.gb = pp.GridBucket()
                 self.gb.add_nodes(gb)

             else:
                 self.gb = gb

-            # Update geometrical info in meshio format
+            # Update geometrical info in meshio format for the updated grid
             self._update_meshio_geom()

-            # Reset control parameter for the reuse of fixed data in the write routine.
+            # Reset control parameter for the reuse of fixed data in the
+            # write routine, so the updated mesh is used for further export.
             self._prev_exported_time_step = None

         # If the problem is time dependent, but no time step is set, we set one
         # using the updated, internal counter.
         if time_dependent and time_step is None:
             time_step = self._time_step_counter
             self._time_step_counter += 1

         # If the problem is time dependent (with specified or automatic time step index)
         # add the time step to the exported files
         if time_step is not None:
             self._exported_time_step_file_names.append(time_step)

-        # NOTE grid buckets are exported with dict data, while single
-        # grids (converted to grid buckets) are exported with list data.
-        if isinstance(data, list):
-            self._export_list_data(data, time_step)  # type: ignore
-        elif isinstance(data, dict):
-            self._export_dict_data(data, time_step)
-        else:
-            raise NotImplemented("No other data type than list and dict supported.")
+        # Convert provided data to the most general type: NodeDataBaseType
+        # Also sort wrt. whether data is associated to nodes or edges.
+        node_data, edge_data = self._unify_data(data)
+
+        # Add geometrical info for nodes and edges
+        node_data = self._add_extra_node_data(node_data)
+        edge_data = self._add_extra_edge_data(edge_data)
+
+        # Export data and treat node and edge data separately
+        self._export_data(node_data, time_step)
+        self._export_data(edge_data, time_step)
+
+        # Export pvd
+        file_name = self._make_file_name(self.file_name, time_step, extension=".pvd")
+        file_name = self._make_folder(self.folder_name, file_name)
+        self._export_pvd_gb(file_name, time_step)

         # Update previous time step used for export to be used in the next application.
         if time_step is not None:
             self._prev_exported_time_step = time_step

@@ -297,191 +314,408 @@ def write_pvd(
         o_file.write("\n" + "")
         o_file.close()

-    def _export_dict_data(self, data: Dict[str, np.ndarray], time_step):
-        """
-        Export single grid to a vtu file with dictionary data.
+    # Some auxiliary routines used in write_vtu()
+    def _unify_data(
+        self, data: Optional[Union[DataInput, List[DataInput]]] = None,
+    ) -> Tuple[NodeData, EdgeData]:
+        """Bring data in unified format to be further used in the export.
+
+        The routine has two goals: Splitting data into node and edge data,
+        as well as transforming all data to be exported into dictionaries
+        of the format {(grid/edge, key): value}.

         Parameters:
-            data (Dict[str, np.ndarray]): Data to be exported.
-            time_step (float) : Time step, to be appended at the vtu output file.
+            data (Union[DataInput, List[DataInput]], optional): data
+                provided by the user in the form of strings and/or tuples
+                of grids/edges.
+
+        Returns:
+            Tuple[NodeData, EdgeData]: Node and edge data decomposed and
+                brought into unified format.
         """
-        # Currently this method does only support single grids.
-        if self.dims.shape[0] > 1:
-            raise NotImplementedError("Grid buckets with more than one grid are not supported.")
-
-        # No need of special naming, create the folder
-        name = self._make_folder(self.folder_name, self.file_name)
-        name = self._make_file_name(name, time_step)

-        # Provide an empty dict if data is None
+        # Convert data to list, while keeping the data structures provided,
+        # or provide an empty list if no data is provided
         if data is None:
-            data = dict()
+            data = list()
+        elif not isinstance(data, list):
+            data = [data]
+
+        # Initialize containers for data associated to nodes and edges
+        node_data: Dict[NodeDataKey, np.ndarray] = dict()
+        edge_data: Dict[EdgeDataKey, np.ndarray] = dict()
+
+        # Auxiliary function transforming scalar ranged values
+        # to vector ranged values when suitable.
+        def toVectorFormat(value: np.ndarray, g: pp.Grid) -> np.ndarray:
+            """Check whether the value array has the right dimension corresponding
+            to the grid size. If possible, translate the value to a vectorial
+            object, but do nothing if the data naturally can be interpreted as
+            scalar data.
+            """
+            # Make some checks
+            if not value.size % g.num_cells == 0:
+                raise ValueError("The data array is not compatible with the grid.")
+
+            # Convert to vectorial data if more data is provided than grid cells available,
+            # and the value array is not already in vectorial format
+            if not value.size == g.num_cells and not (len(value.shape) > 1 and value.shape[1] == g.num_cells):
+                value = np.reshape(value, (-1, g.num_cells), "F")
+
+            return value
+
+        # Loop over all data points, convert them, and collect them in the format
+        # (grid/edge, key, data) for single grids etc.
+        for pt in data:
+
+            # Allow for different cases. Distinguish each case separately.
+
+            # Case 1: Data provided by the key of a field only - could be both node
+            # and edge data. Idea: Collect all data corresponding to grids and edges
+            # related to the key.
+            if isinstance(pt, str):
+
+                # The data point is simply a string addressing a state using a key
+                key = pt
+
+                # Fetch grids and interfaces as well as data associated to the key
+                has_key = False
+
+                # Try to find the key in the data dictionary associated to nodes
+                for g, d in self.gb.nodes():
+                    if pp.STATE in d and key in d[pp.STATE]:
+                        # Mark the key as found
+                        has_key = True
+
+                        # Fetch data and convert to vectorial format if suggested by the size
+                        value: np.ndarray = toVectorFormat(d[pp.STATE][key], g)
+
+                        # Add data point in correct format to the collection
+                        node_data[(g, key)] = value
+
+                # Try to find the key in the data dictionary associated to edges
+                for e, d in self.gb.edges():
+                    if pp.STATE in d and key in d[pp.STATE]:
+                        # Mark the key as found
+                        has_key = True
+
+                        # Fetch data and convert to vectorial format if suggested by the size
+                        mg = d["mortar_grid"]
+                        value: np.ndarray = toVectorFormat(d[pp.STATE][key], mg) # TODO g?
+
+                        # Add data point in correct format to the collection
+                        edge_data[(e, key)] = value
+
+                # Make sure the key exists
+                if not has_key:
+                    raise ValueError(f"No data with provided key {key} present in the grid bucket.")
+
+            # Case 2a: Data provided by a tuple ([g], key) where [g] is a single grid
+            # or a list of grids. Idea: Collect the data only among the grids specified.
+            elif isTupleOf_entity_str(pt, entity="node"):
+
+                # By construction, the first component contains grids.
+                # Unify by converting the first component to a list
+                grids: List[pp.Grid] = pt[0] if isinstance(pt[0], list) else [pt[0]]
+
+                # By construction, the second component contains a key.
+                key: str = pt[1]
+
+                # Loop over grids and fetch the states corresponding to the key
+                for g in grids:

-        # Store the provided data as list of Fields
-        fields: List[Field] = []
-        if len(data) > 0:
-            fields.extend([Field(n, v) for n, v in data.items()])
+                    # Fetch the data dictionary containing the data value
+                    d = self.gb.node_props(g) # TODO type

-        # Extract grid (it is assumed it is the only one)
-        grid = [g for g, _ in self.gb.nodes()][0]
+                    # Make sure the data exists.
+                    assert(pp.STATE in d and key in d[pp.STATE])

-        # Add grid dimension to the data
-        grid_dim = grid.dim * np.ones(grid.num_cells, dtype=int)
-        fields.extend([Field("grid_dim", grid_dim)])
+                    # Fetch data and convert to vectorial format if suitable
+                    value: np.ndarray = toVectorFormat(d[pp.STATE][key], g)

-        # Write data to file
-        self._write(fields, name, self.meshio_geom[grid.dim])
+                    # Add data point in correct format to collection
+                    node_data[(g, key)] = value

-    def _export_list_data(self, data: List[str], time_step: float) -> None:
-        """Export the entire GridBucket and additional list data to vtu.
+            # Case 2b: Data provided by a tuple ([e], key) where [e] is a single edge
+            # or a list of edges. Idea: Collect the data only among the edges specified.
+            elif isTupleOf_entity_str(pt, entity="edge"):

-        Parameters:
-            data (List[str]): Data to be exported in addition to default GridBucket data.
-            time_step (float) : Time step, to be appended at the vtu output file.
-        """
-        # Convert data to list, or provide an empty list
-        if data is not None:
-            data = np.atleast_1d(data).tolist()
-        else:
-            data = list()
+                # By construction, the first component contains edges.
+ # Unify by converting the first component to a list + edges: Union[Edge, List[Edge]] = pt[0] if isinstance(pt[0], list) else [pt[0]] - # Extract data which is contained in nodes (and not edges). - # IMPLEMENTATION NOTE: We need a unique set of keywords for node_data. The simpler - # option would have been to gather all keys and uniquify by converting to a set, - # and then back to a list. However, this will make the ordering of the keys random, - # and it turned out that this complicates testing (see tests/unit/test_vtk). - # It was therefore considered better to use a more complex loop which - # (seems to) guarantee a deterministic ordering of the keys. - node_data = list() - # For each element in data, apply a brute force approach and check whether there - # exists a data dictionary associated to a node which contains a state variable - # with same key. If so, add the key and move on to the next key. - for key in data: - for _, d in self.gb.nodes(): - if pp.STATE in d and key in d[pp.STATE]: - node_data.append(key) - # After successfully identifying data contained in nodes, break the loop - # over nodes to avoid any unintended repeated listing of key. - break + # By construction, the second component contains a key. + key: str = pt[1] - # Transfer data to fields. - node_fields: List[Field] = [] - if len(node_data) > 0: - node_fields.extend([Field(d) for d in node_data]) + # Loop over edges and fetch the states corresponding to the key + for e in edges: - # consider the grid_bucket node data - extra_node_names = ["grid_dim", "grid_node_number", "is_mortar", "mortar_side"] - extra_node_fields = [Field(name) for name in extra_node_names] - node_fields.extend(extra_node_fields) + # Fetch the data dictionary containing the data value + d = self.gb.edge_props(e) # TODO type - self.gb.assign_node_ordering(overwrite_existing=False) - self.gb.add_node_props(extra_node_names) - # fill the extra data - for g, d in self.gb: - ones = np.ones(g.num_cells, dtype=int) - d["grid_dim"] = g.dim * ones - d["grid_node_number"] = d["node_number"] * ones - d["is_mortar"] = 0 * ones - d["mortar_side"] = pp.grids.mortar_grid.MortarSides.NONE_SIDE.value * ones + # Make sure the data exists. + assert(pp.STATE in d and key in d[pp.STATE]) - # collect the data and extra data in a single stack for each dimension - for dim in self.dims: - file_name = self._make_file_name(self.file_name, time_step, dim) - file_name = self._make_folder(self.folder_name, file_name) - for field in node_fields: - grids = self.gb.get_grids(lambda g: g.dim == dim) - values = [] - for g in grids: - if field.name in data: - values.append(self.gb.node_props(g, pp.STATE)[field.name]) - else: - values.append(self.gb.node_props(g, field.name)) - field.check(values[-1], g) - field.values = np.hstack(values) + # Fetch data and convert to vectorial format if suitable + mg = d["mortar_grid"] + value: np.ndarray = toVectorFormat(d[pp.STATE][key], mg) - if self.meshio_geom[dim] is not None: - self._write(node_fields, file_name, self.meshio_geom[dim]) + # Add data point in correct format to collection + edge_data[(e, key)] = value - self.gb.remove_node_props(extra_node_names) + # TODO include this case at all? It is currently kept as close to the previous use case. - # Extract data which is contained in edges (and not nodes). 
-        # IMPLEMENTATION NOTE: See the above loop to construct node_data for an explanation
-        # of this elaborate construction of `edge_data`
-        edge_data = list()
-        for key in data:
-            for _, d in self.gb.edges():
-                if pp.STATE in d and key in d[pp.STATE]:
-                    edge_data.append(key)
-                    # After successfully identifying data contained in edges, break the loop
-                    # over edges to avoid any unintended repeated listing of key.
-                    break
+            # Case 3: Data provided by a tuple (key, data). Here, the grid is
+            # implicitly addressed. This only works if there is just a single
+            # grid. Idea: Extract the unique grid and continue as in Case 2.
+            elif isTupleOf_str_array(pt):
+                # Fetch the correct grid. This option is only supported for grid buckets containing a single grid.

-        # Transfer data to fields.
-        edge_fields: List[Field] = []
-        if len(edge_data) > 0:
-            edge_fields.extend([Field(d) for d in edge_data])
+                # Collect all grids
+                grids: List[pp.Grid] = [g for g, _ in self.gb.nodes()]

-        # consider the grid_bucket edge data
-        extra_edge_names = ["grid_dim", "grid_edge_number", "is_mortar", "mortar_side"]
-        extra_edge_fields = [Field(name) for name in extra_edge_names]
-        edge_fields.extend(extra_edge_fields)
+                # Make sure there exists only a single grid
+                if not len(grids) == 1:
+                    raise ValueError(f"The data type used for {pt} is only supported if the grid bucket contains a single grid.")

-        self.gb.add_edge_props(extra_edge_names)
-        # fill the extra data
-        for _, d in self.gb.edges():
-            d["grid_dim"] = {}
-            d["cell_id"] = {}
-            d["grid_edge_number"] = {}
-            d["is_mortar"] = {}
-            d["mortar_side"] = {}
+                # Extract the unique grid
+                g = grids[0]
+
+                # Fetch remaining ingredients required to define a node data element
+                key: str = pt[0]
+                value: np.ndarray = toVectorFormat(pt[1], g)
+
+                # Add data point in correct format to collection
+                node_data[(g, key)] = value
+
+            # Case 4a: Data provided as tuple (g, key, data). Idea: Since this is
+            # already the desired format, process naturally.
+            elif isTupleOf_entity_str_array(pt, entity="node"):
+                # Data point in correct format
+                g: pp.Grid = pt[0]
+                key: str = pt[1]
+                value: np.ndarray = toVectorFormat(pt[2], g)
+
+                # Add data point in correct format to collection
+                node_data[(g, key)] = value
+
+            # Case 4b: Data provided as tuple (e, key, data). Idea: Since this is
+            # already the desired format, process naturally.
+            elif isTupleOf_entity_str_array(pt, entity="edge"):
+                # Data point in correct format
+                e: Edge = pt[0]
+                key: str = pt[1]
+                d = self.gb.edge_props(e) # TODO type
+                mg = d["mortar_grid"]
+                value: np.ndarray = toVectorFormat(pt[2], mg)
+
+                # Add data point in correct format to collection
+                edge_data[(e, key)] = value
+
+            else:
+                raise ValueError(f"The provided data type used for {pt} is not supported.")
+
+        return node_data, edge_data
+
+    def _add_extra_node_data(self, node_data: NodeData) -> NodeData:
+        """Enhance node data with geometrical information.
+
+        Parameters:
+            node_data (NodeData): data container which will be
+                enhanced
+
+        Returns:
+            NodeData: previous node_data with additional information on
+                the grid dimension, mortar side and whether the grid is
+                mortar.
+        """
+        # All extra fields to be added to the data container
+        self._extra_node_data = ["grid_dim", "is_mortar", "mortar_side"]
+
+        # Add info by direct assignment
+        for g, d in self.gb.nodes():
+            ones = np.ones(g.num_cells, dtype=int)
+            node_data[(g, "grid_dim")] = g.dim * ones
+            #node_data[(g, "grid_node_number")] = d["node_number"] * ones # TODO?
+            node_data[(g, "is_mortar")] = 0 * ones
+            node_data[(g, "mortar_side")] = pp.grids.mortar_grid.MortarSides.NONE_SIDE.value * ones
+
+        return node_data
+
+    def _add_extra_edge_data(self, edge_data: EdgeData) -> EdgeData:
+        """Enhance edge data with geometrical information.
+
+        Parameters:
+            edge_data (EdgeData): data container which will be
+                enhanced
+
+        Returns:
+            EdgeData: previous edge_data with additional information on
+                the grid dimension, the cell ids, the grid edge number,
+                mortar side and whether the grid is mortar.
+        """
+        # All extra fields to be added to the data container
+        self._extra_edge_data = ["grid_dim", "cell_id", "grid_edge_number", "is_mortar", "mortar_side"]
+
+        # Add info by direct assignment - have to take into account values on both sides
+        # TODO can't we do this more explicit and by that shorter and simpler?
+        for e, d in self.gb.edges():
+
+            # Fetch mortar grid
             mg = d["mortar_grid"]
+
+            # Construct empty arrays for all extra edge data
+            edge_data[(e, "grid_dim")] = np.empty(0, dtype=int)
+            edge_data[(e, "cell_id")] = np.empty(0, dtype=int)
+            edge_data[(e, "grid_edge_number")] = np.empty(0, dtype=int)
+            edge_data[(e, "is_mortar")] = np.empty(0, dtype=int)
+            edge_data[(e, "mortar_side")] = np.empty(0, dtype=int)
+
+            # Assign extra edge data by collecting values on both sides.
             mg_num_cells = 0
             for side, g in mg.side_grids.items():
                 ones = np.ones(g.num_cells, dtype=int)
-                d["grid_dim"][side] = g.dim * ones
-                d["is_mortar"][side] = ones
-                d["mortar_side"][side] = side.value * ones
-                d["cell_id"][side] = np.arange(g.num_cells, dtype=int) + mg_num_cells
+
+                # Grid dimension of the mortar grid
+                edge_data[(e, "grid_dim")] = np.hstack(
+                    (
+                        edge_data[(e, "grid_dim")],
+                        g.dim * ones
+                    )
+                )
+
+                # Cell ids of the mortar grid
+                edge_data[(e, "cell_id")] = np.hstack(
+                    (
+                        edge_data[(e, "cell_id")],
+                        np.arange(g.num_cells, dtype=int) + mg_num_cells
+                    )
+                )
+
+                # Grid edge number of each edge
+                edge_data[(e, "grid_edge_number")] = np.hstack(
+                    (
+                        edge_data[(e, "grid_edge_number")],
+                        d["edge_number"] * ones
+                    )
+                )
+
+                # Whether the edge is mortar
+                edge_data[(e, "is_mortar")] = np.hstack(
+                    (
+                        edge_data[(e, "is_mortar")],
+                        ones
+                    )
+                )
+
+                # Side of the mortar
+                edge_data[(e, "mortar_side")] = np.hstack(
+                    (
+                        edge_data[(e, "mortar_side")],
+                        side.value * ones
+                    )
+                )
+
                 mg_num_cells += g.num_cells
-                d["grid_edge_number"][side] = d["edge_number"] * ones

-        # collect the data and extra data in a single stack for each dimension
-        for dim in self.m_dims:
-            file_name = self._make_file_name_mortar(
-                self.file_name, time_step=time_step, dim=dim
-            )
-            file_name = self._make_folder(self.folder_name, file_name)
+        return edge_data

-            mgs = self.gb.get_mortar_grids(lambda g: g.dim == dim)
-            cond = lambda d: d["mortar_grid"].dim == dim
-            edges: List[Tuple[pp.Grid, pp.Grid]] = [
-                e for e, d in self.gb.edges() if cond(d)
-            ]
+    def _export_data(
+        self,
+        data: Union[NodeData, EdgeData],
+        time_step: int
+    ) -> None:
+        """Routine for collecting data associated to a single grid dimension
+        and passing it on to the final writing routine.
+
+        For each fixed dimension, all subdomains of that dimension and the data
+        related to those subdomains will be exported simultaneously. The same
+        holds for interfaces.
-            for field in edge_fields:
-                values = []
-                for mg, edge in zip(mgs, edges):
-                    if field.name in data:
-                        values.append(self.gb.edge_props(edge, pp.STATE)[field.name])
-                    else:
-                        for side, _ in mg.side_grids.items():
-                            # Convert edge to tuple to be compatible with GridBucket
-                            # data structure
-                            values.append(self.gb.edge_props(edge, field.name)[side])

-                field.values = np.hstack(values)
+        Parameters:
+            data (Union[NodeData, EdgeData]): Node or edge data. The routine
+                detects the data type itself and proceeds
+                accordingly.
+            time_step (int): time_step to be used to append the file name.
+        """

-                if self.m_meshio_geom[dim] is not None:
-                    self._write(edge_fields, file_name, self.m_meshio_geom[dim])
+        # Determine whether data corresponds to node or edge data.
+        # Node data and edge data will be treated differently throughout
+        # the export procedure.
+        is_node_data = all([isTupleOf_entity_str(pt, entity="node") for pt in data])
+        is_edge_data = all([isTupleOf_entity_str(pt, entity="edge") for pt in data])
+        if not is_node_data and not is_edge_data:
+            raise ValueError("data has to be consistently either of node or edge data type.")
+
+        # Collect unique keys, and sort them alphabetically for a unique order
+        keys = list(set([key for _, key in data]))
+        keys.sort()
+
+        # Fetch the dimensions to be traversed. For nodes, fetch the dimensions
+        # of the available grids, and for edges fetch the dimensions of the available
+        # mortar grids.
+        dims = self.dims if is_node_data else self.m_dims
+
+        # Collect the data and extra data in a single stack for each dimension
+        for dim in dims:
+            # Define the file name depending on data type
+            if is_node_data:
+                file_name = self._make_file_name(self.file_name, time_step, dim)
+            elif is_edge_data:
+                file_name = self._make_file_name_mortar(
+                    self.file_name, time_step=time_step, dim=dim
+                )

-        file_name = self._make_file_name(self.file_name, time_step, extension=".pvd")
-        file_name = self._make_folder(self.folder_name, file_name)
-        self._export_pvd_gb(file_name, time_step)
+            # Append folder name to file name
+            file_name = self._make_folder(self.folder_name, file_name)

-        self.gb.remove_edge_props(extra_edge_names)
+            # Get all geometrical entities of dimension dim: subdomains
+            # with correct grid dimension for node data, and interfaces
+            # with correct mortar grid dimension for edge data.
+            entities: Union[List[pp.Grid], List[Tuple[pp.Grid, pp.Grid]]] = []
+            if is_node_data:
+                entities = self.gb.get_grids(lambda g: g.dim == dim)
+            elif is_edge_data:
+                entities = [e for e, d in self.gb.edges() if d["mortar_grid"].dim == dim]

+            # Construct the list of fields represented on this dimension.
+            fields: List[Field] = []
+            for key in keys:

+                # Collect the values associated to all entities
+                values = []
+                for e in entities:
+                    if (e, key) in data:
+                        values.append(data[(e, key)])
+
+                # Require data for all or no entities of that dimension.
+                assert(len(values) in [0, len(entities)])
+
+                # If data has been found, append data to list after stacking
+                # values for different entities
+                if values:
+                    field = Field(key)
+                    field.values = np.hstack(values)
+                    fields.append(field)
+
+            # Print data for the particular dimension. Since geometric
+            # info is required, distinguish between node and edge data.
+            meshio_geom = self.meshio_geom[dim] if is_node_data else self.m_meshio_geom[dim]
+            if meshio_geom is not None:
+                self._write(fields, file_name, meshio_geom)
+
+    def _export_pvd_gb(self, file_name: str, time_step: int) -> None:
+        """Routine to export to pvd format and collect all data scattered over
+        several files for distinct grid dimensions.

-    def _export_pvd_gb(self, file_name: str, time_step: float) -> None:
+        Parameters:
+            file_name (str): storage path for pvd file
+            time_step (int): used as appendix for the file name
+        """
+        # Open the file
         o_file = open(file_name, "w")
+
+        # Write VTK header to file
         b = "LittleEndian" if sys.byteorder == "little" else "BigEndian"
         c = ' compressor="vtkZLibDataCompressor"'
         header = (
@@ -493,13 +727,15 @@ def _export_pvd_gb(self, file_name: str, time_step: float) -> None:
         o_file.write(header)
         fm = '\t<DataSet group="" part="" file="%s"/>\n'

-        # Include the grids and mortar grids of all dimensions, but only if the
-        # grids (or mortar grids) of this dimension are included in the vtk export
+        # Include the grids of all dimensions, but only if the grids of this
+        # dimension are included in the vtk export
         for dim in self.dims:
             if self.meshio_geom[dim] is not None:
                 o_file.write(
                     fm % self._make_file_name(self.file_name, time_step, dim=dim)
                 )
+
+        # Same for mortar grids.
         for dim in self.m_dims:
             if self.m_meshio_geom[dim] is not None:
                 o_file.write(
@@ -569,9 +805,8 @@ def _export_1d(
         num_pts = np.sum([g.num_nodes for g in gs])
         meshio_pts = np.empty((num_pts, 3))  # type: ignore

-        # Initialize offsets. Required taking into account multiple 1d grids.
+        # Initialize offset. Required taking into account multiple 1d grids.
         nodes_offset = 0
-        cell_offset = 0

         # Loop over all 1d grids
         for g in gs:
@@ -590,8 +825,6 @@ def _export_1d(

             # Update offsets
             nodes_offset += g.num_nodes
-            cell_offset += g.num_cells
-

         # Construct the meshio data structure
         num_blocks = len(cell_to_nodes)
@@ -681,14 +914,18 @@ def _export_2d(
         # Loop over all available cell types and group cells of one type.
         g_cell_map = dict()
         for n in np.unique(num_faces_per_cell):
+            # Define cell type; check if it coincides with a predefined cell type
             cell_type = polygon_map.get(f"polygon{n}", f"polygon{n}")
+            # Find all cells with n faces, and store for later use
             cells = np.nonzero(num_faces_per_cell == n)[0]
             g_cell_map[cell_type] = cells
+            # Store cell ids in global container; init if entry not yet established
             if cell_type not in cell_id:
                 cell_id[cell_type] = []
+            # Add offset taking into account previous grids
             cell_id[cell_type] += (cells + cell_offset).tolist()
@@ -1302,3 +1539,276 @@ def _make_file_name_mortar(
         else:
             time = str(time_step).zfill(padding)
             return file_name + str(dim) + "_" + time + extension
+
+
+
+
+
+    ####################################################
+    # TODO
+    # Remove!
+
+    # TODO do we need dicts here? do we want to support both for grid buckets (this does not make sense)
+    def old_write_vtu(
+        self,
+        data: Optional[Union[Dict, List[str]]] = None, # TODO update
+        time_dependent: bool = False, # TODO in use? e.g., when not each time step is printed? or is time_step usually used?
+        time_step: int = None,
+        gb: Optional[Union[pp.Grid, pp.GridBucket]] = None,
+    ) -> None:
+        """
+        Interface function to export the grid and additional data with meshio.
+
+        In 1d the cells are represented as lines, 2d the cells as polygon or
+        triangle/quad, while in 3d as polyhedra/tetrahedra/hexahedra.
+        In all the dimensions the geometry of the mesh needs to be computed.
+ + Parameters: + data: if g is a single grid then data is a dictionary (see example) + if g is a grid bucket then list of names for optional data, + they are the keys in the grid bucket (see example). + time_dependent: (boolean, optional) If False, file names will not be appended with + an index that markes the time step. Can be overridden by giving + a value to time_step. + time_step: (optional) in a time dependent problem defines the part of the file + name associated with this time step. If not provided, subsequent + time steps will have file names ending with 0, 1, etc. + grid: (optional) in case of changing grid set a new one. + + """ + + # Update the grid (bucket) but only if allowed, i.e., the initial grid + # (bucket) has not been characterized as fixed. + if self.fixed_grid and gb is not None: + raise ValueError("Inconsistency in exporter setting") + + elif not self.fixed_grid and gb is not None: + + # Require a grid bucket. Thus, convert grid -> grid bucket. + if isinstance(gb, pp.Grid): + # Create a new grid bucket solely with a single grid as nodes. + self.gb = pp.GridBucket() + self.gb.add_nodes(gb) + + else: + self.gb = gb + + # Update geometrical info in meshio format for the updated grid + self._update_meshio_geom() + + # Reset control parameter for the reuse of fixed data in the + # write routine, so the updated mesh is used for further export. + self._prev_exported_time_step = None + + # TODO rm? OK to keep for now! + # If the problem is time dependent, but no time step is set, we set one + # using the updated, internal counter. + if time_dependent and time_step is None: + time_step = self._time_step_counter + self._time_step_counter += 1 + + # If the problem is time dependent (with specified or automatic time step index) + # add the time step to the exported files + if time_step is not None: + self._exported_time_step_file_names.append(time_step) + + # TODO change this! + # NOTE grid buckets are exported with dict data, while single + # grids (converted to grid buckets) are exported with list data. + if isinstance(data, list): + self._export_list_data(data, time_step) # type: ignore + elif isinstance(data, dict): + self._export_dict_data(data, time_step) + else: + raise NotImplemented("No other data type than list and dict supported.") + + # Update previous time step used for export to be used in the next application. + if time_step is not None: + self._prev_exported_time_step = time_step + + def _export_dict_data(self, data: Dict[str, np.ndarray], time_step): + """ + Export single grid to a vtu file with dictionary data. + + Parameters: + data (Dict[str, np.ndarray]): Data to be exported. + time_step (float) : Time step, to be appended at the vtu output file. + """ + # Currently this method does only support single grids. 
+ if self.dims.shape[0] > 1: + raise NotImplementedError("Grid buckets with more than one grid are not supported.") + + # No need of special naming, create the folder + name = self._make_folder(self.folder_name, self.file_name) + name = self._make_file_name(name, time_step) + + # Provide an empty dict if data is None + if data is None: + data = dict() + + # Store the provided data as list of Fields + fields: List[Field] = [] + if len(data) > 0: + fields.extend([Field(n, v) for n, v in data.items()]) + + # Extract grid (it is assumed it is the only one) + grid = [g for g, _ in self.gb.nodes()][0] + + # Add grid dimension to the data + grid_dim = grid.dim * np.ones(grid.num_cells, dtype=int) + fields.extend([Field("grid_dim", grid_dim)]) + + # Write data to file + self._write(fields, name, self.meshio_geom[grid.dim]) + + def _export_list_data(self, data: List[str], time_step: float) -> None: + """Export the entire GridBucket and additional list data to vtu. + + Parameters: + data (List[str]): Data to be exported in addition to default GridBucket data. + time_step (float) : Time step, to be appended at the vtu output file. + """ + # Convert data to list, or provide an empty list + if data is not None: + data = np.atleast_1d(data).tolist() + else: + data = list() + + # TODO simplify + + # Extract data which is contained in nodes (and not edges). + # IMPLEMENTATION NOTE: We need a unique set of keywords for node_data. The simpler + # option would have been to gather all keys and uniquify by converting to a set, + # and then back to a list. However, this will make the ordering of the keys random, + # and it turned out that this complicates testing (see tests/unit/test_vtk). + # It was therefore considered better to use a more complex loop which + # (seems to) guarantee a deterministic ordering of the keys. + node_data = list() + # For each element in data, apply a brute force approach and check whether there + # exists a data dictionary associated to a node which contains a state variable + # with same key. If so, add the key and move on to the next key. + for key in data: + for _, d in self.gb.nodes(): + if pp.STATE in d and key in d[pp.STATE]: + node_data.append(key) + # After successfully identifying data contained in nodes, break the loop + # over nodes to avoid any unintended repeated listing of key. + break + + # Transfer data to fields. + # TODO Field structure required here? or can we use something standard in Python? 
+ # Collect all provided keywords and + node_fields: List[Field] = [] + if len(node_data) > 0: + node_fields.extend([Field(d) for d in node_data]) + + # Include consider the grid_bucket node data + extra_node_names = ["grid_dim", "grid_node_number", "is_mortar", "mortar_side"] + extra_node_fields = [Field(name) for name in extra_node_names] + node_fields.extend(extra_node_fields) + + self.gb.assign_node_ordering(overwrite_existing=False) + self.gb.add_node_props(extra_node_names) + # fill the extra data + for g, d in self.gb: + ones = np.ones(g.num_cells, dtype=int) + d["grid_dim"] = g.dim * ones + d["grid_node_number"] = d["node_number"] * ones + d["is_mortar"] = 0 * ones + d["mortar_side"] = pp.grids.mortar_grid.MortarSides.NONE_SIDE.value * ones + + # collect the data and extra data in a single stack for each dimension + for dim in self.dims: + file_name = self._make_file_name(self.file_name, time_step, dim) + file_name = self._make_folder(self.folder_name, file_name) + for field in node_fields: + grids = self.gb.get_grids(lambda g: g.dim == dim) + values = [] + for g in grids: + if field.name in data: + values.append(self.gb.node_props(g, pp.STATE)[field.name]) + else: + values.append(self.gb.node_props(g, field.name)) + field.check(values[-1], g) + field.values = np.hstack(values) + + if self.meshio_geom[dim] is not None: + self._write(node_fields, file_name, self.meshio_geom[dim]) + + self.gb.remove_node_props(extra_node_names) + + # Extract data which is contained in edges (and not nodes). + # IMPLEMENTATION NOTE: See the above loop to construct node_data for an explanation + # of this elaborate construction of `edge_data` + edge_data = list() + for key in data: + for _, d in self.gb.edges(): + if pp.STATE in d and key in d[pp.STATE]: + edge_data.append(key) + # After successfully identifying data contained in edges, break the loop + # over edges to avoid any unintended repeated listing of key. + break + + # Transfer data to fields. 
+        edge_fields: List[Field] = []
+        if len(edge_data) > 0:
+            edge_fields.extend([Field(d) for d in edge_data])
+
+        # consider the grid_bucket edge data
+        extra_edge_names = ["grid_dim", "grid_edge_number", "is_mortar", "mortar_side"]
+        extra_edge_fields = [Field(name) for name in extra_edge_names]
+        edge_fields.extend(extra_edge_fields)
+
+        self.gb.add_edge_props(extra_edge_names)
+        # fill the extra data
+        for _, d in self.gb.edges():
+            d["grid_dim"] = {}
+            d["cell_id"] = {}
+            d["grid_edge_number"] = {}
+            d["is_mortar"] = {}
+            d["mortar_side"] = {}
+            mg = d["mortar_grid"]
+            mg_num_cells = 0
+            for side, g in mg.side_grids.items():
+                ones = np.ones(g.num_cells, dtype=int)
+                d["grid_dim"][side] = g.dim * ones
+                d["is_mortar"][side] = ones
+                d["mortar_side"][side] = side.value * ones
+                d["cell_id"][side] = np.arange(g.num_cells, dtype=int) + mg_num_cells
+                mg_num_cells += g.num_cells
+                d["grid_edge_number"][side] = d["edge_number"] * ones
+
+        # collect the data and extra data in a single stack for each dimension
+        for dim in self.m_dims:
+            file_name = self._make_file_name_mortar(
+                self.file_name, time_step=time_step, dim=dim
+            )
+            file_name = self._make_folder(self.folder_name, file_name)
+
+            mgs = self.gb.get_mortar_grids(lambda g: g.dim == dim)
+            cond = lambda d: d["mortar_grid"].dim == dim
+            edges: List[Tuple[pp.Grid, pp.Grid]] = [
+                e for e, d in self.gb.edges() if cond(d)
+            ]
+
+            for field in edge_fields:
+                values = []
+                for mg, edge in zip(mgs, edges):
+                    if field.name in data:
+                        values.append(self.gb.edge_props(edge, pp.STATE)[field.name])
+                    else:
+                        for side, _ in mg.side_grids.items():
+                            # Convert edge to tuple to be compatible with GridBucket
+                            # data structure
+                            values.append(self.gb.edge_props(edge, field.name)[side])
+
+                field.values = np.hstack(values)
+
+            if self.m_meshio_geom[dim] is not None:
+                self._write(edge_fields, file_name, self.m_meshio_geom[dim])
+
+        file_name = self._make_file_name(self.file_name, time_step, extension=".pvd")
+        file_name = self._make_folder(self.folder_name, file_name)
+        self._export_pvd_gb(file_name, time_step)
+
+        self.gb.remove_edge_props(extra_edge_names)
diff --git a/src/porepy/viz/type_test.py b/src/porepy/viz/type_test.py
new file mode 100644
index 0000000000..f3180b7c7a
--- /dev/null
+++ b/src/porepy/viz/type_test.py
@@ -0,0 +1,72 @@
+# TODO DOC
+import porepy as pp
+import numpy as np
+
+def is_Edge(e) -> bool:
+    """Implementation of isinstance(e, Tuple[pp.Grid, pp.Grid])."""
+
+    return (
+        isinstance(e, tuple) and
+        len(e) == 2 and
+        isinstance(e[0], pp.Grid) and
+        isinstance(e[1], pp.Grid)
+    )
+
+def isTupleOf_entity_str(pt, entity) -> bool:
+    """Implementation of isinstance(pt, Tuple[Union[pp.Grid, List[pp.Grid]], str]), if entity = "node", and
+    isinstance(pt, Tuple[Union[Tuple[pp.Grid, pp.Grid], List[Tuple[pp.Grid, pp.Grid]]], str]) if entity = "edge".
+ """ + + if entity not in ["node", "edge"]: + raise ValueError(f"Entity type {entity} not supported.") + + return ( + # Check whether the input is a tuple with two entries + isinstance(pt, tuple) and + len(pt) == 2 and + ( + # Check whether the first component is a grid or a list of grids + entity == "node" and + ( + isinstance(pt[0], pp.Grid) or + isinstance(pt[0], list) and all([isinstance(g, pp.Grid) for g in pt[0]]) + ) or + + # Check whether the first component is an edge or a list of edges + entity == "edge" and + ( + is_Edge(pt[0]) or + isinstance(pt[0], list) and all([is_Edge(g) for g in pt[0]]) + ) + ) and + # Check whether the second component is a string + isinstance(pt[1], str) + ) + +def isTupleOf_entity_str_array(pt, entity) -> bool: + """Implementation of isinstance(pt, Tuple[pp.Grid, str, np.ndarray] if entity="node", + and isinstance(pt, Tuple[Tuple[pp.Grid, pp.Grid], str, np.ndarray]) if entity="edge".""" + + if entity not in ["node", "edge"]: + raise ValueError(f"Entity type {entity} not supported.") + + return ( + isinstance(pt, tuple) and + len(pt) == 3 and + ( + entity == "node" and isinstance(pt[0], pp.Grid) or + entity == "edge" and is_Edge(pt[0]) + ) and + isinstance(pt[1], str) and + isinstance(pt[2], np.ndarray) + ) + +def isTupleOf_str_array(pt) -> bool: + """Implementation if isinstance(pt, Tuple[str, np.ndarray].""" + + return ( + isinstance(pt, tuple) and + len(pt) == 2 and + isinstance(pt[0], str) and + isinstance(pt[1], np.ndarray) + ) From d07f82523cde8dd25dcec5b5cf5644e0421c6d6c Mon Sep 17 00:00:00 2001 From: Jakub Both Date: Mon, 14 Mar 2022 22:12:12 +0100 Subject: [PATCH 15/83] MAINT: Add missing definition of data types and restructure doc. --- src/porepy/viz/exporter.py | 119 +++++++++++++++++++++++++------------ 1 file changed, 81 insertions(+), 38 deletions(-) diff --git a/src/porepy/viz/exporter.py b/src/porepy/viz/exporter.py index 645240ae96..71d943ab08 100644 --- a/src/porepy/viz/exporter.py +++ b/src/porepy/viz/exporter.py @@ -53,6 +53,76 @@ def check(self, values: Optional[np.ndarray], g: pp.Grid) -> None: raise ValueError("Field " + str(self.name) + " has wrong dimension.") class Exporter: + # TODO DOC + """ + Class for exporting data to vtu files. + + Parameters: + gb: grid bucket (if grid is provided, it will be converted to a grid bucket) + file_name: the root of file name without any extension. + folder_name: (optional) the name of the folder to save the file. + If the folder does not exist it will be created. + + Optional arguments in kwargs: + fixed_grid: (optional) in a time dependent simulation specify if the + grid changes in time or not. The default is True. + binary: export in binary format, default is True. + + How to use: + If you need to export a single grid: + save = Exporter(g, "solution", folder_name="results") + save.write_vtu({"cells_id": cells_id, "pressure": pressure}) + + In a time loop: + save = Exporter(gb, "solution", folder_name="results") + while time: + save.write_vtu({"conc": conc}, time_step=i) + save.write_pvd(steps*deltaT) + + if you need to export the state of variables as stored in the GridBucket: + save = Exporter(gb, "solution", folder_name="results") + # export the field stored in data[pp.STATE]["pressure"] + save.write_vtu(gb, ["pressure"]) + + In a time loop: + while time: + save.write_vtu(["conc"], time_step=i) + save.write_pvd(steps*deltaT) + + In the case of different keywords, change the file name with + "change_name". 
+
+    NOTE: the following names are reserved for data exporting: grid_dim,
+    is_mortar, mortar_side, cell_id
+
+    """
+
+    # Short cuts for some typing used in Exporter
+    Edge = Tuple[pp.Grid, pp.Grid]
+
+    # Allowed data structures to define node data
+    NodeDataInput = Union[
+        str,
+        Tuple[Union[pp.Grid, List[pp.Grid]], str],
+        Tuple[pp.Grid, str, np.ndarray],
+        Tuple[str, np.ndarray],
+    ]
+
+    # Allowed data structures to define edge data
+    EdgeDataInput = Union[
+        str,
+        Tuple[Union[Edge, List[Edge]], str],
+        Tuple[Edge, str, np.ndarray],
+        Tuple[str, np.ndarray],
+    ]
+
+    # Altogether allowed data structures to define data
+    DataInput = Union[NodeDataInput, EdgeDataInput]
+
+    # Data structure in which data is stored
+    NodeData = Dict[Tuple[pp.Grid, str], np.ndarray]
+    EdgeData = Dict[Tuple[Edge, str], np.ndarray]
+
     def __init__(
         self,
         gb: Union[pp.Grid, pp.GridBucket],
@@ -60,48 +130,21 @@ def __init__(
         folder_name: Optional[str] = None,
         **kwargs,
     ) -> None:
-        """
-        Class for exporting data to vtu files.
+        """Initialization of Exporter.

         Parameters:
-            gb: grid bucket (if grid is provided, it will be converted to a grid bucket)
-            file_name: the root of file name without any extension.
-            folder_name: (optional) the name of the folder to save the file.
-                If the folder does not exist it will be created.
-
-            Optional arguments in kwargs:
-                fixed_grid: (optional) in a time dependent simulation specify if the
-                    grid changes in time or not. The default is True.
-                binary: export in binary format, default is True.
-
-            How to use:
-            If you need to export a single grid:
-            save = Exporter(g, "solution", folder_name="results")
-            save.write_vtu({"cells_id": cells_id, "pressure": pressure})
-
-            In a time loop:
-            save = Exporter(gb, "solution", folder_name="results")
-            while time:
-                save.write_vtu({"conc": conc}, time_step=i)
-            save.write_pvd(steps*deltaT)
-
-            if you need to export the state of variables as stored in the GridBucket:
-            save = Exporter(gb, "solution", folder_name="results")
-            # export the field stored in data[pp.STATE]["pressure"]
-            save.write_vtu(gb, ["pressure"])
-
-            In a time loop:
-            while time:
-                save.write_vtu(["conc"], time_step=i)
-            save.write_pvd(steps*deltaT)
-
-            In the case of different keywords, change the file name with
-            "change_name".
-
-            NOTE: the following names are reserved for data exporting: grid_dim,
-            is_mortar, mortar_side, cell_id
+            gb (Union[pp.Grid, pp.GridBucket]): grid or grid bucket containing all
+                mesh information to be exported.
+            file_name (str): basis for file names used for storing the output.
+            folder_name (str, optional): folder in which all files are stored.
+            kwargs: Optional keywords. 1. fixed_grid (boolean) to control whether
+                the grid (bucket) may be redefined; 2. binary (boolean) to control
+                whether the output is using binary or non-binary format;
+                3. reuse_data (boolean) to control whether geometrical data is
+                reused from the previous time step when printing to file.
         """
+        # TODO what is the right format for describing kwargs?

         # Exporter is operating on grid buckets. If a grid is provided, convert to a grid bucket.
         if isinstance(gb, pp.Grid):

From c5a600ec137c5596f42bc5adc05a30daa2b68e7b Mon Sep 17 00:00:00 2001
From: Jakub Both
Date: Mon, 9 May 2022 17:51:11 +0200
Subject: [PATCH 16/83] Replace Field with a namedtuple. Make the check
 explicit.
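
A namedtuple provides the same attribute access as the removed helper class
while being immutable and free of boilerplate; a minimal usage sketch (the
array content is illustrative only):

    from collections import namedtuple

    import numpy as np

    Field = namedtuple("Field", ["name", "values"])

    f = Field("pressure", np.zeros(4))
    print(f.name, f.values.shape)  # attribute access as before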
--- src/porepy/viz/exporter.py | 42 +++++++++++---------------------
 1 file changed, 12 insertions(+), 30 deletions(-)

diff --git a/src/porepy/viz/exporter.py b/src/porepy/viz/exporter.py
index 71d943ab08..4b97b353f4 100644
--- a/src/porepy/viz/exporter.py
+++ b/src/porepy/viz/exporter.py
@@ -12,6 +12,7 @@
 import sys
 from typing import Dict, Generator, Iterable, List, Optional, Tuple, Union
 from xml.etree import ElementTree as ET
+from collections import namedtuple

 import meshio
 import numpy as np
@@ -24,33 +25,8 @@
 # Module-wide logger
 logger = logging.getLogger(__name__)

-class Field:
-    """
-    Internal class to store information for the data to export.
-    """
-
-    def __init__(self, name: str, values: Optional[np.ndarray] = None) -> None:
-        # name of the field
-        self.name = name
-        self.values = values
-
-    def __repr__(self) -> str:
-        """
-        Repr function
-        """
-        return self.name + " - values: " + str(self.values)
-
-    def check(self, values: Optional[np.ndarray], g: pp.Grid) -> None:
-        """
-        Consistency checks making sure the field self.name is filled and has
-        the right dimension.
-        """
-        if values is None:
-            raise ValueError(
-                "Field " + str(self.name) + " must be filled. It can not be None"
-            )
-        if np.atleast_2d(values).shape[1] != g.num_cells:
-            raise ValueError("Field " + str(self.name) + " has wrong dimension.")
+# Object type to store data to export.
+Field = namedtuple('Field', ['name', 'values'])

 class Exporter:
     # TODO DOC
@@ -737,8 +713,7 @@ def _export_data(
             # If data has been found, append data to list after stacking
             # values for different entities
             if values:
-                field = Field(key)
-                field.values = np.hstack(values)
+                field = Field(key, np.hstack(values))
                 fields.append(field)

             # Print data for the particular dimension. Since geometric
@@ -1772,7 +1747,14 @@ def _export_list_data(self, data: List[str], time_step: float) -> None:
                         values.append(self.gb.node_props(g, pp.STATE)[field.name])
                     else:
                         values.append(self.gb.node_props(g, field.name))
-                    field.check(values[-1], g)
+                    # Check whether the latest value is valid and has size corresponding
+                    # to the grid size.
+                    if values[-1] is None:
+                        raise ValueError(
+                            "Field " + str(field.name) + " must be filled. It can not be None."
+                        )
+                    if np.atleast_2d(values[-1]).shape[1] != g.num_cells:
+                        raise ValueError("Field " + str(field.name) + " has wrong dimension.")
                     field.values = np.hstack(values)

                 if self.meshio_geom[dim] is not None:

From 65b6a1625ef13161adfc3b6dc64e934064e4d97e Mon Sep 17 00:00:00 2001
From: Jakub Both
Date: Tue, 10 May 2022 10:13:31 +0200
Subject: [PATCH 17/83] TST: Update test of exporter. Overview of changes:
- Constant fields such as grid_dim, grid_node_number, mortar_side, is_mortar are
 not exported anymore.
- Connectivity information is updated due to different definition.
- Input for write_vtu has been adapted (see the sketch below).
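
Schematically, the tests now pass data as (grid, key, value) tuples instead
of a plain dictionary (g, dummy_scalar and dummy_vector as set up in the
respective test):

    # old input format
    save.write_vtu({"dummy_scalar": dummy_scalar, "dummy_vector": dummy_vector})

    # new input format
    save.write_vtu([(g, "dummy_scalar", dummy_scalar), (g, "dummy_vector", dummy_vector)])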
--- tests/unit/test_vtk.py | 645 +++-------------------------------------- 1 file changed, 36 insertions(+), 609 deletions(-) diff --git a/tests/unit/test_vtk.py b/tests/unit/test_vtk.py index 6b5472f511..467374914c 100644 --- a/tests/unit/test_vtk.py +++ b/tests/unit/test_vtk.py @@ -12,7 +12,6 @@ import numpy as np import porepy as pp - class MeshioExporterTest(unittest.TestCase): def __init__(self, methodName="runTest"): unittest.TestCase.__init__(self, methodName) @@ -32,9 +31,9 @@ def test_single_grid_1d(self): dummy_vector = np.ones((3, g.num_cells)) * g.dim save = pp.Exporter(g, self.file_name, self.folder, binary=False) - save.write_vtu({"dummy_scalar": dummy_scalar, "dummy_vector": dummy_vector}) + save.write_vtu([(g, "dummy_scalar", dummy_scalar), (g, "dummy_vector", dummy_vector)]) - with open(self.folder + self.file_name + ".vtu", "r") as content_file: + with open(self.folder + self.file_name + "_1.vtu", "r") as content_file: content = self.sliceout(content_file.read()) self.assertTrue(content == self._single_grid_1d_grid_vtu()) @@ -46,9 +45,9 @@ def test_single_grid_2d_simplex(self): dummy_vector = np.ones((3, g.num_cells)) * g.dim save = pp.Exporter(g, self.file_name, self.folder, binary=False) - save.write_vtu({"dummy_scalar": dummy_scalar, "dummy_vector": dummy_vector}) + save.write_vtu([(g, "dummy_scalar", dummy_scalar), (g, "dummy_vector", dummy_vector)]) - with open(self.folder + self.file_name + ".vtu", "r") as content_file: + with open(self.folder + self.file_name + "_2.vtu", "r") as content_file: content = self.sliceout(content_file.read()) self.assertTrue(content == self._single_grid_2d_simplex_grid_vtu()) @@ -60,9 +59,9 @@ def test_single_grid_2d_cart(self): dummy_vector = np.ones((3, g.num_cells)) * g.dim save = pp.Exporter(g, self.file_name, self.folder, binary=False) - save.write_vtu({"dummy_scalar": dummy_scalar, "dummy_vector": dummy_vector}) + save.write_vtu([(g, "dummy_scalar", dummy_scalar), (g, "dummy_vector", dummy_vector)]) - with open(self.folder + self.file_name + ".vtu", "r") as content_file: + with open(self.folder + self.file_name + "_2.vtu", "r") as content_file: content = self.sliceout(content_file.read()) self.assertTrue(content == self._single_grid_2d_cart_grid_vtu()) @@ -76,9 +75,9 @@ def test_single_grid_2d_polytop(self): dummy_vector = np.ones((3, g.num_cells)) * g.dim save = pp.Exporter(g, self.file_name, self.folder, binary=False) - save.write_vtu({"dummy_scalar": dummy_scalar, "dummy_vector": dummy_vector}) + save.write_vtu([(g, "dummy_scalar", dummy_scalar), (g, "dummy_vector", dummy_vector)]) - with open(self.folder + self.file_name + ".vtu", "r") as content_file: + with open(self.folder + self.file_name + "_2.vtu", "r") as content_file: content = self.sliceout(content_file.read()) self.assertTrue(content == self._single_grid_2d_polytop_grid_vtu()) @@ -90,9 +89,9 @@ def test_single_grid_3d_simplex(self): dummy_vector = np.ones((3, g.num_cells)) * g.dim save = pp.Exporter(g, self.file_name, self.folder, binary=False) - save.write_vtu({"dummy_scalar": dummy_scalar, "dummy_vector": dummy_vector}) + save.write_vtu([(g, "dummy_scalar", dummy_scalar), (g, "dummy_vector", dummy_vector)]) - with open(self.folder + self.file_name + ".vtu", "r") as content_file: + with open(self.folder + self.file_name + "_3.vtu", "r") as content_file: content = self.sliceout(content_file.read()) self.assertTrue(content == self._single_grid_3d_simplex_grid_vtu()) @@ -104,9 +103,9 @@ def test_single_grid_3d_cart(self): dummy_vector = np.ones((3, g.num_cells)) * g.dim 
save = pp.Exporter(g, self.file_name, self.folder, binary=False) - save.write_vtu({"dummy_scalar": dummy_scalar, "dummy_vector": dummy_vector}) + save.write_vtu([(g, "dummy_scalar", dummy_scalar), (g, "dummy_vector", dummy_vector)]) - with open(self.folder + self.file_name + ".vtu", "r") as content_file: + with open(self.folder + self.file_name + "_3.vtu", "r") as content_file: content = self.sliceout(content_file.read()) self.assertTrue(content == self._single_grid_3d_cart_grid_vtu()) @@ -122,12 +121,13 @@ def test_single_grid_3d_polytop(self): dummy_vector = np.ones((3, g.num_cells)) * g.dim save = pp.Exporter(g, self.file_name, self.folder, binary=False) - save.write_vtu({"dummy_scalar": dummy_scalar, "dummy_vector": dummy_vector}) + save.write_vtu([(g, "dummy_scalar", dummy_scalar), (g, "dummy_vector", dummy_vector)]) - with open(self.folder + self.file_name + ".vtu", "r") as content_file: + with open(self.folder + self.file_name + "_3.vtu", "r") as content_file: content = self.sliceout(content_file.read()) self.assertTrue(content == self._single_grid_3d_polytop_grid_vtu()) + # NOTE: Suggest removing this test. def test_gb_1(self): gb, _ = pp.grid_buckets_2d.single_horizontal([4, 4], simplex=False) @@ -153,9 +153,10 @@ def test_gb_1(self): content = self.sliceout(content_file.read()) self.assertTrue(content == self._gb_1_grid_2_vtu()) - with open(self.folder + self.file_name + "_mortar_1.vtu", "r") as content_file: - content = self.sliceout(content_file.read()) - self.assertTrue(content == self._gb_1_mortar_grid_vtu()) + # The interface contains only implicit/constant data which is currently not exported. + #with open(self.folder + self.file_name + "_mortar_1.vtu", "r") as content_file: + # content = self.sliceout(content_file.read()) + #self.assertTrue(content == self._gb_1_mortar_grid_vtu()) def test_gb_2(self): gb, _ = pp.grid_buckets_2d.two_intersecting( @@ -197,6 +198,8 @@ def test_gb_2(self): content = self.sliceout(content_file.read()) self.assertTrue(content == self._gb_2_mortar_grid_1_vtu()) + # NOTE: This test is not directly testing Exporter, but the capability to export networks. + # Is this a used capability? If not, suggest removing the test and the corresponding capability. def test_fractures_2d(self): p = np.array([[0, 2, 1, 2, 1], [0, 0, 0, 1, 2]]) e = np.array([[0, 2, 3], [1, 3, 4]]) @@ -218,6 +221,8 @@ def test_fractures_2d(self): content = self.sliceout(content_file.read()) self.assertTrue(content == self._test_fractures_2d_vtu()) + # NOTE: This test is not directly testing Exporter, but the capability to export networks. + # Is this a used capability? If not, suggest removing the test and the corresponding capability. 
def test_fractures_3d(self): f_1 = pp.Fracture(np.array([[0, 1, 2, 0], [0, 0, 1, 1], [0, 0, 1, 1]])) f_2 = pp.Fracture( @@ -309,12 +314,6 @@ def _single_grid_1d_grid_vtu(self): 1.00000000000e+00 1.00000000000e+00 - - -1 -1 -1 - 0 @@ -566,27 +565,6 @@ def _single_grid_2d_simplex_grid_vtu(self): 2.00000000000e+00 2.00000000000e+00 - - -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 - 0 @@ -878,25 +856,6 @@ def _single_grid_2d_cart_grid_vtu(self): 2.00000000000e+00 2.00000000000e+00 - - -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 - 0 @@ -1015,11 +974,6 @@ def _single_grid_2d_polytop_grid_vtu(self): 2.00000000000e+00 2.00000000000e+00 - - -2 -2 - 0 @@ -2872,171 +2826,6 @@ def _single_grid_3d_simplex_grid_vtu(self): 3.00000000000e+00 3.00000000000e+00 - - -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 - 0 @@ -6563,77 +6352,10 @@ def _single_grid_3d_cart_grid_vtu(self): 3.00000000000e+00 - -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 -3 - - - -0 -1 -2 + +0 +1 +2 3 4 5 @@ -7387,14 +7109,6 @@ def _single_grid_3d_polytop_grid_vtu(self): 3.00000000000e+00 3.00000000000e+00 - - -3 -3 -3 -3 -3 - 3 @@ -7485,34 +7199,6 @@ def _gb_1_grid_1_vtu(self): 1.00000000000e+00 1.00000000000e+00 - - -1 -1 -1 -1 - - - -1 -1 -1 -1 - - - -0 -0 -0 -0 - - - -0 -0 -0 -0 - 0 @@ -7805,82 +7491,6 @@ def _gb_1_grid_2_vtu(self): 2.00000000000e+00 2.00000000000e+00 - - -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 - - - -0 -0 -0 -0 -0 -0 -0 -0 -0 -0 -0 -0 -0 -0 -0 -0 - - - -0 -0 -0 -0 -0 -0 -0 -0 -0 -0 -0 -0 -0 -0 -0 -0 - - - -0 -0 -0 -0 -0 -0 -0 -0 -0 -0 -0 -0 -0 -0 -0 -0 - 0 @@ -7992,17 +7602,6 @@ def _gb_1_mortar_grid_vtu(self): - -1 -1 -1 -1 -1 -1 -1 -1 - - 0 0 @@ -8104,10 +7703,10 @@ def _gb_2_grid_1_vtu(self): 4 4 5 -4 -5 6 7 +8 +9 @@ -8159,42 +7758,6 @@ def _gb_2_grid_1_vtu(self): 1.00000000000e+00 1.00000000000e+00 - - -1 -1 -1 -1 -1 -1 - - - -1 -1 -1 -1 -2 -2 - - - -0 -0 -0 -0 -0 -0 - - - -0 -0 -0 -0 -0 -0 - 0 @@ -8495,82 +8058,6 @@ def _gb_2_grid_2_vtu(self): 2.00000000000e+00 2.00000000000e+00 - - -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 - - - -0 -0 -0 -0 -0 -0 -0 -0 -0 -0 -0 -0 -0 -0 -0 -0 - - - -0 -0 -0 -0 -0 -0 -0 -0 -0 -0 -0 -0 -0 -0 -0 -0 - - - -0 -0 -0 -0 -0 -0 -0 -0 -0 -0 -0 -0 -0 -0 -0 -0 - 0 @@ -8678,22 +8165,22 @@ def _gb_2_mortar_grid_1_vtu(self): 4 4 5 -4 -5 -5 6 7 -8 -8 -9 +7 8 9 10 -11 10 11 12 13 +14 +15 +16 +17 +18 +19 @@ -8757,66 +8244,6 @@ def _gb_2_mortar_grid_1_vtu(self): 0.00000000000e+00 0.00000000000e+00 - - -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 - - - -0 -0 -0 -0 -0 -0 -0 -0 -1 -1 -1 -1 - - - -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 - - - -1 -1 -1 -1 -2 -2 -2 -2 -1 -1 -2 -2 - 0 From e59bdbcbf0a072b9713f382a32350a05d4bee1ad Mon Sep 17 00:00:00 2001 From: Jakub Both Date: Fri, 13 May 2022 09:13:22 +0200 Subject: [PATCH 18/83] MAINT: Rename nodes,edges -> subdomains/interfaces. 
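This aligns the exporter vocabulary with the mixed-dimensional terminology used in the
rest of the code: grid bucket nodes become subdomains, edges become interfaces. For
reference, a minimal sketch of the data formats the reworked write_vtu accepts (the
names are illustrative only: a grid bucket gb with a subdomain g, an interface e given
as a pair of grids, and a cell-wise array p of shape (g.num_cells,); keys without
explicit values are looked up under pp.STATE):

    save = pp.Exporter(gb, "sol", "./viz")
    save.write_vtu(["pressure"])          # key only: searched on all subdomains/interfaces
    save.write_vtu([(g, "pressure")])     # restrict to one (or a list of) subdomain(s)
    save.write_vtu([(g, "pressure", p)])  # pass the cell values explicitly
    save.write_vtu([(e, "mortar_flux")])  # interface data, addressed by the edge e
    save.write_vtu([("pressure", p)])     # key and values; only for single-grid buckets
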
--- src/porepy/viz/exporter.py | 250 ++++++++++++++++++++----------------- 1 file changed, 133 insertions(+), 117 deletions(-) diff --git a/src/porepy/viz/exporter.py b/src/porepy/viz/exporter.py index 4b97b353f4..eca0d25b4e 100644 --- a/src/porepy/viz/exporter.py +++ b/src/porepy/viz/exporter.py @@ -73,11 +73,13 @@ class Exporter: """ - # Short cuts for some typing used in Exporter - Edge = Tuple[pp.Grid, pp.Grid] + # Introduce types used below + + # Interface between subdomains + Interface = Tuple[pp.Grid, pp.Grid] - # Allowed data structures to define node data - NodeDataInput = Union[ + # Allowed data structures to define data on subdomains + SubdomainDataInput = Union[ str, Tuple[Union[pp.Grid, List[pp.Grid]], str], Tuple[pp.Grid, str, np.ndarray], @@ -85,19 +87,19 @@ class Exporter: ] # Allowed data structures to define edge data - EdgeDataInput = Union[ + InterfaceDataInput = Union[ str, - Tuple[Union[Edge, List[Edge]], str], - Tuple[Edge, str, np.ndarray], + Tuple[Union[Interface, List[Interface]], str], + Tuple[Interface, str, np.ndarray], Tuple[str, np.ndarray], ] # Altogether allowed data structures to define data - DataInput = Union[NodeDataInput, EdgeDataInput] + DataInput = Union[SubdomainDataInput, InterfaceDataInput] # Data structure in which data is stored - NodeData = Dict[Tuple[pp.Grid, str], np.ndarray] - EdgeData = Dict[Tuple[Edge, str], np.ndarray] + SubdomainData = Dict[Tuple[pp.Grid, str], np.ndarray] + InterfaceData = Dict[Tuple[Interface, str], np.ndarray] def __init__( self, @@ -260,17 +262,24 @@ def write_vtu( if time_step is not None: self._exported_time_step_file_names.append(time_step) - # Convert provided data to the most general type: NodeDataBaseType + # Convert provided data to the most general type: SubdomainDataBaseType # Also sort wrt. whether data is associated to nodes or edges. - node_data, edge_data = self._unify_data(data) + subdomain_data, interface_data = self._unify_data(data) # Add geometrical info for nodes and edges - node_data = self._add_extra_node_data(node_data) - edge_data = self._add_extra_edge_data(edge_data) + # TODO + reuse_data = self._reuse_data and self._prev_exported_time_step is not None + if reuse_data: + subdomain_data = self._add_extra_subdomain_data(subdomain_data) + interface_data = self._add_extra_interface_data(interface_data) + #subdomain_data = self._add_extra_subdomain_data(subdomain_data) + #interface_data = self._add_extra_interface_data(interface_data) # Export data and treat node and edge data separately - self._export_data(node_data, time_step) - self._export_data(edge_data, time_step) + if subdomain_data: + self._export_data(subdomain_data, time_step) + if interface_data: + self._export_data(interface_data, time_step) # Export pvd file_name = self._make_file_name(self.file_name, time_step, extension=".pvd") @@ -337,7 +346,7 @@ def write_pvd( def _unify_data( self, data: Optional[Union[DataInput, List[DataInput]]] = None, - ) -> Tuple[NodeData, EdgeData]: + ) -> Tuple[SubdomainData, InterfaceData]: """Bring data in unified format to be further used in the export. The routine has two goals: Splitting data into node and edge data, @@ -350,7 +359,7 @@ def _unify_data( of grids/edges. Returns: - Tuple[NodeData, EdgeData]: Node and edge data decomposed and + Tuple[SubdomainData, InterfaceData]: Subdomain and edge data decomposed and brought into unified format. 
""" @@ -362,8 +371,8 @@ def _unify_data( data = [data] # Initialize container for data associated to nodes - node_data: Dict[NodeDataKey, np.ndarray] = dict() - edge_data: Dict[EdgeDataKey, np.ndarray] = dict() + subdomain_data: Dict[SubdomainDataKey, np.ndarray] = dict() + interface_data: Dict[InterfaceDataKey, np.ndarray] = dict() # Auxiliary function transforming scalar ranged values # to vector ranged values when suitable. @@ -411,7 +420,7 @@ def toVectorFormat(value: np.ndarray, g: pp.Grid) -> np.ndarray: value: np.ndarray = toVectorFormat(d[pp.STATE][key], g) # Add data point in correct format to the collection - node_data[(g, key)] = value + subdomain_data[(g, key)] = value # Try to find the key in the data dictionary associated to edges for e, d in self.gb.edges(): @@ -424,7 +433,7 @@ def toVectorFormat(value: np.ndarray, g: pp.Grid) -> np.ndarray: value: np.ndarray = toVectorFormat(d[pp.STATE][key], mg) # TODO g? # Add data point in correct format to the collection - edge_data[(e, key)] = value + interface_data[(e, key)] = value # Make sure the key exists if not has_key: @@ -454,7 +463,7 @@ def toVectorFormat(value: np.ndarray, g: pp.Grid) -> np.ndarray: value: np.ndarray = toVectorFormat(d[pp.STATE][key], g) # Add data point in correct format to collection - node_data[(g, key)] = value + subdomain_data[(g, key)] = value # Case 2b: Data provided by a tuple ([e], key) where [e] is a single edge # or a list of edges. Idea: Collect the data only among the grids specified. @@ -462,7 +471,7 @@ def toVectorFormat(value: np.ndarray, g: pp.Grid) -> np.ndarray: # By construction, the first component contains grids. # Unify by converting the first component to a list - edges: Union[Edge, List[Edge]] = pt[0] if isinstance(pt[0], list) else [pt[0]] + edges: Union[Interface, List[Interface]] = pt[0] if isinstance(pt[0], list) else [pt[0]] # By construction, the second component contains a key. key: str = pt[1] @@ -481,7 +490,7 @@ def toVectorFormat(value: np.ndarray, g: pp.Grid) -> np.ndarray: value: np.ndarray = toVectorFormat(d[pp.STATE][key], mg) # Add data point in correct format to collection - edge_data[(e, key)] = value + interface_data[(e, key)] = value # TODO include this case at all? It is currently kept as close to the previous use case. @@ -506,7 +515,7 @@ def toVectorFormat(value: np.ndarray, g: pp.Grid) -> np.ndarray: value: np.ndarray = toVectorFormat(pt[1], g) # Add data point in correct format to collection - node_data[(g, key)] = value + subdomain_data[(g, key)] = value # Case 4a: Data provided as tuple (g, key, data). Idea: Since this is # already the desired format, process naturally. @@ -517,65 +526,70 @@ def toVectorFormat(value: np.ndarray, g: pp.Grid) -> np.ndarray: value: np.ndarray = toVectorFormat(pt[2], g) # Add data point in correct format to collection - node_data[(g, key)] = value + subdomain_data[(g, key)] = value # Case 4b: Data provided as tuple (e, key, data). Idea: Since this is # already the desired format, process naturally. 
elif isTupleOf_entity_str_array(pt, entity = "edge"):

 # Data point in correct format
- e: Edge = pt[0]
+ e: Interface = pt[0]
 key: str = pt[1]
 d = self.gb.edge_props(e) # TODO type
 mg = d["mortar_grid"]
 value: np.ndarray = toVectorFormat(pt[2], mg)

 # Add data point in correct format to collection
- edge_data[(e, key)] = value
+ interface_data[(e, key)] = value

 else:
 raise ValueError(f"The provided data type used for {pt} is not supported.")

- return node_data, edge_data
+ return subdomain_data, interface_data

- def _add_extra_node_data(self, node_data: NodeData) -> NodeData:
- """Enahnce node data with geometrical information.
+ def _add_extra_subdomain_data(self, subdomain_data: SubdomainData) -> SubdomainData:
+ """Enhance subdomain data with geometrical information.

 Parameters:
- node_data (NodeData, optional): data contains which will be
+ subdomain_data (SubdomainData, optional): data container which will be
 enhanced

 Returns:
- NodeData: previous node_data with additional information on
+ SubdomainData: previous subdomain_data with additional information on
 the grid dimension, mortar side and whether the grid is mortar.
 """

 # All extra fields to be added to the data container
- self._extra_node_data = ["grid_dim", "is_mortar", "mortar_side"]
+ self._extra_subdomain_data = ["grid_dim", "is_mortar", "mortar_side"]

 # Add info by direct assignment
 for g, d in self.gb.nodes():
 ones = np.ones(g.num_cells, dtype=int)
- node_data[(g, "grid_dim")] = g.dim * ones
- #node_data[(g, "grid_node_number")] = d["node_number"] * ones # TODO?
- node_data[(g, "is_mortar")] = 0 * ones
- node_data[(g, "mortar_side")] = pp.grids.mortar_grid.MortarSides.NONE_SIDE.value * ones
+ subdomain_data[(g, "grid_dim")] = g.dim * ones
+ #subdomain_data[(g, "grid_node_number")] = d["node_number"] * ones # TODO?
+ subdomain_data[(g, "is_mortar")] = 0 * ones
+ subdomain_data[(g, "mortar_side")] = pp.grids.mortar_grid.MortarSides.NONE_SIDE.value * ones
+
+ #TODO include here?
+ #cell_id = meshio_geom[2]
+ ## add also the cells ids
+ #cell_data.update({self.cell_id_key: cell_id})

- return node_data
+ return subdomain_data

- def _add_extra_edge_data(self, edge_data: EdgeData) -> EdgeData:
+ def _add_extra_interface_data(self, interface_data: InterfaceData) -> InterfaceData:
 """Enhance interface data with geometrical information.

 Parameters:
- edge_data (EdgeData, optional): data contains which will be
+ interface_data (InterfaceData, optional): data container which will be
 enhanced

 Returns:
- EdgeData: previous edge_data with additional information on
+ InterfaceData: previous interface_data with additional information on
 the grid dimension, the cell ids, the grid edge number,
 mortar side and whether the grid is mortar.
 """

 # All extra fields to be added to the data container
- self._extra_edge_data = ["grid_dim", "cell_id", "grid_edge_number", "is_mortar", "mortar_side"]
+ self._extra_interface_data = ["grid_dim", "cell_id", "grid_edge_number", "is_mortar", "mortar_side"]

 # Add info by direct assignment - have to take into account values on both sides
 # TODO can't we do this more explicitly, making it shorter and simpler?
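
One way to resolve the TODO above (an untested sketch, not part of this patch, reusing
the same e, mg and interface_data as in the surrounding hunks) is to assemble each
extra field in a single concatenation over the side grids instead of growing the
arrays by repeated np.hstack:

    # Collect the side grids once; offsets give the start index of each side.
    side_grids = [g for _, g in mg.side_grids.items()]
    offsets = np.cumsum([0] + [g.num_cells for g in side_grids])
    interface_data[(e, "grid_dim")] = np.concatenate(
        [g.dim * np.ones(g.num_cells, dtype=int) for g in side_grids]
    )
    interface_data[(e, "cell_id")] = np.concatenate(
        [np.arange(g.num_cells, dtype=int) + off
         for g, off in zip(side_grids, offsets[:-1])]
    )
    # The remaining fields (grid_edge_number, is_mortar, mortar_side) follow the
    # same pattern; mortar_side additionally needs the side enum next to each grid.
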
@@ -585,11 +599,11 @@ def _add_extra_edge_data(self, edge_data: EdgeData) -> EdgeData:
 mg = d["mortar_grid"]

 # Construct empty arrays for all extra edge data
- edge_data[(e, "grid_dim")] = np.empty(0, dtype=int)
- edge_data[(e, "cell_id")] = np.empty(0, dtype=int)
- edge_data[(e, "grid_edge_number")] = np.empty(0, dtype=int)
- edge_data[(e, "is_mortar")] = np.empty(0, dtype=int)
- edge_data[(e, "mortar_side")] = np.empty(0, dtype=int)
+ interface_data[(e, "grid_dim")] = np.empty(0, dtype=int)
+ interface_data[(e, "cell_id")] = np.empty(0, dtype=int)
+ interface_data[(e, "grid_edge_number")] = np.empty(0, dtype=int)
+ interface_data[(e, "is_mortar")] = np.empty(0, dtype=int)
+ interface_data[(e, "mortar_side")] = np.empty(0, dtype=int)

 # Assign extra edge data by collecting values on both sides.
 mg_num_cells = 0
@@ -597,52 +611,52 @@ def _add_extra_edge_data(self, edge_data: EdgeData) -> EdgeData:
 ones = np.ones(g.num_cells, dtype=int)

 # Grid dimension of the mortar grid
- edge_data[(e, "grid_dim")] = np.hstack(
+ interface_data[(e, "grid_dim")] = np.hstack(
 (
- edge_data[(e, "grid_dim")],
+ interface_data[(e, "grid_dim")],
 g.dim * ones
 )
 )

 # Cell ids of the mortar grid
- edge_data[(e, "cell_id")] = np.hstack(
+ interface_data[(e, "cell_id")] = np.hstack(
 (
- edge_data[(e, "cell_id")],
+ interface_data[(e, "cell_id")],
 np.arange(g.num_cells, dtype=int) + mg_num_cells
 )
 )

 # Grid edge number of each edge
- edge_data[(e, "grid_edge_number")] = np.hstack(
+ interface_data[(e, "grid_edge_number")] = np.hstack(
 (
- edge_data[(e, "grid_edge_number")],
+ interface_data[(e, "grid_edge_number")],
 d["edge_number"] * ones
 )
 )

 # Whether the edge is mortar
- edge_data[(e, "is_mortar")] = np.hstack(
+ interface_data[(e, "is_mortar")] = np.hstack(
 (
- edge_data[(e, "is_mortar")],
+ interface_data[(e, "is_mortar")],
 ones
 )
 )

 # Side of the mortar
- edge_data[(e, "mortar_side")] = np.hstack(
+ interface_data[(e, "mortar_side")] = np.hstack(
 (
- edge_data[(e, "mortar_side")],
+ interface_data[(e, "mortar_side")],
 side.value * ones
 )
 )

 mg_num_cells += g.num_cells

- return edge_data
+ return interface_data

 def _export_data(
 self,
- data: Union[NodeData, EdgeData],
+ data: Union[SubdomainData, InterfaceData],
 time_step: int
 ) -> None:
 """Routine for collecting data associated to a single grid dimension
@@ -653,34 +667,36 @@ def _export_data(
 for interfaces.

 Parameters:
- data (Union[NodeData, EdgeData]): Node or edge data. The routine
+ data (Union[SubdomainData, InterfaceData]): Subdomain or interface data. The routine
 determines itself which type the data is and proceeds accordingly.
 time_step (int): time_step to be used to append the file name.
 """

- # Determine whether data corresponds to node or edge data.
- # Node data and edge data will be treated differently throughout
+ # Determine whether data corresponds to subdomain or interface data.
+ # Subdomain data and interface data will be treated differently throughout
 # the export procedure.
- is_node_data = all([isTupleOf_entity_str(pt, entity="node") for pt in data]) - is_edge_data = all([isTupleOf_entity_str(pt, entity="edge") for pt in data]) - if not is_node_data and not is_edge_data: - raise ValueError("data has to be consistently either of node or edge data type.") + is_subdomain_data = all([isTupleOf_entity_str(pt, entity="node") for pt in data]) + is_interface_data = all([isTupleOf_entity_str(pt, entity="edge") for pt in data]) + if not is_subdomain_data and not is_interface_data: + raise ValueError("data has to be consistently either of subdomain or interface data type.") # Collect unique keys, and for unique sorting, sort by alphabet keys = list(set([key for _,key in data])) keys.sort() - # Fetch the dimnensions to be traversed. For nodes, fetch the dimensions - # of the available grids, and for edges fetch the dimensions of the available + time_0 = 0 + + # Fetch the dimnensions to be traversed. For subdomains, fetch the dimensions + # of the available grids, and for interfaces fetch the dimensions of the available # mortar grids. - dims = self.dims if is_node_data else self.m_dims + dims = self.dims if is_subdomain_data else self.m_dims # Collect the data and extra data in a single stack for each dimension for dim in dims: # Define the file name depending on data type - if is_node_data: + if is_subdomain_data: file_name = self._make_file_name(self.file_name, time_step, dim) - elif is_edge_data: + elif is_interface_data: file_name = self._make_file_name_mortar( self.file_name, time_step=time_step, dim=dim ) @@ -689,12 +705,12 @@ def _export_data( file_name = self._make_folder(self.folder_name, file_name) # Get all geometrical entities of dimension dim: subdomains - # with correct grid dimension for node data, and interfaces + # with correct grid dimension for subdomain data, and interfaces # with correct mortar grid dimension for edge data. entities: Union[List[pp.Grid], List[Tuple[pp.Grid, pp.Grid]]] = [] - if is_node_data: + if is_subdomain_data: entities = self.gb.get_grids(lambda g: g.dim == dim) - elif is_edge_data: + elif is_interface_data: entities = [e for e, d in self.gb.edges() if d["mortar_grid"].dim == dim] # Construct the list of fields represented on this dimension. @@ -717,8 +733,8 @@ def _export_data( fields.append(field) # Print data for the particular dimension. Since geometric - # info is required distinguish between node and edge data. - meshio_geom = self.meshio_geom[dim] if is_node_data else self.m_meshio_geom[dim] + # info is required distinguish between subdomain and interface data. + meshio_geom = self.meshio_geom[dim] if is_subdomain_data else self.m_meshio_geom[dim] if meshio_geom is not None: self._write(fields, file_name, meshio_geom) @@ -1312,7 +1328,7 @@ def _write(self, fields: Iterable[Field], file_name: str, meshio_geom) -> None: ) # Exclude/block fixed geometric data as grid points and connectivity information - # by removing fields mesh and extra node/edge data from meshio_grid_to_export. + # by removing fields mesh and extra subdomain/interface data from meshio_grid_to_export. # Do this only if explicitly asked in init and if a suitable previous time step # had been exported already. This data will be copied to the final VTKFile # from a reference file from this previous time step. @@ -1323,14 +1339,14 @@ def _write(self, fields: Iterable[Field], file_name: str, meshio_geom) -> None: # Write mesh information and data to VTK format. 
meshio.write(file_name, meshio_grid_to_export, binary=self.binary)

- # Include the fixed mesh data and the extra node/edge data by copy
+ # Include the fixed mesh data and the extra subdomain/interface data by copy
 # if removed earlier from meshio_grid_to_export.
 if to_copy_prev_exported_data:
 self._copy_fixed_mesh_data(file_name)

- def _remove_fixed_mesh_data(self, meshio_grid: meshio._mesh.Mesh) -> meshio._mesh.Mesh:
- """Remove points, cells, and cell data related to the extra node and
- edge data from given meshio_grid.
+ def _stash_extra_data(self, meshio_grid: meshio._mesh.Mesh) -> meshio._mesh.Mesh:
+ """Remove points, cells, and cell data related to the extra subdomain and
+ interface data from given meshio_grid.

 Auxiliary routine in _write.

@@ -1365,7 +1381,7 @@ def _copy_fixed_mesh_data(self, file_name: str) -> None:
 """Given two VTK files (in xml format), transfer all fixed geometric
 info from a reference to a new file.

- This includes Piece, Points, Cells, and extra node and edge data.
+ This includes Piece, Points, Cells, and extra subdomain and interface data.

 Parameters:
 file_name(str): Incomplete VTK file, to be completed.
@@ -1604,7 +1620,7 @@ def old_write_vtu(

 # Require a grid bucket. Thus, convert grid -> grid bucket.
 if isinstance(gb, pp.Grid):
- # Create a new grid bucket solely with a single grid as nodes.
+ # Create a new grid bucket solely with a single grid as subdomains.
 self.gb = pp.GridBucket()
 self.gb.add_nodes(gb)

@@ -1694,39 +1710,39 @@ def _export_list_data(self, data: List[str], time_step: float) -> None:

 # TODO simplify

- # Extract data which is contained in nodes (and not edges).
- # IMPLEMENTATION NOTE: We need a unique set of keywords for node_data. The simpler
+ # Extract data which is attached to subdomains (and not interfaces).
+ # IMPLEMENTATION NOTE: We need a unique set of keywords for subdomain_data. The simpler
 # option would have been to gather all keys and uniquify by converting to a set,
 # and then back to a list. However, this will make the ordering of the keys random,
 # and it turned out that this complicates testing (see tests/unit/test_vtk).
 # It was therefore considered better to use a more complex loop which
 # (seems to) guarantee a deterministic ordering of the keys.
- node_data = list()
+ subdomain_data = list()
 # For each element in data, apply a brute force approach and check whether there
- # exists a data dictionary associated to a node which contains a state variable
+ # exists a data dictionary associated to a subdomain which contains a state variable
 # with same key. If so, add the key and move on to the next key.
 for key in data:
 for _, d in self.gb.nodes():
 if pp.STATE in d and key in d[pp.STATE]:
- node_data.append(key)
- # After successfully identifying data contained in nodes, break the loop
- # over nodes to avoid any unintended repeated listing of key.
+ subdomain_data.append(key)
+ # After successfully identifying data contained in subdomains, break the loop
+ # over subdomains to avoid any unintended repeated listing of key.
 break

 # Transfer data to fields.
 # TODO Field structure required here? or can we use something standard in Python?
# Collect all provided keywords and store them as fields.
- node_fields: List[Field] = []
- if len(node_data) > 0:
- node_fields.extend([Field(d) for d in node_data])
+ subdomain_fields: List[Field] = []
+ if len(subdomain_data) > 0:
+ subdomain_fields.extend([Field(d) for d in subdomain_data])

- # Include consider the grid_bucket node data
- extra_node_names = ["grid_dim", "grid_node_number", "is_mortar", "mortar_side"]
- extra_node_fields = [Field(name) for name in extra_node_names]
- node_fields.extend(extra_node_fields)
+ # Also include the extra grid_bucket subdomain data
+ extra_subdomain_names = ["grid_dim", "grid_node_number", "is_mortar", "mortar_side"]
+ extra_subdomain_fields = [Field(name) for name in extra_subdomain_names]
+ subdomain_fields.extend(extra_subdomain_fields)

 self.gb.assign_node_ordering(overwrite_existing=False)
- self.gb.add_node_props(extra_node_names)
+ self.gb.add_node_props(extra_subdomain_names)
 # fill the extra data
 for g, d in self.gb:
 ones = np.ones(g.num_cells, dtype=int)
@@ -1739,7 +1755,7 @@ def _export_list_data(self, data: List[str], time_step: float) -> None:
 for dim in self.dims:
 file_name = self._make_file_name(self.file_name, time_step, dim)
 file_name = self._make_folder(self.folder_name, file_name)
- for field in node_fields:
+ for field in subdomain_fields:
 grids = self.gb.get_grids(lambda g: g.dim == dim)
 values = []
 for g in grids:
@@ -1758,33 +1774,33 @@ def _export_list_data(self, data: List[str], time_step: float) -> None:
 field.values = np.hstack(values)

 if self.meshio_geom[dim] is not None:
- self._write(node_fields, file_name, self.meshio_geom[dim])
+ self._write(subdomain_fields, file_name, self.meshio_geom[dim])

- self.gb.remove_node_props(extra_node_names)
+ self.gb.remove_node_props(extra_subdomain_names)

 # Extract data which is contained in edges (and not nodes).
- # IMPLEMENTATION NOTE: See the above loop to construct node_data for an explanation
- # of this elaborate construction of `edge_data`
- edge_data = list()
+ # IMPLEMENTATION NOTE: See the above loop to construct subdomain_data for an explanation
+ # of this elaborate construction of `interface_data`
+ interface_data = list()
 for key in data:
 for _, d in self.gb.edges():
 if pp.STATE in d and key in d[pp.STATE]:
- edge_data.append(key)
+ interface_data.append(key)
 # After successfully identifying data contained in edges, break the loop
 # over edges to avoid any unintended repeated listing of key.
 break

 # Transfer data to fields.
- edge_fields: List[Field] = [] - if len(edge_data) > 0: - edge_fields.extend([Field(d) for d in edge_data]) + interface_fields: List[Field] = [] + if len(interface_data) > 0: + interface_fields.extend([Field(d) for d in interface_data]) # consider the grid_bucket edge data - extra_edge_names = ["grid_dim", "grid_edge_number", "is_mortar", "mortar_side"] - extra_edge_fields = [Field(name) for name in extra_edge_names] - edge_fields.extend(extra_edge_fields) + extra_interface_names = ["grid_dim", "grid_edge_number", "is_mortar", "mortar_side"] + extra_interface_fields = [Field(name) for name in extra_interface_names] + interface_fields.extend(extra_interface_fields) - self.gb.add_edge_props(extra_edge_names) + self.gb.add_edge_props(extra_interface_names) # fill the extra data for _, d in self.gb.edges(): d["grid_dim"] = {} @@ -1816,7 +1832,7 @@ def _export_list_data(self, data: List[str], time_step: float) -> None: e for e, d in self.gb.edges() if cond(d) ] - for field in edge_fields: + for field in interface_fields: values = [] for mg, edge in zip(mgs, edges): if field.name in data: @@ -1830,10 +1846,10 @@ def _export_list_data(self, data: List[str], time_step: float) -> None: field.values = np.hstack(values) if self.m_meshio_geom[dim] is not None: - self._write(edge_fields, file_name, self.m_meshio_geom[dim]) + self._write(interface_fields, file_name, self.m_meshio_geom[dim]) file_name = self._make_file_name(self.file_name, time_step, extension=".pvd") file_name = self._make_folder(self.folder_name, file_name) self._export_pvd_gb(file_name, time_step) - self.gb.remove_edge_props(extra_edge_names) + self.gb.remove_edge_props(extra_interface_names) From 96deca0b94bd8d4eed3f4ff5c36bbd7f262e5dbb Mon Sep 17 00:00:00 2001 From: Jakub Both Date: Fri, 13 May 2022 09:19:11 +0200 Subject: [PATCH 19/83] MAINT: Remove old routines for writing data to vtu format. --- src/porepy/viz/exporter.py | 280 ------------------------------------- 1 file changed, 280 deletions(-) diff --git a/src/porepy/viz/exporter.py b/src/porepy/viz/exporter.py index eca0d25b4e..2243d0c1a3 100644 --- a/src/porepy/viz/exporter.py +++ b/src/porepy/viz/exporter.py @@ -1573,283 +1573,3 @@ def _make_file_name_mortar( else: time = str(time_step).zfill(padding) return file_name + str(dim) + "_" + time + extension - - - - - - #################################################### - # TODO - # Remove! - - # TODO do we need dicts here? do we want to support both for grid buckets (this does not make sense) - def old_write_vtu( - self, - data: Optional[Union[Dict, List[str]]] = None, # TODO update - time_dependent: bool = False, # TODO in use? e.g., when not each time step is printed? or is time_step usually used? - time_step: int = None, - gb: Optional[Union[pp.Grid, pp.GridBucket]] = None, - ) -> None: - """ - Interface function to export the grid and additional data with meshio. - - In 1d the cells are represented as lines, 2d the cells as polygon or - triangle/quad, while in 3d as polyhedra/tetrahedra/hexahedra. - In all the dimensions the geometry of the mesh needs to be computed. - - Parameters: - data: if g is a single grid then data is a dictionary (see example) - if g is a grid bucket then list of names for optional data, - they are the keys in the grid bucket (see example). - time_dependent: (boolean, optional) If False, file names will not be appended with - an index that markes the time step. Can be overridden by giving - a value to time_step. 
- time_step: (optional) in a time dependent problem defines the part of the file - name associated with this time step. If not provided, subsequent - time steps will have file names ending with 0, 1, etc. - grid: (optional) in case of changing grid set a new one. - - """ - - # Update the grid (bucket) but only if allowed, i.e., the initial grid - # (bucket) has not been characterized as fixed. - if self.fixed_grid and gb is not None: - raise ValueError("Inconsistency in exporter setting") - - elif not self.fixed_grid and gb is not None: - - # Require a grid bucket. Thus, convert grid -> grid bucket. - if isinstance(gb, pp.Grid): - # Create a new grid bucket solely with a single grid as subdomains - self.gb = pp.GridBucket() - self.gb.add_nodes(gb) - - else: - self.gb = gb - - # Update geometrical info in meshio format for the updated grid - self._update_meshio_geom() - - # Reset control parameter for the reuse of fixed data in the - # write routine, so the updated mesh is used for further export. - self._prev_exported_time_step = None - - # TODO rm? OK to keep for now! - # If the problem is time dependent, but no time step is set, we set one - # using the updated, internal counter. - if time_dependent and time_step is None: - time_step = self._time_step_counter - self._time_step_counter += 1 - - # If the problem is time dependent (with specified or automatic time step index) - # add the time step to the exported files - if time_step is not None: - self._exported_time_step_file_names.append(time_step) - - # TODO change this! - # NOTE grid buckets are exported with dict data, while single - # grids (converted to grid buckets) are exported with list data. - if isinstance(data, list): - self._export_list_data(data, time_step) # type: ignore - elif isinstance(data, dict): - self._export_dict_data(data, time_step) - else: - raise NotImplemented("No other data type than list and dict supported.") - - # Update previous time step used for export to be used in the next application. - if time_step is not None: - self._prev_exported_time_step = time_step - - def _export_dict_data(self, data: Dict[str, np.ndarray], time_step): - """ - Export single grid to a vtu file with dictionary data. - - Parameters: - data (Dict[str, np.ndarray]): Data to be exported. - time_step (float) : Time step, to be appended at the vtu output file. - """ - # Currently this method does only support single grids. - if self.dims.shape[0] > 1: - raise NotImplementedError("Grid buckets with more than one grid are not supported.") - - # No need of special naming, create the folder - name = self._make_folder(self.folder_name, self.file_name) - name = self._make_file_name(name, time_step) - - # Provide an empty dict if data is None - if data is None: - data = dict() - - # Store the provided data as list of Fields - fields: List[Field] = [] - if len(data) > 0: - fields.extend([Field(n, v) for n, v in data.items()]) - - # Extract grid (it is assumed it is the only one) - grid = [g for g, _ in self.gb.nodes()][0] - - # Add grid dimension to the data - grid_dim = grid.dim * np.ones(grid.num_cells, dtype=int) - fields.extend([Field("grid_dim", grid_dim)]) - - # Write data to file - self._write(fields, name, self.meshio_geom[grid.dim]) - - def _export_list_data(self, data: List[str], time_step: float) -> None: - """Export the entire GridBucket and additional list data to vtu. - - Parameters: - data (List[str]): Data to be exported in addition to default GridBucket data. 
- time_step (float) : Time step, to be appended at the vtu output file. - """ - # Convert data to list, or provide an empty list - if data is not None: - data = np.atleast_1d(data).tolist() - else: - data = list() - - # TODO simplify - - # Extract data which is attached to subdomains (and not inerfaces). - # IMPLEMENTATION NOTE: We need a unique set of keywords for subdomain_data. The simpler - # option would have been to gather all keys and uniquify by converting to a set, - # and then back to a list. However, this will make the ordering of the keys random, - # and it turned out that this complicates testing (see tests/unit/test_vtk). - # It was therefore considered better to use a more complex loop which - # (seems to) guarantee a deterministic ordering of the keys. - subdomain_data = list() - # For each element in data, apply a brute force approach and check whether there - # exists a data dictionary associated to a subdomain which contains a state variable - # with same key. If so, add the key and move on to the next key. - for key in data: - for _, d in self.gb.nodes(): - if pp.STATE in d and key in d[pp.STATE]: - subdomain_data.append(key) - # After successfully identifying data contained in subdomains, break the loop - # over subdomains to avoid any unintended repeated listing of key. - break - - # Transfer data to fields. - # TODO Field structure required here? or can we use something standard in Python? - # Collect all provided keywords and - subdomain_fields: List[Field] = [] - if len(subdomain_data) > 0: - subdomain_fields.extend([Field(d) for d in subdomain_data]) - - # Include consider the grid_bucket subdomain data - extra_subdomain_names = ["grid_dim", "grid_node_number", "is_mortar", "mortar_side"] - extra_subdomain_fields = [Field(name) for name in extra_subdomain_names] - subdomain_fields.extend(extra_subdomain_fields) - - self.gb.assign_node_ordering(overwrite_existing=False) - self.gb.add_node_props(extra_subdomain_names) - # fill the extra data - for g, d in self.gb: - ones = np.ones(g.num_cells, dtype=int) - d["grid_dim"] = g.dim * ones - d["grid_node_number"] = d["node_number"] * ones - d["is_mortar"] = 0 * ones - d["mortar_side"] = pp.grids.mortar_grid.MortarSides.NONE_SIDE.value * ones - - # collect the data and extra data in a single stack for each dimension - for dim in self.dims: - file_name = self._make_file_name(self.file_name, time_step, dim) - file_name = self._make_folder(self.folder_name, file_name) - for field in subdomain_fields: - grids = self.gb.get_grids(lambda g: g.dim == dim) - values = [] - for g in grids: - if field.name in data: - values.append(self.gb.node_props(g, pp.STATE)[field.name]) - else: - values.append(self.gb.node_props(g, field.name)) - # Check whether the latest value is valid and has size correpsonding to - # the grid size. - if values[-1] is None: - raise ValueError( - "Field " + str(field.name) + " must be filled. It can not be None." - ) - if np.atleast_2d(values[-1]).shape[1] != g.num_cells: - raise ValueError("Field " + str(field.name) + " has wrong dimension.") - field.values = np.hstack(values) - - if self.meshio_geom[dim] is not None: - self._write(subdomain_fields, file_name, self.meshio_geom[dim]) - - self.gb.remove_node_props(extra_subdomain_names) - - # Extract data which is contained in edges (and not nodes). 
- # IMPLEMENTATION NOTE: See the above loop to construct subdomain_data for an explanation - # of this elaborate construction of `interface_data` - interface_data = list() - for key in data: - for _, d in self.gb.edges(): - if pp.STATE in d and key in d[pp.STATE]: - interface_data.append(key) - # After successfully identifying data contained in edges, break the loop - # over edges to avoid any unintended repeated listing of key. - break - - # Transfer data to fields. - interface_fields: List[Field] = [] - if len(interface_data) > 0: - interface_fields.extend([Field(d) for d in interface_data]) - - # consider the grid_bucket edge data - extra_interface_names = ["grid_dim", "grid_edge_number", "is_mortar", "mortar_side"] - extra_interface_fields = [Field(name) for name in extra_interface_names] - interface_fields.extend(extra_interface_fields) - - self.gb.add_edge_props(extra_interface_names) - # fill the extra data - for _, d in self.gb.edges(): - d["grid_dim"] = {} - d["cell_id"] = {} - d["grid_edge_number"] = {} - d["is_mortar"] = {} - d["mortar_side"] = {} - mg = d["mortar_grid"] - mg_num_cells = 0 - for side, g in mg.side_grids.items(): - ones = np.ones(g.num_cells, dtype=int) - d["grid_dim"][side] = g.dim * ones - d["is_mortar"][side] = ones - d["mortar_side"][side] = side.value * ones - d["cell_id"][side] = np.arange(g.num_cells, dtype=int) + mg_num_cells - mg_num_cells += g.num_cells - d["grid_edge_number"][side] = d["edge_number"] * ones - - # collect the data and extra data in a single stack for each dimension - for dim in self.m_dims: - file_name = self._make_file_name_mortar( - self.file_name, time_step=time_step, dim=dim - ) - file_name = self._make_folder(self.folder_name, file_name) - - mgs = self.gb.get_mortar_grids(lambda g: g.dim == dim) - cond = lambda d: d["mortar_grid"].dim == dim - edges: List[Tuple[pp.Grid, pp.Grid]] = [ - e for e, d in self.gb.edges() if cond(d) - ] - - for field in interface_fields: - values = [] - for mg, edge in zip(mgs, edges): - if field.name in data: - values.append(self.gb.edge_props(edge, pp.STATE)[field.name]) - else: - for side, _ in mg.side_grids.items(): - # Convert edge to tuple to be compatible with GridBucket - # data structure - values.append(self.gb.edge_props(edge, field.name)[side]) - - field.values = np.hstack(values) - - if self.m_meshio_geom[dim] is not None: - self._write(interface_fields, file_name, self.m_meshio_geom[dim]) - - file_name = self._make_file_name(self.file_name, time_step, extension=".pvd") - file_name = self._make_folder(self.folder_name, file_name) - self._export_pvd_gb(file_name, time_step) - - self.gb.remove_edge_props(extra_interface_names) From f136e3892507e86f58bfd78544bec106c2ba0a94 Mon Sep 17 00:00:00 2001 From: Jakub Both Date: Fri, 13 May 2022 14:40:59 +0200 Subject: [PATCH 20/83] MAINT/DOC: Unify construction of names (construct method for mortars), add doc. 
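
With the unified helper, every file name is assembled as
file_name + dim_extension + time_extension + extension, where the time step is
zero-padded to self.padding (6) digits. Illustrative values, assuming an Exporter
created with file_name="sol":

    self._make_file_name("sol")                      # "sol.vtu"
    self._make_file_name("sol", time_step=3)         # "sol_000003.vtu"
    self._make_file_name("sol", time_step=3, dim=2)  # "sol_2_000003.vtu"

Note that mortar files now pass the prefix "sol_mortar_" through the same helper,
which appears to yield "sol_mortar__1_000003.vtu" with a double underscore before
the dimension, whereas the removed _make_file_name_mortar produced
"sol_mortar_1_000003.vtu"; worth double-checking against the file names expected
by the tests.
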
--- src/porepy/viz/exporter.py | 76 ++++++++++++++++++++++----------------
 1 file changed, 44 insertions(+), 32 deletions(-)

diff --git a/src/porepy/viz/exporter.py b/src/porepy/viz/exporter.py
index 2243d0c1a3..4c1c9857dc 100644
--- a/src/porepy/viz/exporter.py
+++ b/src/porepy/viz/exporter.py
@@ -695,10 +695,12 @@ def _export_data(
 for dim in dims:
 # Define the file name depending on data type
 if is_subdomain_data:
- file_name = self._make_file_name(self.file_name, time_step, dim)
+ file_name = self._make_file_name(
+ self.file_name, time_step, dim
+ )
 elif is_interface_data:
- file_name = self._make_file_name_mortar(
- self.file_name, time_step=time_step, dim=dim
+ file_name = self._make_file_name(
+ self.file_name + "_mortar_", time_step, dim
 )

 # Append folder name to file name
@@ -766,14 +768,14 @@ def _export_pvd_gb(self, file_name: str, time_step: int) -> None:
 for dim in self.dims:
 if self.meshio_geom[dim] is not None:
 o_file.write(
- fm % self._make_file_name(self.file_name, time_step, dim=dim)
+ fm % self._make_file_name(self.file_name, time_step, dim)
 )

 # Same for mortar grids.
 for dim in self.m_dims:
 if self.m_meshio_geom[dim] is not None:
 o_file.write(
- fm % self._make_file_name_mortar(self.file_name, dim, time_step)
+ fm % self._make_file_name(self.file_name + "_mortar_", time_step, dim)
 )

 o_file.write("\n" + "")
@@ -1529,47 +1531,57 @@ def _update_meshio_geom(self):
 self.m_meshio_geom[dim] = self._export_grid(mg, dim)

 def _make_folder(self, folder_name: Optional[str] = None, name: str = "") -> str:
+ """Auxiliary method setting up potentially non-existent folder structure.
+
+ Parameters:
+ folder_name (Optional[str]): name of the folder
+ name (str): prefix of the name of the files to be written
+
+ Returns:
+ str: complete path to the files to be written.
+ """
+
+ # If no folder_name is prescribed, the files will be stored in the
+ # same folder.
 if folder_name is None:
 return name

+ # Set up folder structure if not existent.
 if not os.path.exists(folder_name):
 os.makedirs(folder_name)
+
+ # Return full path.
 return os.path.join(folder_name, name)

 def _make_file_name(
 self,
 file_name: str,
- time_step: Optional[float] = None,
+ time_step: Optional[Union[float, int]] = None,
 dim: Optional[int] = None,
 extension: str = ".vtu",
 ) -> str:
+ """Auxiliary method for setting up the file name.

- padding = self.padding
- if dim is None: # normal grid
- if time_step is None:
- return file_name + extension
- else:
- time = str(time_step).zfill(padding)
- return file_name + "_" + time + extension
- else: # part of a grid bucket
- if time_step is None:
- return file_name + "_" + str(dim) + extension
- else:
- time = str(time_step).zfill(padding)
- return file_name + "_" + str(dim) + "_" + time + extension
+ The final name is built as a combination of a prescribed prefix,
+ and possibly the dimension of the underlying grid and the time (step)
+ the related data is associated to.

- def _make_file_name_mortar(
- self, file_name: str, dim: int, time_step: Optional[float] = None
- ) -> str:
+ Parameters:
+ file_name (str): prefix of the name of file to be exported
+ time_step (Optional[Union[float, int]]): time or time step (index)
+ dim (Optional[int]): dimension of the exported grid
+ extension (str): extension of the file, typically file ending
+ defining the format
+
+ Returns:
+ str: complete name of file
+ """

- # we keep the order as in _make_file_name
- assert dim is not None
+ # Define non-empty time step extension including zero padding.
+ time_extension = "" if time_step is None else "_" + str(time_step).zfill(self.padding) - extension = ".vtu" - file_name = file_name + "_mortar_" - padding = self.padding - if time_step is None: - return file_name + str(dim) + extension - else: - time = str(time_step).zfill(padding) - return file_name + str(dim) + "_" + time + extension + # Define non-empty dim extension + dim_extension = "" if dim is None else "_" + str(dim) + + # Combine prefix and extensions to define the complete name + return file_name + dim_extension + time_extension + extension From 5bc4c01019cbcbaf58418744f2003f3cb19a10b3 Mon Sep 17 00:00:00 2001 From: Jakub Both Date: Fri, 13 May 2022 14:53:14 +0200 Subject: [PATCH 21/83] MAINT/DOC: Add doc and reorder methods. --- src/porepy/viz/exporter.py | 45 ++++++++++++++++++++++++-------------- 1 file changed, 29 insertions(+), 16 deletions(-) diff --git a/src/porepy/viz/exporter.py b/src/porepy/viz/exporter.py index 4c1c9857dc..cc561bb085 100644 --- a/src/porepy/viz/exporter.py +++ b/src/porepy/viz/exporter.py @@ -781,6 +781,27 @@ def _export_pvd_gb(self, file_name: str, time_step: int) -> None: o_file.write("\n" + "") o_file.close() + def _update_meshio_geom(self) -> None: + """ + Auxiliary method upating internal copies of the grids. Merely asks for + exporting grids. + """ + # Subdomains + for dim in self.dims: + # Get grids with dimension dim + g = self.gb.get_grids(lambda g: g.dim == dim) + # Export and store + self.meshio_geom[dim] = self._export_grid(g, dim) + + # Interfaces + for dim in self.m_dims: + # Extract the mortar grids for dimension dim + mgs = self.gb.get_mortar_grids(lambda g: g.dim == dim) + # It contains the mortar grids "unrolled" by sides + mg = np.array([g for m in mgs for _, g in m.side_grids.items()]) + # Export and store + self.m_meshio_geom[dim] = self._export_grid(mg, dim) + def _export_grid( self, gs: Iterable[pp.Grid], dim: int ) -> Union[None, Tuple[np.ndarray, np.ndarray, np.ndarray]]: @@ -1508,30 +1529,21 @@ def fetch_data_array( # Loop over all extra data to check whether it is included in the reference # file. If so, copy it. - for key in extra_node_edge_names: + for key in extra_data: ref_data_array = fetch_data_array(ref_vtkfile, key, check_existence=False) new_data_array = fetch_data_array(new_vtkfile, key, check_existence=False) + # Copy data only if it is included in the reference file, but not the current. if ref_data_array is not None and new_data_array is None: new_cell_data.append(ref_data_array) - + # Write updates to file tree.write(file_name) - def _update_meshio_geom(self): - for dim in self.dims: - g = self.gb.get_grids(lambda g: g.dim == dim) - self.meshio_geom[dim] = self._export_grid(g, dim) - - for dim in self.m_dims: - # extract the mortar grids for dimension dim - mgs = self.gb.get_mortar_grids(lambda g: g.dim == dim) - # it contains the mortar grids "unrolled" by sides - mg = np.array([g for m in mgs for _, g in m.side_grids.items()]) - self.m_meshio_geom[dim] = self._export_grid(mg, dim) - def _make_folder(self, folder_name: Optional[str] = None, name: str = "") -> str: - """Auxiliary method setting up potentially non-existent folder structure. + """ + Auxiliary method setting up potentially non-existent folder structure and + setting up a path for exporting. Parameters: folder_name (Optional[str]): name of the folder @@ -1560,7 +1572,8 @@ def _make_file_name( dim: Optional[int] = None, extension: str = ".vtu", ) -> str: - """Auxiliary method to setting up file name. 
+ """ + Auxiliary method to setting up file name. The final name is build as combination of a prescribed prefix, and possibly the dimension of underlying grid and time (step) From 6045900277099dcf48617bc45ffc5f5f1d8b81b7 Mon Sep 17 00:00:00 2001 From: Jakub Both Date: Fri, 13 May 2022 15:02:08 +0200 Subject: [PATCH 22/83] DOC: Update doc of init. --- src/porepy/viz/exporter.py | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/src/porepy/viz/exporter.py b/src/porepy/viz/exporter.py index cc561bb085..4ad12082b9 100644 --- a/src/porepy/viz/exporter.py +++ b/src/porepy/viz/exporter.py @@ -114,16 +114,14 @@ def __init__( gb (Union[pp.Grid, pp.Gridbucket]): grid or gridbucket containing all mesh information to be exported. file_name (str): basis for file names used for storing the output - folder_name (str, optional): folder, all files are stored in - kwargs: Optional keywords. 1. fixed_grid (boolean) to control whether - the grid(bucket) may be redfined; 2. binary (boolena) to control - whether the output is using binary or non binary format; - 3. reuse_data (boolean) to control whether geometrical data is - reused from the previous time step when printing to file. + folder_name (str, optional): folder name, all files are stored in + kwargs (optional): Optional keywords; + 'fixed_grid' (boolean) to control whether the grid(bucket) may be redfined; + 'binary' (boolean) controling whether data is stored in binary format; + 'reuse_data' (boolean) controling whether geometrical data is + reused from the previous time step when exporting to file. """ - # TODO what is the right format for describing kwargs? - # Exporter is operating on grid buckets. If a grid is provided, convert to a grid bucket. if isinstance(gb, pp.Grid): self.gb = pp.GridBucket() From 74a33954c9561619c4bcbc6f5ae412f6e3e90e38 Mon Sep 17 00:00:00 2001 From: Jakub Both Date: Sat, 14 May 2022 00:35:44 +0200 Subject: [PATCH 23/83] MAINT: Make numba a requirement. --- src/porepy/viz/exporter.py | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/src/porepy/viz/exporter.py b/src/porepy/viz/exporter.py index 4ad12082b9..d79e346cbd 100644 --- a/src/porepy/viz/exporter.py +++ b/src/porepy/viz/exporter.py @@ -158,15 +158,13 @@ def __init__( num_m_dims = self.m_dims.size self.m_meshio_geom = dict(zip(self.m_dims, [tuple()] * num_m_dims)) # type: ignore - # Check if numba is installed - self._has_numba: bool = "numba" in sys.modules - # Generate geometrical information in meshio format self._update_meshio_geom() # Counter for time step. Will be used to identify files of individual time step, # unless this is overridden by optional parameters in write self._time_step_counter: int = 0 + # Storage for file name extensions for time steps self._exported_time_step_file_names: List[int] = [] @@ -180,6 +178,11 @@ def __init__( # Parameter to be used in several occasions for adding time stamps. self.padding = 6 + # Require numba + if "numba" not in sys.modules: + raise NotImplementedError("The sorting algorithm requires numba to be installed.") + + # TODO isn't this method obsolete? def change_name(self, file_name: str) -> None: """ Change the root name of the files, useful when different keywords are @@ -230,15 +233,12 @@ def write_vtu( # (bucket) has not been characterized as fixed. if self.fixed_grid and gb is not None: raise ValueError("Inconsistency in exporter setting") - elif not self.fixed_grid and gb is not None: - # Require a grid bucket. Thus, convert grid -> grid bucket. 
 if isinstance(gb, pp.Grid):
 # Create a new grid bucket solely with a single grid as nodes.
 self.gb = pp.GridBucket()
 self.gb.add_nodes(gb)
-
 else:
 self.gb = gb

@@ -1044,10 +1044,7 @@ def _export_2d(
 cfn = np.ravel(cfn_indices, order="F").reshape(n,-1).T

 # Sort faces for each cell such that they form a chain. Use a function
- # compiled with Numba. Currently, no alternative is provided when Numba
- # is not installed. This step is the bottleneck of this routine.
- if not self._has_numba:
- raise NotImplementedError("The sorting algorithm requires numba to be installed.")
+ # compiled with Numba. This step is the bottleneck of this routine.
 cfn = self._sort_point_pairs_numba(cfn).astype(int)

 # For each cell pick the sorted nodes such that they form a chain and thereby

From 8bc05f6c88fb33a10a4bdcdc0ee8328b71e91f0c Mon Sep 17 00:00:00 2001
From: Jakub Both
Date: Sat, 14 May 2022 00:41:08 +0200
Subject: [PATCH 24/83] MAINT/DOC: Rename routines, and add doc.

---
 src/porepy/viz/exporter.py | 105 ++++++++++++++++++------------------
 1 file changed, 50 insertions(+), 55 deletions(-)

diff --git a/src/porepy/viz/exporter.py b/src/porepy/viz/exporter.py
index d79e346cbd..663482f168 100644
--- a/src/porepy/viz/exporter.py
+++ b/src/porepy/viz/exporter.py
@@ -204,7 +204,7 @@ def write_vtu(
 data: Optional[Union[DataInput, List[DataInput]]] = None,
 time_dependent: Optional[bool] = False, # TODO in use? e.g., when not each time step is printed? or is time_step usually used?
 time_step: Optional[int] = None,
- gb: Optional[Union[pp.Grid, pp.GridBucket]] = None, # TODO what if it is none?
+ gb: Optional[Union[pp.Grid, pp.GridBucket]] = None,
 ) -> None:
 """
 Interface function to export the grid and additional data with meshio.
@@ -221,7 +221,7 @@ def write_vtu(
 time_dependent (boolean, optional): If False, file names will not be
 appended with an index that marks the time step. Can be
 overwritten by giving a value to time_step; if not,
- the file names will subsequently be ending with 0, 1, etc.
+ the file names will subsequently end with 1, 2, etc.
 time_step (int, optional): will be used as a suffix to define
 the file corresponding to this specific time step.
 gb (Union[pp.Grid, pp.Gridbucket], optional): grid or gridbucket
@@ -255,14 +255,14 @@ def write_vtu(
 time_step = self._time_step_counter
 self._time_step_counter += 1

- # If the problem is time dependent (with specified or automatic time step index)
- # add the time step to the exported files
+ # If time step is prescribed, store it.
 if time_step is not None:
 self._exported_time_step_file_names.append(time_step)

- # Convert provided data to the most general type: SubdomainDataBaseType
- # Also sort wrt. whether data is associated to nodes or edges.
+ # Preprocessing step with two main goals:
+ # 1. Sort wrt. whether data is associated to subdomains or interfaces.
+ # 2. Unify data type.
+ subdomain_data, interface_data = self._sort_and_unify_data(data) # Add geometrical info for nodes and edges # TODO @@ -275,14 +275,14 @@ def write_vtu( # Export data and treat node and edge data separately if subdomain_data: - self._export_data(subdomain_data, time_step) + self._export_data_vtu(subdomain_data, time_step, "subdomain data") if interface_data: - self._export_data(interface_data, time_step) + self._export_data_vtu(interface_data, time_step, "interface data") - # Export pvd + # Export grid bucket to pvd format file_name = self._make_file_name(self.file_name, time_step, extension=".pvd") - file_name = self._make_folder(self.folder_name, file_name) - self._export_pvd_gb(file_name, time_step) + file_name = self._append_folder_name(self.folder_name, file_name) + self._export_gb_pvd(file_name, time_step) # Update previous time step used for export to be used in the next application. if time_step is not None: @@ -319,7 +319,7 @@ def write_pvd( assert file_extension is not None # make mypy happy - o_file = open(self._make_folder(self.folder_name, self.file_name) + ".pvd", "w") + o_file = open(self._append_folder_name(self.folder_name, self.file_name) + ".pvd", "w") b = "LittleEndian" if sys.byteorder == "little" else "BigEndian" c = ' compressor="vtkZLibDataCompressor"' header = ( @@ -342,14 +342,15 @@ def write_pvd( # Some auxiliary routines used in write_vtu() - def _unify_data( + def _sort_and_unify_data( self, data: Optional[Union[DataInput, List[DataInput]]] = None, ) -> Tuple[SubdomainData, InterfaceData]: - """Bring data in unified format to be further used in the export. + """ + Preprocess data. - The routine has two goals: Splitting data into node and edge data, - as well as transforming all data to be exported into dictionaries - of the format {(grid/edge, key): value)}. + The routine has two goals: + 1. Splitting data into subdomain and interface data. + 2. Unify the data format into tuples of grid/edge, name, data array. Parameters: data (Union[DataInput, List[DataInput]], optional): data @@ -652,65 +653,59 @@ def _add_extra_interface_data(self, interface_data: InterfaceData) -> InterfaceD return interface_data - def _export_data( + def _export_data_vtu( self, data: Union[SubdomainData, InterfaceData], - time_step: int + time_step: int, + data_type: str, ) -> None: - """Routine for collecting data associated to a single grid dimension + """ + Collect data associated to a single grid dimension and passing further to the final writing routine. For each fixed dimension, all subdomains of that dimension and the data - related to that subdomains will be exported simultaneously. The same - for interfaces. + related to that subdomains will be exported simultaneously. + The same for interfaces. Parameters: data (Union[SubdomainData, InterfaceData]): Subdomain or edge data. The routine notices itself of which type the data is and proceeds accordingly. - time_step (int): time_step to be used to append the file name. + time_step (int): time_step to be used to append the file name + data_type (str): has to be "subdomain data" or "interface data" indicating the + type of the data. """ - # Deterimine whether data corresponds to subdomain or interface data. - # Subdomain data and interface data will be treated differently throughout - # the export procedure. 
- is_subdomain_data = all([isTupleOf_entity_str(pt, entity="node") for pt in data]) - is_interface_data = all([isTupleOf_entity_str(pt, entity="edge") for pt in data]) - if not is_subdomain_data and not is_interface_data: - raise ValueError("data has to be consistently either of subdomain or interface data type.") - - # Collect unique keys, and for unique sorting, sort by alphabet - keys = list(set([key for _,key in data])) - keys.sort() - - time_0 = 0 + # Determine whether data corresponds to subdomain or interface data. + assert(data_type in ["subdomain data", "interface data"]) + is_subdomain_data = data_type == "subdomain data" # Fetch the dimnensions to be traversed. For subdomains, fetch the dimensions # of the available grids, and for interfaces fetch the dimensions of the available # mortar grids. dims = self.dims if is_subdomain_data else self.m_dims + # Define file name (extend in case of interface data) + file_name_base: str = self.file_name if is_subdomain_data else self.file_name + "_mortar_" + # Append folder name to file name base + file_name_base: str = self._append_folder_name(self.folder_name, file_name_base) + + # Collect unique keys, and for unique sorting, sort by alphabet + keys = list(set([key for _,key in data])) + keys.sort() + # Collect the data and extra data in a single stack for each dimension for dim in dims: - # Define the file name depending on data type - if is_subdomain_data: - file_name = self._make_file_name( - self.file_name, time_step, dim - ) - elif is_interface_data: - file_name = self._make_file_name( - self.file_name + "_mortar_", time_step, dim - ) - - # Append folder name to file name - file_name = self._make_folder(self.folder_name, file_name) + # Define the file name + file_name: str = self._make_file_name(file_name_base, time_step, dim) # Get all geometrical entities of dimension dim: subdomains # with correct grid dimension for subdomain data, and interfaces # with correct mortar grid dimension for edge data. + # TODO update and simplify when new GridTree structure is in place. entities: Union[List[pp.Grid], List[Tuple[pp.Grid, pp.Grid]]] = [] if is_subdomain_data: entities = self.gb.get_grids(lambda g: g.dim == dim) - elif is_interface_data: + else: entities = [e for e, d in self.gb.edges() if d["mortar_grid"].dim == dim] # Construct the list of fields represented on this dimension. @@ -738,7 +733,7 @@ def _export_data( if meshio_geom is not None: self._write(fields, file_name, meshio_geom) - def _export_pvd_gb(self, file_name: str, time_step: int) -> None: + def _export_gb_pvd(self, file_name: str, time_step: int) -> None: """Routine to export to pvd format and collect all data scattered over several files for distinct grid dimensions. @@ -1535,13 +1530,13 @@ def fetch_data_array( # Write updates to file tree.write(file_name) - def _make_folder(self, folder_name: Optional[str] = None, name: str = "") -> str: + def _append_folder_name(self, folder_name: Optional[str] = None, name: str = "") -> str: """ Auxiliary method setting up potentially non-existent folder structure and setting up a path for exporting. 
Parameters:
- folder_name (Optional[str]): name of the folder
+ folder_name (str, optional): name of the folder
 name (str): prefix of the name of the files to be written
 
 Returns:
@@ -1576,8 +1571,8 @@ def _make_file_name(
 
 Parameters:
 file_name (str): prefix of the name of file to be exported
- time_step (Optional[Union[float int]]): time or time step (index)
- dim (Optional[int]): dimension of the exported grid
+ time_step (Union[float, int], optional): time or time step (index)
+ dim (int, optional): dimension of the exported grid
 extension (str): extension of the file, typically file ending
 defining the format
 
From 09811b09ad59f80a441daf4616477e523f64bea4 Mon Sep 17 00:00:00 2001
From: Jakub Both 
Date: Sat, 14 May 2022 00:42:34 +0200
Subject: [PATCH 25/83] MAINT: Include improved type tests, remove redundant
 file.

---
 src/porepy/viz/exporter.py | 120 +++++++++++++++++++++++++++---------
 src/porepy/viz/type_test.py | 72 ----------------------
 2 files changed, 92 insertions(+), 100 deletions(-)
 delete mode 100644 src/porepy/viz/type_test.py

diff --git a/src/porepy/viz/exporter.py b/src/porepy/viz/exporter.py
index 663482f168..98855abbd9 100644
--- a/src/porepy/viz/exporter.py
+++ b/src/porepy/viz/exporter.py
@@ -20,8 +20,6 @@
 
 import porepy as pp
 
-from porepy.viz.type_test import *
-
 # Module-wide logger
 logger = logging.getLogger(__name__)
 
@@ -392,6 +390,75 @@ def toVectorFormat(value: np.ndarray, g: pp.Grid) -> np.ndarray:
 
 return value
 
+ # Auxiliary functions for type checking
+
+ def isinstance_interface(e: Tuple[pp.Grid, pp.Grid]) -> bool:
+ """
+ Implementation of isinstance(e, Tuple[pp.Grid, pp.Grid]).
+ """
+ return (
+ isinstance(e, tuple) and
+ len(e) == 2 and
+ isinstance(e[0], pp.Grid) and
+ isinstance(e[1], pp.Grid)
+ )
+
+ def isinstance_Tuple_subdomain_str(t: Tuple[Union[pp.Grid, List[pp.Grid]], str]) -> bool:
+ # TODO flip the order
+ """
+ Implementation of isinstance(t, Tuple[Union[pp.Grid, List[pp.Grid]], str]).
+
+ Detects data input of type (g1, "name") and ([g1, g2, ...], "name"),
+ where g1, g2, ... are subdomains.
+ """
+ # Implementation of isinstance(Tuple[pp.Grid, str], t)
+ types = list(map(type, t))
+ if isinstance(t[0], pp.Grid) and types[1:] == [str]:
+ return True
+ # Implementation of isinstance(Tuple[List[pp.Grid], str], t)
+ elif types == [list, str]:
+ return all([isinstance(g, pp.Grid) for g in t[0]])
+ else:
+ return False
+
+ def isinstance_Tuple_interface_str(t: Tuple[Union[Tuple[pp.Grid, pp.Grid], List[Tuple[pp.Grid, pp.Grid]]], str]) -> bool:
+ # TODO flip the order
+ """
+ Implementation of isinstance(t, Tuple[Union[Edge, List[Edge]], str]).
+
+ Detects data input of type (e1, "name") and ([e1, e2, ...], "name"),
+ where e1, e2, ... are interfaces.
+ """
+ # Implementation of isinstance(t, Tuple[Tuple[pp.Grid, pp.Grid], str])
+ if list(map(type, t)) == [tuple, str]:
+ return isinstance_interface(t[0])
+ # Implementation of isinstance(t, Tuple[List[Tuple[pp.Grid, pp.Grid]], str])
+ elif list(map(type, t)) == [list, str]:
+ return all([isinstance_interface(e) for e in t[0]])
+ else:
+ return False
+
+ def isinstance_Tuple_subdomain_str_array(t: Tuple[pp.Grid, str, np.ndarray]) -> bool:
+ """
+ Implementation of isinstance(t, Tuple[pp.Grid, str, np.ndarray]).
+ """
+ types = list(map(type, t))
+ return isinstance(t[0], pp.Grid) and types[1:] == [str, np.ndarray]
+
+ def isinstance_Tuple_interface_str_array(t: Tuple[Tuple[pp.Grid, pp.Grid], str, np.ndarray]) -> bool:
+ """
+ Implementation of isinstance(t, Tuple[Interface, str, np.ndarray]). 
+ """ + return list(map(type, t)) == [tuple, str, np.ndarray] and isinstance_interface(t[0]) + + def isinstance_Tuple_str_array(pt) -> bool: + """ + Implementation if isinstance(pt, Tuple[str, np.ndarray]. + Detects data input of type ("name", value). + """ + # Check whether the tuple is of length 2 and has the right types. + return list(map(type, pt)) == [str, np.ndarray] + # Loop over all data points and convert them collect them in the format # (grid/edge, key, data) for single grids etc. for pt in data: @@ -438,13 +505,14 @@ def toVectorFormat(value: np.ndarray, g: pp.Grid) -> np.ndarray: if not has_key: raise ValueError(f"No data with provided key {key} present in the grid bucket.") - # Case 2a: Data provided by a tuple ([g], key) where [g] is a single grid - # or a list of grids.Idea: Collect the data only among the grids specified. - elif isTupleOf_entity_str(pt, entity="node"): + # Case 2a: Data provided by a tuple (g, key). + # Here, g is a single grid or a list of grids. + # Idea: Collect the data only among the grids specified. + elif isinstance_Tuple_subdomain_str(pt): # By construction, the first component contains grids. # Unify by converting the first component to a list - grids: List[pp.Grid] = pt[0] if isinstance(pt[0], list) else [pt[0]] + grids: List[pp.Grid] = pt[0] # if isinstance(pt[0], list) else [pt[0]] # By construction, the second component contains a key. key: str = pt[1] @@ -456,7 +524,8 @@ def toVectorFormat(value: np.ndarray, g: pp.Grid) -> np.ndarray: d = self.gb.node_props(g) # TODO type # Make sure the data exists. - assert(pp.STATE in d and key in d[pp.STATE]) + if not (pp.STATE in d and key in d[pp.STATE]): + raise ValueError("No state with prescribed key available on selected grids.") # Fetch data and convert to vectorial format if suitable value: np.ndarray = toVectorFormat(d[pp.STATE][key], g) @@ -464,9 +533,10 @@ def toVectorFormat(value: np.ndarray, g: pp.Grid) -> np.ndarray: # Add data point in correct format to collection subdomain_data[(g, key)] = value - # Case 2b: Data provided by a tuple ([e], key) where [e] is a single edge - # or a list of edges. Idea: Collect the data only among the grids specified. - elif isTupleOf_entity_str(pt, entity="edge"): + # Case 2b: Data provided by a tuple (e, key) + # Here, e is a single edge or a list of edges. + # Idea: Collect the data only among the grids specified. + elif isinstance_Tuple_interface_str(pt): # By construction, the first component contains grids. # Unify by converting the first component to a list @@ -491,24 +561,18 @@ def toVectorFormat(value: np.ndarray, g: pp.Grid) -> np.ndarray: # Add data point in correct format to collection interface_data[(e, key)] = value - # TODO include this case at all? It is currently kept as close to the previous use case. + # Case 3: Data provided by a tuple (key, data). + # This case is only well-defined, if the grid bucket contains + # only a single grid. + # Idea: Extract the unique grid and continue as in Case 2. + elif isinstance_Tuple_str_array(pt): - # Case 3: Data provided by a tuple (key, data). Here, the grid is - # implicitly is addressed. This only works if there is just a single - # grid. Idea: Extract the unique grid and continue as in Case 2. - elif isTupleOf_str_array(pt): # Fetch the correct grid. This option is only supported for grid buckets containing a single grid. 
-
- # Collect all grids
 grids: List[pp.Grid] = [g for g, _ in self.gb.nodes()]
-
- # Make sure there exists only a single grid
 if not len(grids)==1:
 raise ValueError(f"The data type used for {pt} is only supported if the grid bucket only contains a single grid.")
+ g: pp.Grid = grids[0]
 
- # Extract the unique grid
- g = grids[0]
-
 # Fetch remaining ingredients required to define node data element
 key: str = pt[0]
 value: np.ndarray = toVectorFormat(pt[1], g)
@@ -516,9 +580,9 @@ def toVectorFormat(value: np.ndarray, g: pp.Grid) -> np.ndarray:
 # Add data point in correct format to collection
 subdomain_data[(g, key)] = value
 
- # Case 4a: Data provided as tuple (g, key, data). Idea: Since this is
- # already the desired format, process naturally.
- elif isTupleOf_entity_str_array(pt, entity = "node"):
+ # Case 4a: Data provided as tuple (g, key, data).
+ # Idea: Since this is already the desired format, process naturally.
+ elif isinstance_Tuple_subdomain_str_array(pt):
 # Data point in correct format
 g: pp.Grid = pt[0]
 key: str = pt[1]
@@ -527,9 +591,9 @@ def toVectorFormat(value: np.ndarray, g: pp.Grid) -> np.ndarray:
 # Add data point in correct format to collection
 subdomain_data[(g, key)] = value
 
- # Case 4b: Data provided as tuple (e, key, data). Idea: Since this is
- # already the desired format, process naturally.
- elif isTupleOf_entity_str_array(pt, entity = "edge"):
+ # Case 4b: Data provided as tuple (e, key, data).
+ # Idea: Since this is already the desired format, process naturally.
+ elif isinstance_Tuple_interface_str_array(pt):
 # Data point in correct format
 e: Interface = pt[0]
 key: str = pt[1]
diff --git a/src/porepy/viz/type_test.py b/src/porepy/viz/type_test.py
deleted file mode 100644
index f3180b7c7a..0000000000
--- a/src/porepy/viz/type_test.py
+++ /dev/null
@@ -1,72 +0,0 @@
-# TODO DOC
-import porepy as pp
-import numpy as np
-
-def is_Edge(e) -> bool:
- """Implementation if isinstance(g, Tuple[pp.Grid, pp.Grid])."""
-
- return (
- isinstance(e, tuple) and
- len(e) == 2 and
- isinstance(e[0], pp.Grid) and
- isinstance(e[1], pp.Grid)
- )
-
-def isTupleOf_entity_str(pt, entity) -> bool:
- """Implementation of isinstance(pt, Tuple[Union[pp.Grid, List[pp.Grid]], str]), if entity = "node", and
- isinstance(pt, Tuple[Union[Tuple[pp.Grid, pp.Grid], List[Tuple[pp.Grid, pp.Grid]]], str]) if entity = "edge". 
- """ - - if entity not in ["node", "edge"]: - raise ValueError(f"Entity type {entity} not supported.") - - return ( - # Check whether the input is a tuple with two entries - isinstance(pt, tuple) and - len(pt) == 2 and - ( - # Check whether the first component is a grid or a list of grids - entity == "node" and - ( - isinstance(pt[0], pp.Grid) or - isinstance(pt[0], list) and all([isinstance(g, pp.Grid) for g in pt[0]]) - ) or - - # Check whether the first component is an edge or a list of edges - entity == "edge" and - ( - is_Edge(pt[0]) or - isinstance(pt[0], list) and all([is_Edge(g) for g in pt[0]]) - ) - ) and - # Check whether the second component is a string - isinstance(pt[1], str) - ) - -def isTupleOf_entity_str_array(pt, entity) -> bool: - """Implementation of isinstance(pt, Tuple[pp.Grid, str, np.ndarray] if entity="node", - and isinstance(pt, Tuple[Tuple[pp.Grid, pp.Grid], str, np.ndarray]) if entity="edge".""" - - if entity not in ["node", "edge"]: - raise ValueError(f"Entity type {entity} not supported.") - - return ( - isinstance(pt, tuple) and - len(pt) == 3 and - ( - entity == "node" and isinstance(pt[0], pp.Grid) or - entity == "edge" and is_Edge(pt[0]) - ) and - isinstance(pt[1], str) and - isinstance(pt[2], np.ndarray) - ) - -def isTupleOf_str_array(pt) -> bool: - """Implementation if isinstance(pt, Tuple[str, np.ndarray].""" - - return ( - isinstance(pt, tuple) and - len(pt) == 2 and - isinstance(pt[0], str) and - isinstance(pt[1], np.ndarray) - ) From 1ebee4b7ce34b3db5a71cedaace09f3d9e0a6aee Mon Sep 17 00:00:00 2001 From: Jakub Both Date: Sat, 14 May 2022 00:46:20 +0200 Subject: [PATCH 26/83] MAINT: Simplify definition of types. --- src/porepy/viz/exporter.py | 17 +++++------------ 1 file changed, 5 insertions(+), 12 deletions(-) diff --git a/src/porepy/viz/exporter.py b/src/porepy/viz/exporter.py index 98855abbd9..49ca06756b 100644 --- a/src/porepy/viz/exporter.py +++ b/src/porepy/viz/exporter.py @@ -76,26 +76,19 @@ class Exporter: # Interface between subdomains Interface = Tuple[pp.Grid, pp.Grid] - # Allowed data structures to define data on subdomains - SubdomainDataInput = Union[ + # Altogether allowed data structures to define data + DataInput = Union[ str, + # Subdomain specific data types Tuple[Union[pp.Grid, List[pp.Grid]], str], Tuple[pp.Grid, str, np.ndarray], Tuple[str, np.ndarray], - ] - - # Allowed data structures to define edge data - InterfaceDataInput = Union[ - str, + # Interface specific data types Tuple[Union[Interface, List[Interface]], str], Tuple[Interface, str, np.ndarray], - Tuple[str, np.ndarray], ] - # Altogether allowed data structures to define data - DataInput = Union[SubdomainDataInput, InterfaceDataInput] - - # Data structure in which data is stored + # Data structure in which data is stored after preprocessing SubdomainData = Dict[Tuple[pp.Grid, str], np.ndarray] InterfaceData = Dict[Tuple[Interface, str], np.ndarray] From e8c6cc7646e45364fdd81f62bda1375451b20453 Mon Sep 17 00:00:00 2001 From: Jakub Both Date: Sat, 14 May 2022 00:52:25 +0200 Subject: [PATCH 27/83] MAINT: Simplify vtk tests which involve merely single grids. 
--- tests/unit/test_vtk.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/tests/unit/test_vtk.py b/tests/unit/test_vtk.py index 467374914c..2b93d2fdc7 100644 --- a/tests/unit/test_vtk.py +++ b/tests/unit/test_vtk.py @@ -31,7 +31,7 @@ def test_single_grid_1d(self): dummy_vector = np.ones((3, g.num_cells)) * g.dim save = pp.Exporter(g, self.file_name, self.folder, binary=False) - save.write_vtu([(g, "dummy_scalar", dummy_scalar), (g, "dummy_vector", dummy_vector)]) + save.write_vtu([("dummy_scalar", dummy_scalar), ("dummy_vector", dummy_vector)]) with open(self.folder + self.file_name + "_1.vtu", "r") as content_file: content = self.sliceout(content_file.read()) @@ -45,7 +45,7 @@ def test_single_grid_2d_simplex(self): dummy_vector = np.ones((3, g.num_cells)) * g.dim save = pp.Exporter(g, self.file_name, self.folder, binary=False) - save.write_vtu([(g, "dummy_scalar", dummy_scalar), (g, "dummy_vector", dummy_vector)]) + save.write_vtu([("dummy_scalar", dummy_scalar), ("dummy_vector", dummy_vector)]) with open(self.folder + self.file_name + "_2.vtu", "r") as content_file: content = self.sliceout(content_file.read()) @@ -59,7 +59,7 @@ def test_single_grid_2d_cart(self): dummy_vector = np.ones((3, g.num_cells)) * g.dim save = pp.Exporter(g, self.file_name, self.folder, binary=False) - save.write_vtu([(g, "dummy_scalar", dummy_scalar), (g, "dummy_vector", dummy_vector)]) + save.write_vtu([("dummy_scalar", dummy_scalar), ("dummy_vector", dummy_vector)]) with open(self.folder + self.file_name + "_2.vtu", "r") as content_file: content = self.sliceout(content_file.read()) @@ -75,7 +75,7 @@ def test_single_grid_2d_polytop(self): dummy_vector = np.ones((3, g.num_cells)) * g.dim save = pp.Exporter(g, self.file_name, self.folder, binary=False) - save.write_vtu([(g, "dummy_scalar", dummy_scalar), (g, "dummy_vector", dummy_vector)]) + save.write_vtu([("dummy_scalar", dummy_scalar), ("dummy_vector", dummy_vector)]) with open(self.folder + self.file_name + "_2.vtu", "r") as content_file: content = self.sliceout(content_file.read()) @@ -89,7 +89,7 @@ def test_single_grid_3d_simplex(self): dummy_vector = np.ones((3, g.num_cells)) * g.dim save = pp.Exporter(g, self.file_name, self.folder, binary=False) - save.write_vtu([(g, "dummy_scalar", dummy_scalar), (g, "dummy_vector", dummy_vector)]) + save.write_vtu([("dummy_scalar", dummy_scalar), ("dummy_vector", dummy_vector)]) with open(self.folder + self.file_name + "_3.vtu", "r") as content_file: content = self.sliceout(content_file.read()) @@ -103,7 +103,7 @@ def test_single_grid_3d_cart(self): dummy_vector = np.ones((3, g.num_cells)) * g.dim save = pp.Exporter(g, self.file_name, self.folder, binary=False) - save.write_vtu([(g, "dummy_scalar", dummy_scalar), (g, "dummy_vector", dummy_vector)]) + save.write_vtu([("dummy_scalar", dummy_scalar), ("dummy_vector", dummy_vector)]) with open(self.folder + self.file_name + "_3.vtu", "r") as content_file: content = self.sliceout(content_file.read()) @@ -121,7 +121,7 @@ def test_single_grid_3d_polytop(self): dummy_vector = np.ones((3, g.num_cells)) * g.dim save = pp.Exporter(g, self.file_name, self.folder, binary=False) - save.write_vtu([(g, "dummy_scalar", dummy_scalar), (g, "dummy_vector", dummy_vector)]) + save.write_vtu([("dummy_scalar", dummy_scalar), ("dummy_vector", dummy_vector)]) with open(self.folder + self.file_name + "_3.vtu", "r") as content_file: content = self.sliceout(content_file.read()) From 4906bf4046f9b7b0343e69bdf7fb7733778637d9 Mon Sep 17 00:00:00 2001 From: Jakub Both 
Date: Sat, 14 May 2022 01:15:43 +0200 Subject: [PATCH 28/83] TST: Add new test for exporter testing the capability of addressing merely parts of the grid bucket. --- tests/unit/test_vtk.py | 676 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 676 insertions(+) diff --git a/tests/unit/test_vtk.py b/tests/unit/test_vtk.py index 2b93d2fdc7..e1cad61b05 100644 --- a/tests/unit/test_vtk.py +++ b/tests/unit/test_vtk.py @@ -198,6 +198,51 @@ def test_gb_2(self): content = self.sliceout(content_file.read()) self.assertTrue(content == self._gb_2_mortar_grid_1_vtu()) + def test_gb_3(self): + gb, _ = pp.grid_buckets_2d.two_intersecting( + [4, 4], y_endpoints=[0.25, 0.75], simplex=False + ) + gb.add_node_props(["dummy_scalar", "dummy_vector"]) + + for g, d in gb: + pp.set_state( + d, + { + "dummy_scalar": np.ones(g.num_cells) * g.dim, + "dummy_vector": np.ones((3, g.num_cells)) * g.dim, + }, + ) + + for e, d in gb.edges(): + g = d["mortar_grid"] + pp.set_state( + d, + { + "dummy_scalar": np.zeros(g.num_cells), + "unique_dummy_scalar": np.zeros(g.num_cells), + }, + ) + + subdomains_1d = gb.get_grids(lambda g: g.dim==1).tolist() + subdomains_2d = gb.get_grids(lambda g: g.dim==2).tolist() + g_2d = subdomains_2d[0] + interfaces = [e for e, d in gb.edges() if d["mortar_grid"].dim == 1] + + save = pp.Exporter(gb, self.file_name, self.folder, binary=False) + save.write_vtu([(subdomains_1d, "dummy_scalar"), "dummy_vector", "unique_dummy_scalar", (g_2d, "cc", g_2d.cell_centers)]) + + with open(self.folder + self.file_name + "_1.vtu", "r") as content_file: + content = self.sliceout(content_file.read()) + self.assertTrue(content == self._gb_3_grid_1_vtu()) + + with open(self.folder + self.file_name + "_2.vtu", "r") as content_file: + content = self.sliceout(content_file.read()) + self.assertTrue(content == self._gb_3_grid_2_vtu()) + + with open(self.folder + self.file_name + "_mortar_1.vtu", "r") as content_file: + content = self.sliceout(content_file.read()) + self.assertTrue(content == self._gb_3_mortar_grid_1_vtu()) + # NOTE: This test is not directly testing Exporter, but the capability to export networks. # Is this a used capability? If not, suggest removing the test and the corresponding capability. 
def test_fractures_2d(self): @@ -8259,6 +8304,637 @@ def _gb_2_mortar_grid_1_vtu(self): 10 11 + + + + + +""" + + def _gb_3_grid_1_vtu(self): + return """ + + + + + + +1.00000000000e+00 +5.00000000000e-01 +-5.55111512313e-17 +7.50000000000e-01 +5.00000000000e-01 +-2.77555756156e-17 +5.00000000000e-01 +5.00000000000e-01 +0.00000000000e+00 +5.00000000000e-01 +5.00000000000e-01 +0.00000000000e+00 +2.50000000000e-01 +5.00000000000e-01 +2.77555756156e-17 +0.00000000000e+00 +5.00000000000e-01 +5.55111512313e-17 +5.00000000000e-01 +7.50000000000e-01 +-2.77555756156e-17 +5.00000000000e-01 +5.00000000000e-01 +0.00000000000e+00 +5.00000000000e-01 +5.00000000000e-01 +0.00000000000e+00 +5.00000000000e-01 +2.50000000000e-01 +2.77555756156e-17 + + + + + +0 +1 +1 +2 +3 +4 +4 +5 +6 +7 +8 +9 + + + +2 +4 +6 +8 +10 +12 + + + +3 +3 +3 +3 +3 +3 + + + + + +1.00000000000e+00 +1.00000000000e+00 +1.00000000000e+00 +1.00000000000e+00 +1.00000000000e+00 +1.00000000000e+00 + + + +1.00000000000e+00 +1.00000000000e+00 +1.00000000000e+00 +1.00000000000e+00 +1.00000000000e+00 +1.00000000000e+00 +1.00000000000e+00 +1.00000000000e+00 +1.00000000000e+00 +1.00000000000e+00 +1.00000000000e+00 +1.00000000000e+00 +1.00000000000e+00 +1.00000000000e+00 +1.00000000000e+00 +1.00000000000e+00 +1.00000000000e+00 +1.00000000000e+00 + + + +0 +1 +2 +3 +4 +5 + + + + + + +""" + + def _gb_3_grid_2_vtu(self): + return """ + + + + + + +0.00000000000e+00 +0.00000000000e+00 +0.00000000000e+00 +2.50000000000e-01 +0.00000000000e+00 +0.00000000000e+00 +5.00000000000e-01 +0.00000000000e+00 +0.00000000000e+00 +7.50000000000e-01 +0.00000000000e+00 +0.00000000000e+00 +1.00000000000e+00 +0.00000000000e+00 +0.00000000000e+00 +0.00000000000e+00 +2.50000000000e-01 +0.00000000000e+00 +2.50000000000e-01 +2.50000000000e-01 +0.00000000000e+00 +5.00000000000e-01 +2.50000000000e-01 +0.00000000000e+00 +7.50000000000e-01 +2.50000000000e-01 +0.00000000000e+00 +1.00000000000e+00 +2.50000000000e-01 +0.00000000000e+00 +0.00000000000e+00 +5.00000000000e-01 +0.00000000000e+00 +0.00000000000e+00 +5.00000000000e-01 +0.00000000000e+00 +2.50000000000e-01 +5.00000000000e-01 +0.00000000000e+00 +2.50000000000e-01 +5.00000000000e-01 +0.00000000000e+00 +5.00000000000e-01 +5.00000000000e-01 +0.00000000000e+00 +5.00000000000e-01 +5.00000000000e-01 +0.00000000000e+00 +5.00000000000e-01 +5.00000000000e-01 +0.00000000000e+00 +5.00000000000e-01 +5.00000000000e-01 +0.00000000000e+00 +7.50000000000e-01 +5.00000000000e-01 +0.00000000000e+00 +7.50000000000e-01 +5.00000000000e-01 +0.00000000000e+00 +1.00000000000e+00 +5.00000000000e-01 +0.00000000000e+00 +1.00000000000e+00 +5.00000000000e-01 +0.00000000000e+00 +0.00000000000e+00 +7.50000000000e-01 +0.00000000000e+00 +2.50000000000e-01 +7.50000000000e-01 +0.00000000000e+00 +5.00000000000e-01 +7.50000000000e-01 +0.00000000000e+00 +7.50000000000e-01 +7.50000000000e-01 +0.00000000000e+00 +1.00000000000e+00 +7.50000000000e-01 +0.00000000000e+00 +0.00000000000e+00 +1.00000000000e+00 +0.00000000000e+00 +2.50000000000e-01 +1.00000000000e+00 +0.00000000000e+00 +5.00000000000e-01 +1.00000000000e+00 +0.00000000000e+00 +7.50000000000e-01 +1.00000000000e+00 +0.00000000000e+00 +1.00000000000e+00 +1.00000000000e+00 +0.00000000000e+00 + + + + + +0 +5 +6 +1 +1 +6 +7 +2 +2 +7 +8 +3 +3 +8 +9 +4 +5 +10 +12 +6 +6 +12 +14 +7 +7 +15 +18 +8 +8 +18 +20 +9 +11 +22 +23 +13 +13 +23 +24 +16 +17 +24 +25 +19 +19 +25 +26 +21 +22 +27 +28 +23 +23 +28 +29 +24 +24 +29 +30 +25 +25 +30 +31 +26 + + + +4 +8 +12 +16 +20 +24 +28 +32 +36 +40 +44 +48 +52 +56 +60 +64 + + + +9 +9 +9 
+9 +9 +9 +9 +9 +9 +9 +9 +9 +9 +9 +9 +9 + + + + + +1.25000000000e-01 +1.25000000000e-01 +0.00000000000e+00 +3.75000000000e-01 +1.25000000000e-01 +0.00000000000e+00 +6.25000000000e-01 +1.25000000000e-01 +0.00000000000e+00 +8.75000000000e-01 +1.25000000000e-01 +0.00000000000e+00 +1.25000000000e-01 +3.75000000000e-01 +0.00000000000e+00 +3.75000000000e-01 +3.75000000000e-01 +0.00000000000e+00 +6.25000000000e-01 +3.75000000000e-01 +0.00000000000e+00 +8.75000000000e-01 +3.75000000000e-01 +0.00000000000e+00 +1.25000000000e-01 +6.25000000000e-01 +0.00000000000e+00 +3.75000000000e-01 +6.25000000000e-01 +0.00000000000e+00 +6.25000000000e-01 +6.25000000000e-01 +0.00000000000e+00 +8.75000000000e-01 +6.25000000000e-01 +0.00000000000e+00 +1.25000000000e-01 +8.75000000000e-01 +0.00000000000e+00 +3.75000000000e-01 +8.75000000000e-01 +0.00000000000e+00 +6.25000000000e-01 +8.75000000000e-01 +0.00000000000e+00 +8.75000000000e-01 +8.75000000000e-01 +0.00000000000e+00 + + + +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 + + + +0 +1 +2 +3 +4 +5 +6 +7 +8 +9 +10 +11 +12 +13 +14 +15 + + + + + + +""" + + def _gb_3_mortar_grid_1_vtu(self): + return """ + + + + + + +1.00000000000e+00 +5.00000000000e-01 +-5.55111512313e-17 +7.50000000000e-01 +5.00000000000e-01 +-2.77555756156e-17 +5.00000000000e-01 +5.00000000000e-01 +0.00000000000e+00 +5.00000000000e-01 +5.00000000000e-01 +0.00000000000e+00 +2.50000000000e-01 +5.00000000000e-01 +2.77555756156e-17 +0.00000000000e+00 +5.00000000000e-01 +5.55111512313e-17 +1.00000000000e+00 +5.00000000000e-01 +-5.55111512313e-17 +7.50000000000e-01 +5.00000000000e-01 +-2.77555756156e-17 +5.00000000000e-01 +5.00000000000e-01 +0.00000000000e+00 +5.00000000000e-01 +5.00000000000e-01 +0.00000000000e+00 +2.50000000000e-01 +5.00000000000e-01 +2.77555756156e-17 +0.00000000000e+00 +5.00000000000e-01 +5.55111512313e-17 +5.00000000000e-01 +7.50000000000e-01 +-2.77555756156e-17 +5.00000000000e-01 +5.00000000000e-01 +0.00000000000e+00 +5.00000000000e-01 +5.00000000000e-01 +0.00000000000e+00 +5.00000000000e-01 +2.50000000000e-01 +2.77555756156e-17 +5.00000000000e-01 +7.50000000000e-01 +-2.77555756156e-17 +5.00000000000e-01 +5.00000000000e-01 +0.00000000000e+00 +5.00000000000e-01 +5.00000000000e-01 +0.00000000000e+00 +5.00000000000e-01 +2.50000000000e-01 +2.77555756156e-17 + + + + + +0 +1 +1 +2 +3 +4 +4 +5 +6 +7 +7 +8 +9 +10 +10 +11 +12 +13 +14 +15 +16 +17 +18 +19 + + + +2 +4 +6 +8 +10 +12 +14 +16 +18 +20 +22 +24 + + + +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 + + + + + +0.00000000000e+00 +0.00000000000e+00 +0.00000000000e+00 +0.00000000000e+00 +0.00000000000e+00 +0.00000000000e+00 +0.00000000000e+00 +0.00000000000e+00 +0.00000000000e+00 +0.00000000000e+00 
+0.00000000000e+00 +0.00000000000e+00 + + + +0 +1 +2 +3 +4 +5 +6 +7 +8 +9 +10 +11 + From 7430d70d07f86d35d50fec185f9c01fd0b10b628 Mon Sep 17 00:00:00 2001 From: Jakub Both Date: Sat, 14 May 2022 11:28:05 +0200 Subject: [PATCH 29/83] TST: Replace vtk test for 2d polytop grids. Base the test now on a grid which contains simplex, quad, and polytop cells, testing the compatibility with mixed type grids. --- tests/unit/test_vtk.py | 55 +++++++++++++++++++++++------------------- 1 file changed, 30 insertions(+), 25 deletions(-) diff --git a/tests/unit/test_vtk.py b/tests/unit/test_vtk.py index e1cad61b05..87260dd7d6 100644 --- a/tests/unit/test_vtk.py +++ b/tests/unit/test_vtk.py @@ -66,9 +66,9 @@ def test_single_grid_2d_cart(self): self.assertTrue(content == self._single_grid_2d_cart_grid_vtu()) def test_single_grid_2d_polytop(self): - g = pp.CartGrid([3, 2], [1] * 2) + g = pp.StructuredTriangleGrid([2] * 2, [1] * 2) g.compute_geometry() - pp.coarsening.generate_coarse_grid(g, [0, 0, 1, 0, 1, 1]) + pp.coarsening.generate_coarse_grid(g, [0, 1, 3, 3, 1, 1, 2, 2]) g.compute_geometry() dummy_scalar = np.ones(g.num_cells) * g.dim @@ -932,16 +932,13 @@ def _single_grid_2d_polytop_grid_vtu(self): - + 0.00000000000e+00 0.00000000000e+00 0.00000000000e+00 -3.33333333333e-01 -0.00000000000e+00 -0.00000000000e+00 -6.66666666667e-01 +5.00000000000e-01 0.00000000000e+00 0.00000000000e+00 1.00000000000e+00 @@ -950,10 +947,7 @@ def _single_grid_2d_polytop_grid_vtu(self): 0.00000000000e+00 5.00000000000e-01 0.00000000000e+00 -3.33333333333e-01 5.00000000000e-01 -0.00000000000e+00 -6.66666666667e-01 5.00000000000e-01 0.00000000000e+00 1.00000000000e+00 @@ -962,10 +956,7 @@ def _single_grid_2d_polytop_grid_vtu(self): 0.00000000000e+00 1.00000000000e+00 0.00000000000e+00 -3.33333333333e-01 -1.00000000000e+00 -0.00000000000e+00 -6.66666666667e-01 +5.00000000000e-01 1.00000000000e+00 0.00000000000e+00 1.00000000000e+00 @@ -977,30 +968,34 @@ def _single_grid_2d_polytop_grid_vtu(self): 0 +1 +4 4 -8 -9 5 -6 -2 +8 +7 1 2 -6 5 -9 -10 -11 -7 +4 +0 3 +6 +7 +4 -8 +3 +7 +11 16 -7 +5 +9 +9 7 @@ -1009,6 +1004,8 @@ def _single_grid_2d_polytop_grid_vtu(self): 2.00000000000e+00 2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 @@ -1018,10 +1015,18 @@ def _single_grid_2d_polytop_grid_vtu(self): 2.00000000000e+00 2.00000000000e+00 2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 0 +2 +3 1 From 0b3ef8fb45c717540666ce979510c79cc46ee503 Mon Sep 17 00:00:00 2001 From: Jakub Both Date: Sun, 15 May 2022 10:16:07 +0200 Subject: [PATCH 30/83] FIX: Use correct suffix for mortar grids. 
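
Since _make_file_name itself inserts an underscore before the dimension
suffix, the previous base name produced a double underscore. For an
illustrative file name "sol" and a 1d mortar grid, the exported file now
reads

    sol_mortar_1.vtu    (previously: sol_mortar__1.vtu)

which matches the names expected by the unit tests. Note that
_export_gb_pvd still assembles its mortar entries from
self.file_name + "_mortar_" and may warrant the same adjustment.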
--- src/porepy/viz/exporter.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/porepy/viz/exporter.py b/src/porepy/viz/exporter.py index 49ca06756b..3eb7d379b2 100644 --- a/src/porepy/viz/exporter.py +++ b/src/porepy/viz/exporter.py @@ -742,7 +742,7 @@ def _export_data_vtu( dims = self.dims if is_subdomain_data else self.m_dims # Define file name (extend in case of interface data) - file_name_base: str = self.file_name if is_subdomain_data else self.file_name + "_mortar_" + file_name_base: str = self.file_name if is_subdomain_data else self.file_name + "_mortar" # Append folder name to file name base file_name_base: str = self._append_folder_name(self.folder_name, file_name_base) From 5110a1b302ab6d1a304774b9ae04d82a9bc7e3f5 Mon Sep 17 00:00:00 2001 From: Jakub Both Date: Sun, 15 May 2022 10:24:00 +0200 Subject: [PATCH 31/83] MAINT/TST: Do not treat hexahedra as polyhedra anymore, adapt test --- src/porepy/viz/exporter.py | 192 ++- tests/unit/test_vtk.py | 2670 +++++------------------------------- 2 files changed, 451 insertions(+), 2411 deletions(-) diff --git a/src/porepy/viz/exporter.py b/src/porepy/viz/exporter.py index 3eb7d379b2..0f96ecc9e1 100644 --- a/src/porepy/viz/exporter.py +++ b/src/porepy/viz/exporter.py @@ -879,14 +879,15 @@ def _export_grid( raise ValueError(f"Unknown dimension {dim}") def _export_1d( - self, gs: Iterable[pp.Grid] + self, + gs: Iterable[pp.Grid] ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: """ Export the geometrical data (point coordinates) and connectivity information from the 1d PorePy grids to meshio. Parameters: - gs (Iterable[pp.Grid]): 1d subdomains. + gs (Iterable[pp.Grid]): 1d grids. Returns: Tuple[np.ndarray, np.ndarray, np.ndarray]: Points, 1d cells (storing the @@ -947,42 +948,48 @@ def _export_1d( # Return final meshio data: points, cell (connectivity), cell ids return meshio_pts, meshio_cells, meshio_cell_id - def _simplex_cell_to_nodes(self, n: int, g: pp.Grid, cells: Optional[np.ndarray] = None) -> np.ndarray: - """Determine cell to node connectivity for a n-simplex. + def _simplex_cell_to_nodes( + self, + n: int, + g: pp.Grid, + cells: Optional[np.ndarray] = None + ) -> np.ndarray: + """ + Determine cell to node connectivity for a general n-simplex mesh. Parameters: n (int): order of the simplices in the grid. g (pp.Grid): grid containing cells and nodes. - cells (np.ndarray, optional): all simplex cells + cells (np.ndarray, optional): all n-simplex cells Returns: np.ndarray: cell to node connectivity array, in which for each row - all nodes are given. + (associated to cells) all associated nodes are marked. """ - # Determine cell-node ptr cn_indptr = g.cell_nodes().indptr[:-1] if cells is None else g.cell_nodes().indptr[cells] - # Collect the indptr to all nodes of the cell; each cell contains n nodes + # Collect the indptr to all nodes of the cell; each n-simplex cell contains n nodes expanded_cn_indptr = np.vstack([cn_indptr + i for i in range(n)]).reshape(-1, order="F") # Detect all corresponding nodes by applying the expanded mask to the indices expanded_cn_indices = g.cell_nodes().indices[expanded_cn_indptr] - # Bring in correct form. + # Convert to right format. cn_indices = np.reshape(expanded_cn_indices, (-1,n), order="C") return cn_indices def _export_2d( - self, gs: Iterable[pp.Grid] + self, + gs: Iterable[pp.Grid] ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: """ Export the geometrical data (point coordinates) and connectivity information from the 2d PorePy grids to meshio. 
Parameters:
- gs (Iterable[pp.Grid]): 2d subdomains.
+ gs (Iterable[pp.Grid]): 2d grids.
 
 Returns:
 Tuple[np.ndarray, np.ndarray, np.ndarray]: Points, 2d cells (storing the
@@ -1120,9 +1127,9 @@
 # For each cell_type store the connectivity pattern cell_to_nodes for
 # the corresponding cells with ids from cell_id.
 for block, (cell_type, cell_block) in enumerate(cell_to_nodes.items()):
- # Meshio does not accept polygon{n} cell types. Remove {n} if
- # cell type not special geometric object.
- cell_type_meshio_format = polygon_map.get(f"polygon{n}", "polygon")
+ # Meshio does not accept polygon{n} cell types if they do not
+ # correspond to a special geometric object.
+ cell_type_meshio_format = "polygon" if "polygon" in cell_type else cell_type
 
 meshio_cells[block] = meshio.CellBlock(
 cell_type_meshio_format, cell_block.astype(int)
@@ -1132,11 +1139,12 @@
 # Return final meshio data: points, cell (connectivity), cell ids
 return meshio_pts, meshio_cells, meshio_cell_id
 
- def _sort_point_pairs_numba(self, lines):
- """This a simplified copy of pp.utils.sort_points.sort_point_pairs.
+ def _sort_point_pairs_numba(self, lines: np.ndarray) -> np.ndarray:
+ """
+ This is a simplified copy of pp.utils.sort_points.sort_point_pairs.
 
 Essentially the same functionality as sort_point_pairs, but stripped down
- to the special case of circular chains. Different to sort_point_pairs, this
+ to the special case of circular chains. In contrast to sort_point_pairs, this
 variant sorts an arbitrary amount of independent point pairs. The chains
 are restricted by the assumption that each contains equally many line
 segments. Finally, this routine uses numba.
@@ -1149,15 +1157,17 @@
 
 Returns:
 np.ndarray: Sorted version of lines, where for each chain, the collection
- of line segments has been potentially flipped and sorted.
+ of line segments has been potentially flipped and sorted.
 """
 
 import numba
 
 @numba.jit(nopython=True)
 def _function_to_compile(lines):
- """Copy of pp.utils.sort_points.sort_point_pairs. This version is extended
- to multiple chains. Each chain is implicitly assumed to be circular."""
+ """
+ Copy of pp.utils.sort_points.sort_point_pairs. This version is extended
+ to multiple chains. Each chain is implicitly assumed to be circular.
+ """
 
 # Retrieve number of chains and lines per chain from the shape.
 # Implicitly expect that all chains have the same length
@@ -1223,67 +1233,103 @@ def _export_3d(
 ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
 """
 Export the geometrical data (point coordinates) and connectivity
- information from the 3d PorePy grids to meshio.
+ information from 3d PorePy grids to meshio.
 
 Parameters:
- gs (Iterable[pp.Grid]): 3d subdomains.
+ gs (Iterable[pp.Grid]): 3d grids.
 
 Returns:
 Tuple[np.ndarray, np.ndarray, np.ndarray]: Points, 3d cells (storing the
 connectivity), and cell ids in correct meshio format.
 """
+ # FIXME At first sight, the current implementation could suggest that
+ # grids with varying cell types are allowed. However, this is not the
+ # case. Rewriting the code to make this distinction clearer would
+ # allow for a better overview.
 
- # Use standard name for simple object types: in this routine only tetra cells
- # are treated in a special manner.
- polyhedron_map = {"polyhedron4": "tetra"}
+ # Three different cell types will be distinguished: Tetrahedra, hexahedra,
+ # and general polyhedra. meshio does not allow for a mix of cell types
+ # in 3d within a single grid. Thus, if a grid contains cells of varying
+ # types, all cells will be cast as polyhedra.
 
- # Dictionaries storing cell->faces and cell->nodes connectivity information
- # for all cell types. 
For this, the nodes have to be sorted such that
- # they form a circular chain, describing the boundary of the cell.
+ # Dictionaries storing cell->faces and cell->nodes connectivity
+ # information for all cell types. For the special meshio geometric
+ # types "tetra" and "hexahedron", cell->nodes will be used; the
+ # local node ordering for each cell has to comply with the convention
+ # in meshio. For the general "polyhedron" type, the connectivity will
+ # instead be described by prescribing cell->faces->nodes connectivity, i.e.,
+ # for each cell, all faces are listed, and for all these faces, their
+ # nodes are identified, ordered such that they describe a circular chain.
 cell_to_faces: Dict[str, List[List[int]]] = {}
 cell_to_nodes: Dict[str, np.ndarray] = {}
+ # Dictionary collecting all cell ids for each cell type.
 cell_id: Dict[str, List[int]] = {}
 
- # Data structure for storing node coordinates of all 2d grids.
+ # Data structure for storing node coordinates of all 3d grids.
 num_pts = np.sum([g.num_nodes for g in gs])
 meshio_pts = np.empty((num_pts, 3)) # type: ignore
 
- # Initialize offsets. Required taking into account multiple 2d grids.
+ # Initialize offsets. Required taking into account multiple 3d grids.
 nodes_offset = 0
 cell_offset = 0
 
- # Loop over all 3d grids
+ # Treat each 3d grid separately.
 for g in gs:
 
 # Store node coordinates
 sl = slice(nodes_offset, nodes_offset + g.num_nodes)
 meshio_pts[sl, :] = g.nodes.T
 
- # Determine cell types based on number of nodes per cell.
+ # The number of faces per cell will be used later to determine
+ # the cell types.
 num_faces_per_cell = g.cell_faces.getnnz(axis=0)
 
 # Loop over all available cell types and group cells of one type.
 g_cell_map = dict()
 for n in np.unique(num_faces_per_cell):
- # Define cell type; check if it coincides with a predefined cell type
- cell_type = polyhedron_map.get(f"polyhedron{n}", f"polyhedron{n}")
+
+ # Define the cell type.
+ # Make sure that either all cells are identified as "tetra",
+ # "hexahedron", or "polyhedron". "tetra" grids have the unique
+ # property that all cells have 4 faces; "hexahedron" grids in
+ # PorePy are solely of CartGrid type, and all cells have 6
+ # faces (NOTE that coarsening a grid does not change its type,
+ # hence checking solely the type is not sufficient); all other
+ # grids will be identified as "polyhedron" grids, even if they
+ # contain individual "tetra" cells.
+ if n==4 and len(set(num_faces_per_cell)) == 1:
+ cell_type = "tetra"
+ elif isinstance(g, pp.CartGrid) and n==6 and len(set(num_faces_per_cell)) == 1:
+ cell_type = "hexahedron"
+ else:
+ cell_type = f"polyhedron{n}"
+
 # Find all cells with n faces, and store for later use
 cells = np.nonzero(num_faces_per_cell == n)[0]
 g_cell_map[cell_type] = cells
+
 # Store cell ids in global container; init if entry not yet established
 if cell_type not in cell_id:
 cell_id[cell_type] = []
+
 # Add offset taking into account previous grids
 cell_id[cell_type] += (cells + cell_offset).tolist()
 
- # Determine connectivity. Loop over available cell types, and treat tetrahedra
- # and general polyhedra differently aiming for optimized performance.
-
+ # Determine local connectivity for all cells. 
Due to different
+ # treatment of special and general cell types and to conform
+ # with array sizes (for polyhedra), treat each cell type
+ # separately.
 for n in np.unique(num_faces_per_cell):
- # Define the cell type
- cell_type = polyhedron_map.get(f"polyhedron{n}", f"polyhedron{n}")
+ # Define the cell type as above
+ if n==4 and len(set(num_faces_per_cell)) == 1:
+ cell_type = "tetra"
+ elif isinstance(g, pp.CartGrid) and n==6 and len(set(num_faces_per_cell)) == 1:
+ cell_type = "hexahedron"
+ else:
+ cell_type = f"polyhedron{n}"
 
 # Special case: Tetrahedra
 if cell_type == "tetra":
@@ -1293,21 +1339,70 @@ def _export_3d(
 # ordered in any specific type, as the geometrical object is invariant under
 # permutations.
 
- # Fetch triangle cells
+ # Fetch tetra cells
 cells = g_cell_map[cell_type]
+
 # Tetrahedra are simplices and have a trivial connectivity.
 cn_indices = self._simplex_cell_to_nodes(4, g, cells)
 
 # Initialize data structure if not available yet
 if cell_type not in cell_to_nodes:
 cell_to_nodes[cell_type] = np.empty((0,4), dtype=int)
+
+ # Store cell-node connectivity, and add offset taking into account previous grids
+ cell_to_nodes[cell_type] = np.vstack(
+ (cell_to_nodes[cell_type], cn_indices + nodes_offset)
+ )
+
+ elif cell_type == "hexahedron":
+ # NOTE: Optimally, a StructuredGrid would be exported in this case.
+ # However, meshio only handles unstructured grids and cannot
+ # for instance write to *.vtr format.
+
+ # Similar to "tetra", "hexahedron" is a meshio-known cell type.
+ # Thus, the local connectivity is prescribed by cell-nodes
+ # information.
+
+ # Overall, the procedure is fairly similar to the treatment of
+ # tetra cells. Most of the following code is analogous to
+ # _simplex_cell_to_nodes(). However, for hexahedron cells the order
+ # of the nodes is important and has to be adjusted. Based on the
+ # specific definition of TensorGrids however, the node numbering
+ # can be hardcoded, which results in better performance compared
+ # to a modular procedure.
+
+ # Fetch hexahedron cells; their nodes need to be sorted according
+ # to the convention within meshio.
+ cells = g_cell_map[cell_type]
+
+ # Determine cell-node ptr
+ cn_indptr = g.cell_nodes().indptr[:-1] if cells is None else g.cell_nodes().indptr[cells]
+
+ # Collect the indptr to all nodes of the cell; each hexahedron cell contains 8 nodes
+ expanded_cn_indptr = np.vstack([cn_indptr + i for i in range(8)]).reshape(-1, order="F")
+
+ # Detect all corresponding nodes by applying the expanded mask to the indices
+ expanded_cn_indices = g.cell_nodes().indices[expanded_cn_indptr]
+
+ # Convert to the right format.
+ cn_indices = np.reshape(expanded_cn_indices, (-1,8), order="C")
+
+ # Reorder nodes to comply with the node numbering convention of meshio
+ # regarding hexahedron cells.
+ cn_indices = cn_indices[:, [0, 2, 6, 4, 1, 3, 7, 5]]
+
+ # Initialize data structure if not available yet
+ if cell_type not in cell_to_nodes:
+ cell_to_nodes[cell_type] = np.empty((0,8), dtype=int)
+
 # Store cell-node connectivity, and add offset taking into account previous grids
 cell_to_nodes[cell_type] = np.vstack(
 (cell_to_nodes[cell_type], cn_indices + nodes_offset)
 )
 
- # Identify remaining cells as polyhedra
 else:
+ # Identify remaining cells as polyhedra
+
 # The general strategy is to define the connectivity as cell-face information,
 # where the faces are defined by nodes. Hence, this information is significantly
 # larger than the info provided for tetra cells. 
Here, we make use of the fact @@ -1340,26 +1435,25 @@ def _export_3d( nodes_offset += g.num_nodes cell_offset += g.num_cells - # Determine the total number of blocks. Recall that tetra and general - # polyhedron{n} cells are stored differently. - num_tetra_blocks = len(cell_to_nodes) + # Determine the total number of blocks. Recall that special cell types + # and general polyhedron{n} cells are stored differently. + num_special_blocks = len(cell_to_nodes) num_polyhedron_blocks = len(cell_to_faces) - num_blocks = num_tetra_blocks + num_polyhedron_blocks + num_blocks = num_special_blocks + num_polyhedron_blocks # Initialize the meshio data structure for the connectivity and cell ids. meshio_cells = np.empty(num_blocks, dtype=object) meshio_cell_id = np.empty(num_blocks, dtype=object) - # Store tetra cells. Use cell_to_nodes to store the connectivity info. + # Store cells with special cell types. for block, (cell_type, cell_block) in enumerate(cell_to_nodes.items()): meshio_cells[block] = meshio.CellBlock(cell_type, cell_block.astype(int)) meshio_cell_id[block] = np.array(cell_id[cell_type]) - # Store tetra cells first. Use the more general cell_to_faces to store - # the connectivity info. + # Store general polyhedra cells. for block, (cell_type, cell_block) in enumerate(cell_to_faces.items()): # Adapt the block number taking into account of previous cell types. - block += num_tetra_blocks + block += num_special_blocks meshio_cells[block] = meshio.CellBlock(cell_type, cell_block) meshio_cell_id[block] = np.array(cell_id[cell_type]) diff --git a/tests/unit/test_vtk.py b/tests/unit/test_vtk.py index 87260dd7d6..f3b31e73a0 100644 --- a/tests/unit/test_vtk.py +++ b/tests/unit/test_vtk.py @@ -3437,517 +3437,517 @@ def _single_grid_3d_cart_grid_vtu(self): 0 -1 5 -6 -25 -26 30 -31 +25 1 -2 6 -7 +31 26 -27 +1 +6 31 -32 +26 2 -3 7 -8 +32 27 -28 +2 +7 32 -33 +27 3 -4 8 -9 +33 28 -29 +3 +8 33 +28 +4 +9 34 +29 5 -6 10 -11 -30 -31 35 -36 +30 6 -7 11 -12 +36 31 -32 +6 +11 36 -37 +31 7 -8 12 -13 +37 32 -33 +7 +12 37 -38 +32 8 -9 13 -14 +38 33 -34 +8 +13 38 +33 +9 +14 39 +34 10 -11 15 -16 -35 -36 40 -41 +35 11 -12 16 -17 +41 36 -37 +11 +16 41 -42 +36 12 -13 17 -18 +42 37 -38 +12 +17 42 -43 +37 13 -14 18 -19 +43 38 -39 +13 +18 43 +38 +14 +19 44 +39 15 -16 20 -21 -40 -41 45 -46 +40 16 -17 21 -22 +46 41 -42 +16 +21 46 -47 +41 17 -18 22 -23 +47 42 -43 +17 +22 47 -48 +42 18 -19 23 -24 +48 43 -44 +18 +23 48 +43 +19 +24 49 +44 25 -26 30 -31 -50 -51 55 -56 +50 26 -27 31 -32 +56 51 -52 +26 +31 56 -57 +51 27 -28 32 -33 +57 52 -53 +27 +32 57 -58 +52 28 -29 33 -34 +58 53 -54 +28 +33 58 +53 +29 +34 59 +54 30 -31 35 -36 -55 -56 60 -61 +55 31 -32 36 -37 +61 56 -57 +31 +36 61 -62 +56 32 -33 37 -38 +62 57 -58 +32 +37 62 -63 +57 33 -34 38 -39 +63 58 -59 +33 +38 63 +58 +34 +39 64 +59 35 -36 40 -41 -60 -61 65 -66 +60 36 -37 41 -42 +66 61 -62 +36 +41 66 -67 +61 37 -38 42 -43 +67 62 -63 +37 +42 67 -68 +62 38 -39 43 -44 +68 63 -64 +38 +43 68 +63 +39 +44 69 +64 40 -41 45 -46 -65 -66 70 -71 +65 41 -42 46 -47 +71 66 -67 +41 +46 71 -72 +66 42 -43 47 -48 +72 67 -68 +42 +47 72 -73 +67 43 -44 48 -49 +73 68 -69 +43 +48 73 +68 +44 +49 74 +69 50 -51 55 -56 -75 -76 80 -81 +75 51 -52 56 -57 +81 76 -77 +51 +56 81 -82 +76 52 -53 57 -58 +82 77 -78 +52 +57 82 -83 +77 53 -54 58 -59 +83 78 -79 +53 +58 83 +78 +54 +59 84 +79 55 -56 60 -61 -80 -81 85 -86 +80 56 -57 61 -62 +86 81 -82 +56 +61 86 -87 +81 57 -58 62 -63 +87 82 -83 +57 +62 87 -88 +82 58 -59 63 -64 +88 83 -84 +58 +63 88 +83 +59 +64 89 +84 60 -61 65 -66 -85 -86 90 -91 +85 61 -62 66 -67 +91 86 
-87 +61 +66 91 -92 +86 62 -63 67 -68 +92 87 -88 +62 +67 92 -93 +87 63 -64 68 -69 +93 88 -89 +63 +68 93 +88 +64 +69 94 +89 65 -66 70 -71 -90 -91 95 -96 +90 66 -67 71 -72 +96 91 -92 +66 +71 96 -97 +91 67 -68 72 -73 +97 92 -93 +67 +72 97 -98 +92 68 -69 73 -74 +98 93 -94 +68 +73 98 +93 +69 +74 99 +94 75 -76 80 -81 -100 -101 105 -106 +100 76 -77 81 -82 +106 101 -102 +76 +81 106 -107 +101 77 -78 82 -83 +107 102 -103 +77 +82 107 -108 +102 78 -79 83 -84 +108 103 -104 +78 +83 108 +103 +79 +84 109 +104 80 -81 85 -86 -105 -106 110 -111 +105 81 -82 86 -87 +111 106 -107 +81 +86 111 -112 +106 82 -83 87 -88 +112 107 -108 +82 +87 112 -113 +107 83 -84 88 -89 +113 108 -109 +83 +88 113 +108 +84 +89 114 +109 85 -86 90 -91 -110 -111 115 -116 +110 86 -87 91 -92 +116 111 -112 +86 +91 116 -117 +111 87 -88 92 -93 +117 112 -113 +87 +92 117 -118 +112 88 -89 93 -94 +118 113 -114 +88 +93 118 +113 +89 +94 119 +114 90 -91 95 -96 -115 -116 120 -121 +115 91 -92 96 -97 +121 116 -117 +91 +96 121 -122 +116 92 -93 97 -98 +122 117 -118 +92 +97 122 -123 +117 93 -94 98 -99 +123 118 -119 +93 +98 123 +118 +94 +99 124 +119 @@ -4018,2124 +4018,70 @@ def _single_grid_3d_cart_grid_vtu(self): -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 -42 - - - -6 -4 -0 -5 -30 -25 -4 -1 -6 -31 -26 -4 -0 -25 -26 -1 -4 -5 -30 -31 -6 -4 -0 -1 -6 -5 -4 -25 -26 -31 -30 -6 -4 -1 -6 -31 -26 -4 -2 -7 -32 -27 -4 -1 -26 -27 -2 -4 -6 -31 -32 -7 -4 -1 -2 -7 -6 -4 -26 -27 -32 -31 -6 -4 -2 -7 -32 -27 -4 -3 -8 -33 -28 -4 -2 -27 -28 -3 -4 -7 -32 -33 -8 -4 -2 -3 -8 -7 -4 -27 -28 -33 -32 -6 -4 -3 -8 -33 -28 -4 -4 -9 -34 -29 -4 -3 -28 -29 -4 -4 -8 -33 -34 -9 -4 -3 -4 -9 -8 -4 -28 -29 -34 -33 -6 -4 -5 -10 -35 -30 -4 -6 -11 -36 -31 -4 -5 -30 -31 -6 -4 -10 -35 -36 -11 -4 -5 -6 -11 -10 -4 -30 -31 -36 -35 -6 -4 -6 -11 -36 -31 -4 -7 12 -37 -32 -4 -6 -31 -32 -7 -4 -11 -36 -37 12 -4 -6 -7 12 -11 -4 -31 -32 -37 -36 -6 -4 -7 12 -37 -32 -4 -8 -13 -38 -33 -4 -7 -32 -33 -8 -4 12 -37 -38 -13 -4 -7 -8 -13 12 -4 -32 -33 -38 -37 -6 -4 -8 -13 -38 -33 -4 -9 -14 -39 -34 -4 -8 -33 -34 -9 -4 -13 -38 -39 -14 -4 -8 -9 -14 -13 -4 -33 -34 -39 -38 -6 -4 -10 -15 -40 -35 -4 -11 -16 -41 -36 -4 -10 -35 -36 -11 -4 -15 -40 -41 -16 -4 -10 -11 -16 -15 -4 -35 -36 -41 -40 -6 -4 -11 -16 -41 -36 -4 12 -17 -42 -37 -4 -11 -36 -37 12 -4 -16 -41 -42 -17 -4 -11 12 -17 -16 -4 -36 -37 -42 -41 -6 -4 12 -17 -42 -37 -4 -13 -18 -43 -38 -4 12 -37 -38 -13 -4 -17 -42 -43 -18 -4 12 -13 -18 -17 -4 -37 -38 -43 -42 -6 -4 -13 -18 -43 -38 -4 -14 -19 -44 -39 -4 -13 -38 -39 -14 -4 -18 -43 -44 -19 -4 -13 -14 -19 -18 -4 -38 -39 -44 -43 -6 -4 -15 -20 -45 -40 -4 -16 -21 -46 -41 -4 -15 -40 -41 -16 -4 -20 -45 -46 -21 -4 -15 -16 -21 -20 -4 -40 -41 -46 -45 -6 -4 -16 -21 -46 -41 -4 -17 -22 -47 -42 -4 -16 -41 -42 -17 -4 -21 -46 -47 -22 -4 -16 -17 -22 -21 -4 -41 -42 -47 -46 -6 -4 -17 -22 -47 -42 -4 -18 -23 -48 -43 -4 -17 -42 -43 -18 -4 -22 -47 -48 -23 -4 -17 -18 -23 -22 -4 -42 -43 -48 -47 -6 -4 -18 -23 -48 -43 -4 -19 -24 -49 -44 -4 -18 -43 -44 -19 -4 -23 -48 -49 -24 -4 -18 -19 -24 -23 -4 -43 -44 -49 -48 -6 -4 -25 -30 -55 -50 -4 -26 -31 -56 -51 -4 -25 -50 -51 -26 -4 -30 -55 -56 -31 -4 -25 -26 -31 -30 -4 -50 -51 -56 -55 -6 -4 -26 -31 -56 -51 -4 -27 -32 -57 -52 -4 -26 -51 -52 -27 -4 -31 -56 -57 -32 -4 -26 -27 -32 -31 -4 -51 -52 -57 -56 -6 -4 -27 -32 -57 -52 -4 -28 -33 -58 -53 -4 -27 -52 -53 -28 -4 -32 -57 -58 -33 -4 -27 -28 -33 -32 -4 -52 
-53 -58 -57 -6 -4 -28 -33 -58 -53 -4 -29 -34 -59 -54 -4 -28 -53 -54 -29 -4 -33 -58 -59 -34 -4 -28 -29 -34 -33 -4 -53 -54 -59 -58 -6 -4 -30 -35 -60 -55 -4 -31 -36 -61 -56 -4 -30 -55 -56 -31 -4 -35 -60 -61 -36 -4 -30 -31 -36 -35 -4 -55 -56 -61 -60 -6 -4 -31 -36 -61 -56 -4 -32 -37 -62 -57 -4 -31 -56 -57 -32 -4 -36 -61 -62 -37 -4 -31 -32 -37 -36 -4 -56 -57 -62 -61 -6 -4 -32 -37 -62 -57 -4 -33 -38 -63 -58 -4 -32 -57 -58 -33 -4 -37 -62 -63 -38 -4 -32 -33 -38 -37 -4 -57 -58 -63 -62 -6 -4 -33 -38 -63 -58 -4 -34 -39 -64 -59 -4 -33 -58 -59 -34 -4 -38 -63 -64 -39 -4 -33 -34 -39 -38 -4 -58 -59 -64 -63 -6 -4 -35 -40 -65 -60 -4 -36 -41 -66 -61 -4 -35 -60 -61 -36 -4 -40 -65 -66 -41 -4 -35 -36 -41 -40 -4 -60 -61 -66 -65 -6 -4 -36 -41 -66 -61 -4 -37 -42 -67 -62 -4 -36 -61 -62 -37 -4 -41 -66 -67 -42 -4 -36 -37 -42 -41 -4 -61 -62 -67 -66 -6 -4 -37 -42 -67 -62 -4 -38 -43 -68 -63 -4 -37 -62 -63 -38 -4 -42 -67 -68 -43 -4 -37 -38 -43 -42 -4 -62 -63 -68 -67 -6 -4 -38 -43 -68 -63 -4 -39 -44 -69 -64 -4 -38 -63 -64 -39 -4 -43 -68 -69 -44 -4 -38 -39 -44 -43 -4 -63 -64 -69 -68 -6 -4 -40 -45 -70 -65 -4 -41 -46 -71 -66 -4 -40 -65 -66 -41 -4 -45 -70 -71 -46 -4 -40 -41 -46 -45 -4 -65 -66 -71 -70 -6 -4 -41 -46 -71 -66 -4 -42 -47 -72 -67 -4 -41 -66 -67 -42 -4 -46 -71 -72 -47 -4 -41 -42 -47 -46 -4 -66 -67 -72 -71 -6 -4 -42 -47 -72 -67 -4 -43 -48 -73 -68 -4 -42 -67 -68 -43 -4 -47 -72 -73 -48 -4 -42 -43 -48 -47 -4 -67 -68 -73 -72 -6 -4 -43 -48 -73 -68 -4 -44 -49 -74 -69 -4 -43 -68 -69 -44 -4 -48 -73 -74 -49 -4 -43 -44 -49 -48 -4 -68 -69 -74 -73 -6 -4 -50 -55 -80 -75 -4 -51 -56 -81 -76 -4 -50 -75 -76 -51 -4 -55 -80 -81 -56 -4 -50 -51 -56 -55 -4 -75 -76 -81 -80 -6 -4 -51 -56 -81 -76 -4 -52 -57 -82 -77 -4 -51 -76 -77 -52 -4 -56 -81 -82 -57 -4 -51 -52 -57 -56 -4 -76 -77 -82 -81 -6 -4 -52 -57 -82 -77 -4 -53 -58 -83 -78 -4 -52 -77 -78 -53 -4 -57 -82 -83 -58 -4 -52 -53 -58 -57 -4 -77 -78 -83 -82 -6 -4 -53 -58 -83 -78 -4 -54 -59 -84 -79 -4 -53 -78 -79 -54 -4 -58 -83 -84 -59 -4 -53 -54 -59 -58 -4 -78 -79 -84 -83 -6 -4 -55 -60 -85 -80 -4 -56 -61 -86 -81 -4 -55 -80 -81 -56 -4 -60 -85 -86 -61 -4 -55 -56 -61 -60 -4 -80 -81 -86 -85 -6 -4 -56 -61 -86 -81 -4 -57 -62 -87 -82 -4 -56 -81 -82 -57 -4 -61 -86 -87 -62 -4 -56 -57 -62 -61 -4 -81 -82 -87 -86 -6 -4 -57 -62 -87 -82 -4 -58 -63 -88 -83 -4 -57 -82 -83 -58 -4 -62 -87 -88 -63 -4 -57 -58 -63 -62 -4 -82 -83 -88 -87 -6 -4 -58 -63 -88 -83 -4 -59 -64 -89 -84 -4 -58 -83 -84 -59 -4 -63 -88 -89 -64 -4 -58 -59 -64 -63 -4 -83 -84 -89 -88 -6 -4 -60 -65 -90 -85 -4 -61 -66 -91 -86 -4 -60 -85 -86 -61 -4 -65 -90 -91 -66 -4 -60 -61 -66 -65 -4 -85 -86 -91 -90 -6 -4 -61 -66 -91 -86 -4 -62 -67 -92 -87 -4 -61 -86 -87 -62 -4 -66 -91 -92 -67 -4 -61 -62 -67 -66 -4 -86 -87 -92 -91 -6 -4 -62 -67 -92 -87 -4 -63 -68 -93 -88 -4 -62 -87 -88 -63 -4 -67 -92 -93 -68 -4 -62 -63 -68 -67 -4 -87 -88 -93 -92 -6 -4 -63 -68 -93 -88 -4 -64 -69 -94 -89 -4 -63 -88 -89 -64 -4 -68 -93 -94 -69 -4 -63 -64 -69 -68 -4 -88 -89 -94 -93 -6 -4 -65 -70 -95 -90 -4 -66 -71 -96 -91 -4 -65 -90 -91 -66 -4 -70 -95 -96 -71 -4 -65 -66 -71 -70 -4 -90 -91 -96 -95 -6 -4 -66 -71 -96 -91 -4 -67 -72 -97 -92 -4 -66 -91 -92 -67 -4 -71 -96 -97 -72 -4 -66 -67 -72 -71 -4 -91 -92 -97 -96 -6 -4 -67 -72 -97 -92 -4 -68 -73 -98 -93 -4 -67 -92 -93 -68 -4 -72 -97 -98 -73 -4 -67 -68 -73 -72 -4 -92 -93 -98 -97 -6 -4 -68 -73 -98 -93 -4 -69 -74 -99 -94 -4 -68 -93 -94 -69 -4 -73 -98 -99 -74 -4 -68 -69 -74 -73 -4 -93 -94 -99 -98 -6 -4 -75 -80 -105 -100 -4 -76 -81 -106 -101 -4 -75 -100 -101 -76 -4 -80 -105 -106 -81 -4 -75 -76 -81 -80 -4 -100 -101 -106 -105 -6 -4 -76 -81 -106 
-101 -4 -77 -82 -107 -102 -4 -76 -101 -102 -77 -4 -81 -106 -107 -82 -4 -76 -77 -82 -81 -4 -101 -102 -107 -106 -6 -4 -77 -82 -107 -102 -4 -78 -83 -108 -103 -4 -77 -102 -103 -78 -4 -82 -107 -108 -83 -4 -77 -78 -83 -82 -4 -102 -103 -108 -107 -6 -4 -78 -83 -108 -103 -4 -79 -84 -109 -104 -4 -78 -103 -104 -79 -4 -83 -108 -109 -84 -4 -78 -79 -84 -83 -4 -103 -104 -109 -108 -6 -4 -80 -85 -110 -105 -4 -81 -86 -111 -106 -4 -80 -105 -106 -81 -4 -85 -110 -111 -86 -4 -80 -81 -86 -85 -4 -105 -106 -111 -110 -6 -4 -81 -86 -111 -106 -4 -82 -87 -112 -107 -4 -81 -106 -107 -82 -4 -86 -111 -112 -87 -4 -81 -82 -87 -86 -4 -106 -107 -112 -111 -6 -4 -82 -87 -112 -107 -4 -83 -88 -113 -108 -4 -82 -107 -108 -83 -4 -87 -112 -113 -88 -4 -82 -83 -88 -87 -4 -107 -108 -113 -112 -6 -4 -83 -88 -113 -108 -4 -84 -89 -114 -109 -4 -83 -108 -109 -84 -4 -88 -113 -114 -89 -4 -83 -84 -89 -88 -4 -108 -109 -114 -113 -6 -4 -85 -90 -115 -110 -4 -86 -91 -116 -111 -4 -85 -110 -111 -86 -4 -90 -115 -116 -91 -4 -85 -86 -91 -90 -4 -110 -111 -116 -115 -6 -4 -86 -91 -116 -111 -4 -87 -92 -117 -112 -4 -86 -111 -112 -87 -4 -91 -116 -117 -92 -4 -86 -87 -92 -91 -4 -111 -112 -117 -116 -6 -4 -87 -92 -117 -112 -4 -88 -93 -118 -113 -4 -87 -112 -113 -88 -4 -92 -117 -118 -93 -4 -87 -88 -93 -92 -4 -112 -113 -118 -117 -6 -4 -88 -93 -118 -113 -4 -89 -94 -119 -114 -4 -88 -113 -114 -89 -4 -93 -118 -119 -94 -4 -88 -89 -94 -93 -4 -113 -114 -119 -118 -6 -4 -90 -95 -120 -115 -4 -91 -96 -121 -116 -4 -90 -115 -116 -91 -4 -95 -120 -121 -96 -4 -90 -91 -96 -95 -4 -115 -116 -121 -120 -6 -4 -91 -96 -121 -116 -4 -92 -97 -122 -117 -4 -91 -116 -117 -92 -4 -96 -121 -122 -97 -4 -91 -92 -97 -96 -4 -116 -117 -122 -121 -6 -4 -92 -97 -122 -117 -4 -93 -98 -123 -118 -4 -92 -117 -118 -93 -4 -97 -122 -123 -98 -4 -92 -93 -98 -97 -4 -117 -118 -123 -122 -6 -4 -93 -98 -123 -118 -4 -94 -99 -124 -119 -4 -93 -118 -119 -94 -4 -98 -123 -124 -99 -4 -93 -94 -99 -98 -4 -118 -119 -124 -123 - - - -31 -62 -93 -124 -155 -186 -217 -248 -279 -310 -341 -372 -403 -434 -465 -496 -527 -558 -589 -620 -651 -682 -713 -744 -775 -806 -837 -868 -899 -930 -961 -992 -1023 -1054 -1085 -1116 -1147 -1178 -1209 -1240 -1271 -1302 -1333 -1364 -1395 -1426 -1457 -1488 -1519 -1550 -1581 -1612 -1643 -1674 -1705 -1736 -1767 -1798 -1829 -1860 -1891 -1922 -1953 -1984 +12 +12 +12 +12 +12 +12 +12 +12 +12 +12 +12 +12 +12 +12 +12 +12 +12 +12 +12 +12 +12 +12 +12 +12 +12 +12 +12 +12 +12 +12 +12 +12 +12 +12 +12 +12 +12 +12 +12 +12 +12 +12 +12 +12 +12 +12 +12 +12 +12 +12 +12 +12 From b50758af42e987566651a4212c8ff3c64f7a4953 Mon Sep 17 00:00:00 2001 From: Jakub Both Date: Sun, 15 May 2022 21:47:15 +0200 Subject: [PATCH 32/83] MAINT: Introduce types and namedtuples for meshio_geom. --- src/porepy/viz/exporter.py | 66 +++++++++++++++++++++++++++++--------- 1 file changed, 50 insertions(+), 16 deletions(-) diff --git a/src/porepy/viz/exporter.py b/src/porepy/viz/exporter.py index 0f96ecc9e1..e022251b18 100644 --- a/src/porepy/viz/exporter.py +++ b/src/porepy/viz/exporter.py @@ -26,6 +26,11 @@ # Object type to store data to export. Field = namedtuple('Field', ['name', 'values']) +# Object for managing meshio-relevant data, as well as a container +# for its storage, taking dimensions as inputs. +Meshio_Geom = namedtuple('Meshio_Geom', ['pts', 'connectivity', 'cell_ids']) +MD_Meshio_Geom = Dict[int, Meshio_Geom] + class Exporter: # TODO DOC """ @@ -141,13 +146,13 @@ def __init__( # meshio format. 
Include all but the 0-d grids self.dims = np.setdiff1d(self.gb.all_dims(), [0]) num_dims = self.dims.size - self.meshio_geom = dict(zip(self.dims, [tuple()] * num_dims)) # type: ignore + self.meshio_geom: MD_Meshio_Geom = dict(zip(self.dims, [tuple()] * num_dims)) # Generate infrastructure for storing fixed-dimensional mortar grids # in meshio format. self.m_dims = np.unique([d["mortar_grid"].dim for _, d in self.gb.edges()]) num_m_dims = self.m_dims.size - self.m_meshio_geom = dict(zip(self.m_dims, [tuple()] * num_m_dims)) # type: ignore + self.m_meshio_geom: MD_Meshio_Geom = dict(zip(self.m_dims, [tuple()] * num_m_dims)) # Generate geometrical information in meshio format self._update_meshio_geom() @@ -626,7 +631,7 @@ def _add_extra_subdomain_data(self, subdomain_data: SubdomainData) -> SubdomainD subdomain_data[(g, "mortar_side")] = pp.grids.mortar_grid.MortarSides.NONE_SIDE.value * ones #TODO include here? - #cell_id = meshio_geom[2] + #cell_id = meshio_geom.cell_ids ## add also the cells ids #cell_data.update({self.cell_id_key: cell_id}) @@ -816,6 +821,7 @@ def _export_gb_pvd(self, file_name: str, time_step: int) -> None: # Include the grids of all dimensions, but only if the grids of this # dimension are included in the vtk export for dim in self.dims: + # TODO in the current setting, is it at all possible to have meshio_geom None? if self.meshio_geom[dim] is not None: o_file.write( fm % self._make_file_name(self.file_name, time_step, dim) @@ -823,6 +829,7 @@ def _export_gb_pvd(self, file_name: str, time_step: int) -> None: # Same for mortar grids. for dim in self.m_dims: + # TODO in the current setting, is it at all possible to have meshio_geom None? if self.m_meshio_geom[dim] is not None: o_file.write( fm % self._make_file_name(self.file_name + "_mortar_", time_step, dim) @@ -946,7 +953,7 @@ def _export_1d( meshio_cell_id[block] = np.array(cell_id[cell_type]) # Return final meshio data: points, cell (connectivity), cell ids - return meshio_pts, meshio_cells, meshio_cell_id + return Meshio_Geom(meshio_pts, meshio_cells, meshio_cell_id) def _simplex_cell_to_nodes( self, @@ -1137,7 +1144,7 @@ def _export_2d( meshio_cell_id[block] = np.array(cell_id[cell_type]) # Return final meshio data: points, cell (connectivity), cell ids - return meshio_pts, meshio_cells, meshio_cell_id + return Meshio_Geom(meshio_pts, meshio_cells, meshio_cell_id) def _sort_point_pairs_numba(self, lines: np.ndarray) -> np.ndarray: """ @@ -1458,24 +1465,48 @@ def _export_3d( meshio_cell_id[block] = np.array(cell_id[cell_type]) # Return final meshio data: points, cell (connectivity), cell ids - return meshio_pts, meshio_cells, meshio_cell_id + return Meshio_Geom(meshio_pts, meshio_cells, meshio_cell_id) - def _write(self, fields: Iterable[Field], file_name: str, meshio_geom) -> None: + def _write( + self, + fields: Iterable[Field], + file_name: str, + meshio_geom: Meshio_Geom, + ) -> None: + """ + Interface to meshio for exporting. - cell_data = {} + Export geometrical mesh information and cell data. Also manage + the reuse of data. + + Parameters: + fields (iterable, Field): fields which shall be exported + file_name (str): name of final file of export + meshio_geom (Meshio_Geom): Namedtuple of points, + connectivity information, and cell ids in + meshio format. 
+
+
+        for a single dimension
+        """
+        # TODO adapt doc on reuse

-        cell_data = {}
+        # Initialize empty cell data dictionary
+        cell_data = {}

+        # Split the data for each group of geometrically uniform cells
+        # Utilize meshio_geom for this.
         for field in fields:
+
+            # FIXME as implemented now, field.values should never be None.
             if field.values is None:
                 continue

-            # for each field create a sub-vector for each geometrically uniform group of cells
+            # For each field create a sub-vector for each geometrically uniform group of cells
+            num_block = meshio_geom.cell_ids.size
             cell_data[field.name] = np.empty(num_block, dtype=object)
-            # fill up the data
-            for block, ids in enumerate(cell_id):
+
+            # Fill up the data
+            for block, ids in enumerate(meshio_geom.cell_ids):
                 if field.values.ndim == 1:
                     cell_data[field.name][block] = field.values[ids]
                 elif field.values.ndim == 2:
@@ -1483,12 +1514,15 @@ def _write(self, fields: Iterable[Field], file_name: str, meshio_geom) -> None:
                 else:
                     raise ValueError

+        # TODO move to extra_subdomain_data
         # add also the cells ids
-        cell_data.update({self.cell_id_key: cell_id})
+        reuse_data = self._reuse_data and self._prev_exported_time_step is not None
+        if not reuse_data:
+            cell_data.update({self.cell_id_key: meshio_geom.cell_ids})

         # create the meshio object
         meshio_grid_to_export = meshio.Mesh(
-            meshio_geom[0], meshio_geom[1], cell_data=cell_data
+            meshio_geom.pts, meshio_geom.connectivity, cell_data=cell_data
         )

         # Exclude/block fixed geometric data as grid points and connectivity information

From 82de931b0e4be3ebb6bd30756056751079b60d4a Mon Sep 17 00:00:00 2001
From: Jakub Both
Date: Mon, 16 May 2022 22:37:27 +0200
Subject: [PATCH 33/83] MAINT: Export constant mesh data.

---
 src/porepy/viz/exporter.py | 196 +++++++++++++++++++++++++++++------
 1 file changed, 168 insertions(+), 28 deletions(-)

diff --git a/src/porepy/viz/exporter.py b/src/porepy/viz/exporter.py
index e022251b18..7abcaed150 100644
--- a/src/porepy/viz/exporter.py
+++ b/src/porepy/viz/exporter.py
@@ -156,6 +156,7 @@ def __init__(

         # Generate geometrical information in meshio format
         self._update_meshio_geom()
+        self._update_constant_mesh_data()

         # Counter for time step. Will be used to identify files of individual time step,
         # unless this is overridden by optional parameters in write
@@ -171,6 +172,10 @@ def __init__(
         # constant over single time steps.
         self._prev_exported_time_step: Union[int, type(None)] = None

+        # Reference to the last time step used for exporting constant data.
+        self._time_step_counter_constant_data: int = 0
+        self._exported_constant_data_up_to_date: bool = False
+
         # Parameter to be used in several occasions for adding time stamps.
         self.padding = 6

@@ -240,6 +245,7 @@ def write_vtu(

             # Update geometrical info in meshio format for the updated grid
             self._update_meshio_geom()
+            self._update_constant_mesh_data()

             # Reset control parameter for the reuse of fixed data in the
             # write routine, so the updated mesh is used for further export.
@@ -260,24 +266,38 @@ def write_vtu(
         # 2. Unify data type.
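# ---- Editor's note (a sketch, not part of the patch) ----------------------
# The call below normalizes heterogeneous user input (bare state keys, or
# (entity, key, value) tuples) into one dictionary per entity type, keyed by
# (entity, name). A minimal, self-contained illustration of that dispatch;
# the Grid stand-in and sort_and_unify are invented here and are not the
# PorePy classes or methods:
import numpy as np

class Grid:  # stand-in for pp.Grid
    pass

def sort_and_unify(data, known_states):
    subdomain_data, interface_data = {}, {}
    for item in data:
        if isinstance(item, str):
            # Bare key: look the values up in the known states.
            for entity, values in known_states.get(item, []):
                target = subdomain_data if isinstance(entity, Grid) else interface_data
                target[(entity, item)] = values
        elif isinstance(item, tuple) and len(item) == 3:
            entity, key, values = item
            target = subdomain_data if isinstance(entity, Grid) else interface_data
            target[(entity, key)] = np.asarray(values)
        else:
            raise ValueError(f"Unsupported data format: {item}")
    return subdomain_data, interface_data

g = Grid()
sub, intf = sort_and_unify([(g, "pressure", np.ones(4))], known_states={})
assert (g, "pressure") in sub and not intf
# ---- end note --------------------------------------------------------------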
subdomain_data, interface_data = self._sort_and_unify_data(data) - # Add geometrical info for nodes and edges - # TODO - reuse_data = self._reuse_data and self._prev_exported_time_step is not None - if reuse_data: - subdomain_data = self._add_extra_subdomain_data(subdomain_data) - interface_data = self._add_extra_interface_data(interface_data) - #subdomain_data = self._add_extra_subdomain_data(subdomain_data) - #interface_data = self._add_extra_interface_data(interface_data) + ## Add geometrical info for nodes and edges + ## TODO move somewhere else? or include an extra routine, and always execute after performing _update_meshio_geom + #reuse_data = self._reuse_data and self._prev_exported_time_step is not None + #if reuse_data: + # subdomain_data = self._add_extra_subdomain_data(subdomain_data) + # interface_data = self._add_extra_interface_data(interface_data) + ##subdomain_data = self._add_extra_subdomain_data(subdomain_data) + ##interface_data = self._add_extra_interface_data(interface_data) - # Export data and treat node and edge data separately + # Export subdomain and interface data to vtu format if existing if subdomain_data: - self._export_data_vtu(subdomain_data, time_step, "subdomain data") + self._export_data_vtu(subdomain_data, time_step) if interface_data: - self._export_data_vtu(interface_data, time_step, "interface data") + self._export_data_vtu(interface_data, time_step, interface_data = True) + + # Export constant data to vtu when outdated + if not self._exported_constant_data_up_to_date: + self._export_data_vtu(self._constant_subdomain_data, time_step, constant_data = True) + self._export_data_vtu(self._constant_interface_data, time_step, constant_data = True, interface_data = True) + + # Store the time step counter for later reference (required for pvd files) + self._time_step_counter_constant_data = time_step + + # Identify the constant data as fixed. Has the effect that + # the constant data won't be exported if not the mesh is updated + # or new constant data is added. + self._exported_constant_data_up_to_date = True # Export grid bucket to pvd format file_name = self._make_file_name(self.file_name, time_step, extension=".pvd") file_name = self._append_folder_name(self.folder_name, file_name) + # TODO include the constant data in _export_gb_pvd self._export_gb_pvd(file_name, time_step) # Update previous time step used for export to be used in the next application. @@ -715,11 +735,122 @@ def _add_extra_interface_data(self, interface_data: InterfaceData) -> InterfaceD return interface_data + def _update_constant_mesh_data(self) -> None: + """ + Construct/update subdomain and interface data related with geometry and topology. + + The data is identified as constant data. The final containers, stored as + attributes _constant_subdomain_data and _constant_interface_data, have + the same format as the output of _sort_and_unify_data. + """ +# # All extra fields to be added to the data container +# self._extra_subdomain_data = ["grid_dim", "is_mortar", "mortar_side"] +# # TODO include cell_ids, grid_node_number? +# # TODO is this list used anywhere? + + # Identify change in constant data. Has the effect that + # the constant data container will be exported at the + # next application of write_vtu(). 
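# ---- Editor's note (a sketch, not part of the patch) ----------------------
# The assignment below implements a classic "dirty flag": any change to the
# constant data marks the cached export as stale, and write_vtu() then
# re-exports only while the flag indicates staleness. The same bookkeeping
# in isolation (class and attribute names invented):
class ConstantDataCache:
    def __init__(self):
        self._up_to_date = False
        self._num_exports = 0

    def update(self):
        # Mutating the constant data invalidates earlier exports.
        self._up_to_date = False

    def export(self):
        # Export only when the cached files are outdated.
        if not self._up_to_date:
            self._num_exports += 1  # stand-in for writing vtu files
            self._up_to_date = True

cache = ConstantDataCache()
cache.update(); cache.export(); cache.export()
assert cache._num_exports == 1  # the second export() was a no-op
# ---- end note --------------------------------------------------------------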
+ self._exported_constant_data_up_to_date: bool = False + + # Initialize container for constant subdomain data + if not hasattr(self, "_constant_subdomain_data"): + self._constant_subdomain_data = dict() + + # TODO include cell_id and grid_node_number + # Add mesh related, constant subdomain data by direct assignment + for g, d in self.gb.nodes(): + ones = np.ones(g.num_cells, dtype=int) + #self._constant_subdomain_data[(g, "cell_id")] = ... + self._constant_subdomain_data[(g, "grid_dim")] = g.dim * ones + #self._constant_subdomain_data[(g, "grid_node_number")] = d["node_number"] * ones # TODO? + self._constant_subdomain_data[(g, "is_mortar")] = 0 * ones + self._constant_subdomain_data[(g, "mortar_side")] = pp.grids.mortar_grid.MortarSides.NONE_SIDE.value * ones + + #TODO include here? + #cell_id = meshio_geom.cell_ids + ## add also the cells ids + #cell_data.update({self.cell_id_key: cell_id}) + + # Similarly for interface data + +# # All extra fields to be added to the data container +# self._extra_interface_data = ["grid_dim", "cell_id", "grid_edge_number", "is_mortar", "mortar_side"] +# # TODO is this used anywhere? + + # Initialize container for constant interface data + if not hasattr(self, "_constant_interface_data"): + self._constant_interface_data = dict() + + # TODO can't we do this more explicit and by that shorter and simpler? + # Add mesh related, constant interface data by direct assignment. + for e, d in self.gb.edges(): + + # Fetch mortar grid + mg = d["mortar_grid"] + + # Construct empty arrays for all extra edge data + self._constant_interface_data[(e, "grid_dim")] = np.empty(0, dtype=int) + self._constant_interface_data[(e, "cell_id")] = np.empty(0, dtype=int) + self._constant_interface_data[(e, "grid_edge_number")] = np.empty(0, dtype=int) + self._constant_interface_data[(e, "is_mortar")] = np.empty(0, dtype=int) + self._constant_interface_data[(e, "mortar_side")] = np.empty(0, dtype=int) + + # Initialize offset + mg_num_cells = 0 + + # Assign extra edge data by collecting values on both sides. + for side, g in mg.side_grids.items(): + ones = np.ones(g.num_cells, dtype=int) + + # Grid dimension of the mortar grid + self._constant_interface_data[(e, "grid_dim")] = np.hstack( + ( + self._constant_interface_data[(e, "grid_dim")], + g.dim * ones + ) + ) + + # Cell ids of the mortar grid + self._constant_interface_data[(e, "cell_id")] = np.hstack( + ( + self._constant_interface_data[(e, "cell_id")], + np.arange(g.num_cells, dtype=int) + mg_num_cells + ) + ) + + # Grid edge number of each edge + self._constant_interface_data[(e, "grid_edge_number")] = np.hstack( + ( + self._constant_interface_data[(e, "grid_edge_number")], + d["edge_number"] * ones + ) + ) + + # Whether the edge is mortar + self._constant_interface_data[(e, "is_mortar")] = np.hstack( + ( + self._constant_interface_data[(e, "is_mortar")], + ones + ) + ) + + # Side of the mortar + self._constant_interface_data[(e, "mortar_side")] = np.hstack( + ( + self._constant_interface_data[(e, "mortar_side")], + side.value * ones + ) + ) + + # Update offset + mg_num_cells += g.num_cells + def _export_data_vtu( self, data: Union[SubdomainData, InterfaceData], time_step: int, - data_type: str, + **kwargs, ) -> None: """ Collect data associated to a single grid dimension @@ -727,29 +858,38 @@ def _export_data_vtu( For each fixed dimension, all subdomains of that dimension and the data related to that subdomains will be exported simultaneously. - The same for interfaces. + Analogously for interfaces. 
Parameters: - data (Union[SubdomainData, InterfaceData]): Subdomain or edge data. The routine - notices itself of which type the data is and proceeds - accordingly. + data (Union[SubdomainData, InterfaceData]): Subdomain or interface data. time_step (int): time_step to be used to append the file name - data_type (str): has to be "subdomain data" or "interface data" indicating the - type of the data. + kwargs (optional): Optional keywords; + 'interface_data' (boolean) indicates whether data is associated to + an interface, default is False; + 'constant_data' (boolean) indicates whether data is treated as + constant in time, default is False. """ - # Determine whether data corresponds to subdomain or interface data. - assert(data_type in ["subdomain data", "interface data"]) - is_subdomain_data = data_type == "subdomain data" + # Check for optional keywords + self.interface_data: bool = kwargs.pop("interface_data", False) + self.constant_data: bool = kwargs.pop("constant_data", False) + if kwargs: + msg = "_export_data_vtu got unexpected keyword argument '{}'" + raise TypeError(msg.format(kwargs.popitem()[0])) # Fetch the dimnensions to be traversed. For subdomains, fetch the dimensions # of the available grids, and for interfaces fetch the dimensions of the available # mortar grids. + is_subdomain_data: bool = not self.interface_data dims = self.dims if is_subdomain_data else self.m_dims - # Define file name (extend in case of interface data) - file_name_base: str = self.file_name if is_subdomain_data else self.file_name + "_mortar" + # Define file name base + file_name_base: str = self.file_name + # Extend in case of constant data + file_name_base += "_constant" if self.constant_data else "" + # Extend in case of interface data + file_name_base += "_mortar" if self.interface_data else "" # Append folder name to file name base - file_name_base: str = self._append_folder_name(self.folder_name, file_name_base) + file_name_base = self._append_folder_name(self.folder_name, file_name_base) # Collect unique keys, and for unique sorting, sort by alphabet keys = list(set([key for _,key in data])) @@ -757,12 +897,12 @@ def _export_data_vtu( # Collect the data and extra data in a single stack for each dimension for dim in dims: - # Define the file name + # Define the full file name file_name: str = self._make_file_name(file_name_base, time_step, dim) - # Get all geometrical entities of dimension dim: subdomains - # with correct grid dimension for subdomain data, and interfaces - # with correct mortar grid dimension for edge data. + # Get all geometrical entities of dimension dim: + # subdomains with correct grid dimension for subdomain data, and + # interfaces with correct mortar grid dimension for interface data. # TODO update and simplify when new GridTree structure is in place. entities: Union[List[pp.Grid], List[Tuple[pp.Grid, pp.Grid]]] = [] if is_subdomain_data: From 886715975137fba4f91d1a2da35c5954b06baf56 Mon Sep 17 00:00:00 2001 From: Jakub Both Date: Mon, 16 May 2022 23:37:39 +0200 Subject: [PATCH 34/83] MAINT: Move definition of extra data into extra containers for constant data. --- src/porepy/viz/exporter.py | 148 +++---------------------------------- 1 file changed, 12 insertions(+), 136 deletions(-) diff --git a/src/porepy/viz/exporter.py b/src/porepy/viz/exporter.py index 7abcaed150..79cb7ef02e 100644 --- a/src/porepy/viz/exporter.py +++ b/src/porepy/viz/exporter.py @@ -266,15 +266,6 @@ def write_vtu( # 2. Unify data type. 
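# ---- Editor's note (a sketch, not part of the patch) ----------------------
# The keyword handling in _export_data_vtu above follows a common
# pop-and-complain pattern: consume the keywords you expect, then fail
# loudly on anything left over. Reduced to its essence (function name
# invented for illustration):
def configure(**kwargs):
    interface_data = kwargs.pop("interface_data", False)
    constant_data = kwargs.pop("constant_data", False)
    if kwargs:
        raise TypeError(
            f"configure() got unexpected keyword argument '{kwargs.popitem()[0]}'"
        )
    return interface_data, constant_data

assert configure(constant_data=True) == (False, True)
try:
    configure(typo=True)
except TypeError:
    pass  # misspelled options are rejected instead of silently ignored
# ---- end note --------------------------------------------------------------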
subdomain_data, interface_data = self._sort_and_unify_data(data) - ## Add geometrical info for nodes and edges - ## TODO move somewhere else? or include an extra routine, and always execute after performing _update_meshio_geom - #reuse_data = self._reuse_data and self._prev_exported_time_step is not None - #if reuse_data: - # subdomain_data = self._add_extra_subdomain_data(subdomain_data) - # interface_data = self._add_extra_interface_data(interface_data) - ##subdomain_data = self._add_extra_subdomain_data(subdomain_data) - ##interface_data = self._add_extra_interface_data(interface_data) - # Export subdomain and interface data to vtu format if existing if subdomain_data: self._export_data_vtu(subdomain_data, time_step) @@ -627,114 +618,6 @@ def isinstance_Tuple_str_array(pt) -> bool: return subdomain_data, interface_data - def _add_extra_subdomain_data(self, subdomain_data: SubdomainData) -> SubdomainData: - """Enhance subdomain data with geometrical information. - - Parameters: - subdomain_data (SubdomainData, optional): data contains which will be - enhanced - - Returns: - SubdomainData: previous subdomain_data with additional information on - the grid dimension, mortar side and whether the grid is - mortar. - """ - # All extra fields to be added to the data container - self._extra_subdomain_data = ["grid_dim", "is_mortar", "mortar_side"] - - # Add info by direct assignment - for g, d in self.gb.nodes(): - ones = np.ones(g.num_cells, dtype=int) - subdomain_data[(g, "grid_dim")] = g.dim * ones - #subdomain_data[(g, "grid_node_number")] = d["node_number"] * ones # TODO? - subdomain_data[(g, "is_mortar")] = 0 * ones - subdomain_data[(g, "mortar_side")] = pp.grids.mortar_grid.MortarSides.NONE_SIDE.value * ones - - #TODO include here? - #cell_id = meshio_geom.cell_ids - ## add also the cells ids - #cell_data.update({self.cell_id_key: cell_id}) - - return subdomain_data - - def _add_extra_interface_data(self, interface_data: InterfaceData) -> InterfaceData: - """Enahnce edge data with geometrical information. - - Parameters: - interface_data (InterfaceData, optional): data contains which will be - enhanced - - Returns: - InterfaceData: previous interface_data with additional information on - the grid dimension, the cell ids, the grid edge number, - mortar side and whether the grid is mortar. - """ - # All extra fields to be added to the data container - self._extra_interface_data = ["grid_dim", "cell_id", "grid_edge_number", "is_mortar", "mortar_side"] - - # Add info by direct assignment - have to take into account values on both sides - # TODO can't we do this more explicit and by that shorter and simpler? - for e, d in self.gb.edges(): - - # Fetch mortar grid - mg = d["mortar_grid"] - - # Construct empty arrays for all extra edge data - interface_data[(e, "grid_dim")] = np.empty(0, dtype=int) - interface_data[(e, "cell_id")] = np.empty(0, dtype=int) - interface_data[(e, "grid_edge_number")] = np.empty(0, dtype=int) - interface_data[(e, "is_mortar")] = np.empty(0, dtype=int) - interface_data[(e, "mortar_side")] = np.empty(0, dtype=int) - - # Assign extra edge data by collecting values on both sides. 
- mg_num_cells = 0 - for side, g in mg.side_grids.items(): - ones = np.ones(g.num_cells, dtype=int) - - # Grid dimension of the mortar grid - interface_data[(e, "grid_dim")] = np.hstack( - ( - interface_data[(e, "grid_dim")], - g.dim * ones - ) - ) - - # Cell ids of the mortar grid - interface_data[(e, "cell_id")] = np.hstack( - ( - interface_data[(e, "cell_id")], - np.arange(g.num_cells, dtype=int) + mg_num_cells - ) - ) - - # Grid edge number of each edge - interface_data[(e, "grid_edge_number")] = np.hstack( - ( - interface_data[(e, "grid_edge_number")], - d["edge_number"] * ones - ) - ) - - # Whether the edge is mortar - interface_data[(e, "is_mortar")] = np.hstack( - ( - interface_data[(e, "is_mortar")], - ones - ) - ) - - # Side of the mortar - interface_data[(e, "mortar_side")] = np.hstack( - ( - interface_data[(e, "mortar_side")], - side.value * ones - ) - ) - - mg_num_cells += g.num_cells - - return interface_data - def _update_constant_mesh_data(self) -> None: """ Construct/update subdomain and interface data related with geometry and topology. @@ -743,46 +626,39 @@ def _update_constant_mesh_data(self) -> None: attributes _constant_subdomain_data and _constant_interface_data, have the same format as the output of _sort_and_unify_data. """ -# # All extra fields to be added to the data container -# self._extra_subdomain_data = ["grid_dim", "is_mortar", "mortar_side"] -# # TODO include cell_ids, grid_node_number? -# # TODO is this list used anywhere? - # Identify change in constant data. Has the effect that # the constant data container will be exported at the # next application of write_vtu(). self._exported_constant_data_up_to_date: bool = False + # Define constant subdomain data related to the mesh + # Initialize container for constant subdomain data if not hasattr(self, "_constant_subdomain_data"): self._constant_subdomain_data = dict() - # TODO include cell_id and grid_node_number + # Initialize offset + g_num_cells: int = 0 + # Add mesh related, constant subdomain data by direct assignment for g, d in self.gb.nodes(): ones = np.ones(g.num_cells, dtype=int) - #self._constant_subdomain_data[(g, "cell_id")] = ... + self._constant_subdomain_data[(g, "cell_id")] = np.arange(g.num_cells, dtype=int) + g_num_cells self._constant_subdomain_data[(g, "grid_dim")] = g.dim * ones - #self._constant_subdomain_data[(g, "grid_node_number")] = d["node_number"] * ones # TODO? + if "node_number" in d: + self._constant_subdomain_data[(g, "grid_node_number")] = d["node_number"] * ones self._constant_subdomain_data[(g, "is_mortar")] = 0 * ones self._constant_subdomain_data[(g, "mortar_side")] = pp.grids.mortar_grid.MortarSides.NONE_SIDE.value * ones - #TODO include here? - #cell_id = meshio_geom.cell_ids - ## add also the cells ids - #cell_data.update({self.cell_id_key: cell_id}) - - # Similarly for interface data + # Update offset + g_num_cells += g.num_cells -# # All extra fields to be added to the data container -# self._extra_interface_data = ["grid_dim", "cell_id", "grid_edge_number", "is_mortar", "mortar_side"] -# # TODO is this used anywhere? + # Define constant interface data related to the mesh # Initialize container for constant interface data if not hasattr(self, "_constant_interface_data"): self._constant_interface_data = dict() - # TODO can't we do this more explicit and by that shorter and simpler? # Add mesh related, constant interface data by direct assignment. 
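# ---- Editor's note (a sketch, not part of the patch) ----------------------
# The loop below grows each per-interface array side by side via np.hstack,
# offsetting the cell ids by the number of cells already visited. The same
# accumulation pattern in isolation, with invented side-grid cell counts:
import numpy as np

side_num_cells = [3, 2]  # e.g. the two sides of a mortar grid
cell_id = np.empty(0, dtype=int)
offset = 0
for n in side_num_cells:
    cell_id = np.hstack((cell_id, np.arange(n, dtype=int) + offset))
    offset += n

assert cell_id.tolist() == [0, 1, 2, 3, 4]
# Collecting the pieces in a list and calling np.hstack once at the end
# would avoid repeated reallocation, at the cost of a longer loop body.
# ---- end note --------------------------------------------------------------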
for e, d in self.gb.edges(): @@ -797,7 +673,7 @@ def _update_constant_mesh_data(self) -> None: self._constant_interface_data[(e, "mortar_side")] = np.empty(0, dtype=int) # Initialize offset - mg_num_cells = 0 + mg_num_cells: int = 0 # Assign extra edge data by collecting values on both sides. for side, g in mg.side_grids.items(): From cda7d13aa2f049968c9efa353b81a35bb77a8adc Mon Sep 17 00:00:00 2001 From: Jakub Both Date: Mon, 16 May 2022 23:49:18 +0200 Subject: [PATCH 35/83] MAINT: Remove old code targeting reducing computational time for exporting by copying from old exports. --- src/porepy/viz/exporter.py | 226 +------------------------------------ 1 file changed, 5 insertions(+), 221 deletions(-) diff --git a/src/porepy/viz/exporter.py b/src/porepy/viz/exporter.py index 79cb7ef02e..8280a3c355 100644 --- a/src/porepy/viz/exporter.py +++ b/src/porepy/viz/exporter.py @@ -113,10 +113,7 @@ def __init__( folder_name (str, optional): folder name, all files are stored in kwargs (optional): Optional keywords; 'fixed_grid' (boolean) to control whether the grid(bucket) may be redfined; - 'binary' (boolean) controling whether data is stored in binary format; - 'reuse_data' (boolean) controling whether geometrical data is - reused from the previous time step when exporting to file. - + 'binary' (boolean) controling whether data is stored in binary format. """ # Exporter is operating on grid buckets. If a grid is provided, convert to a grid bucket. if isinstance(gb, pp.Grid): @@ -134,7 +131,6 @@ def __init__( # Check for optional keywords self.fixed_grid: bool = kwargs.pop("fixed_grid", True) self.binary: bool = kwargs.pop("binary", True) - self._reuse_data: bool = kwargs.pop("reuse_data", False) # NOTE for now, do not set True per default. if kwargs: msg = "Exporter() got unexpected keyword argument '{}'" raise TypeError(msg.format(kwargs.popitem()[0])) @@ -165,13 +161,6 @@ def __init__( # Storage for file name extensions for time steps self._exported_time_step_file_names: List[int] = [] - # Time step used in the latest write routine. Will be optionally used - # to retain constant geometrical and extra data in the next write routine. - # It will be used as control parameter, and needs to be reset when the - # general file name is replaces and/or the geometrical data is not - # constant over single time steps. - self._prev_exported_time_step: Union[int, type(None)] = None - # Reference to the last time step used for exporting constant data. self._time_step_counter_constant_data: int = 0 self._exported_constant_data_up_to_date: bool = False @@ -247,10 +236,6 @@ def write_vtu( self._update_meshio_geom() self._update_constant_mesh_data() - # Reset control parameter for the reuse of fixed data in the - # write routine, so the updated mesh is used for further export. - self._prev_exported_time_step = None - # If the problem is time dependent, but no time step is set, we set one # using the updated, internal counter. if time_dependent and time_step is None: @@ -291,10 +276,6 @@ def write_vtu( # TODO include the constant data in _export_gb_pvd self._export_gb_pvd(file_name, time_step) - # Update previous time step used for export to be used in the next application. - if time_step is not None: - self._prev_exported_time_step = time_step - # TODO does not contain all keywords as in write_vtu. make consistent? 
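# ---- Editor's note (a sketch, not part of the patch) ----------------------
# For orientation before the pvd routines below: a .pvd file is a small XML
# collection that points ParaView at the per-dimension .vtu files, with one
# DataSet entry per file and time step. Hand-rolled, with invented file names:
header = (
    '<?xml version="1.0"?>\n'
    '<VTKFile type="Collection" version="0.1" byte_order="LittleEndian">\n'
    "<Collection>\n"
)
entry = '<DataSet group="" part="" timestep="%f" file="%s"/>\n'
with open("example.pvd", "w") as f:
    f.write(header)
    for time, vtu in [(0.0, "sol_2_000000.vtu"), (1.0, "sol_2_000001.vtu")]:
        f.write(entry % (time, vtu))
    f.write("</Collection>\n</VTKFile>")
# ParaView loads "example.pvd" and resolves the listed files per time step.
# ---- end note --------------------------------------------------------------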
def write_pvd( self, @@ -837,7 +818,6 @@ def _export_gb_pvd(self, file_name: str, time_step: int) -> None: # Include the grids of all dimensions, but only if the grids of this # dimension are included in the vtk export for dim in self.dims: - # TODO in the current setting, is it at all possible to have meshio_geom None? if self.meshio_geom[dim] is not None: o_file.write( fm % self._make_file_name(self.file_name, time_step, dim) @@ -845,7 +825,6 @@ def _export_gb_pvd(self, file_name: str, time_step: int) -> None: # Same for mortar grids. for dim in self.m_dims: - # TODO in the current setting, is it at all possible to have meshio_geom None? if self.m_meshio_geom[dim] is not None: o_file.write( fm % self._make_file_name(self.file_name + "_mortar_", time_step, dim) @@ -1490,10 +1469,7 @@ def _write( meshio_geom: Meshio_Geom, ) -> None: """ - Interface to meshio for exporting. - - Export geometrical mesh information and cell data. Also manage - the reuse of data. + Interface to meshio for exporting cell data. Parameters: fields (iterable, Field): fields which shall be exported @@ -1504,8 +1480,6 @@ def _write( for a single dimension """ - # TODO adapt doc on reuse - # Initialize empty cell data dictinary cell_data = {} @@ -1530,207 +1504,17 @@ def _write( else: raise ValueError - # TODO move to extra_subdomain_data - # add also the cells ids - reuse_data = self._reuse_data and self._prev_exported_time_step is not None - if not reuse_data: - cell_data.update({self.cell_id_key: meshio_geom.cell_ids}) + # Add constant cell_ids. To be entirely moved to constant data soon. + cell_data.update({self.cell_id_key: meshio_geom.cell_ids}) - # create the meshio object + # Create the meshio object meshio_grid_to_export = meshio.Mesh( meshio_geom.pts, meshio_geom.connectivity, cell_data=cell_data ) - # Exclude/block fixed geometric data as grid points and connectivity information - # by removing fields mesh and extra subdomain/interface data from meshio_grid_to_export. - # Do this only if explicitly asked in init and if a suitable previous time step - # had been exported already. This data will be copied to the final VTKFile - # from a reference file from this previous time step. - to_copy_prev_exported_data = self._reuse_data and self._prev_exported_time_step is not None - if to_copy_prev_exported_data: - self._remove_fixed_mesh_data(meshio_grid_to_export) - # Write mesh information and data to VTK format. meshio.write(file_name, meshio_grid_to_export, binary=self.binary) - # Include the fixed mesh data and the extra subdomain/interface data by copy - # if remove earlier from meshio_grid_to_export. - if to_copy_prev_exported_data: - self._copy_fixed_mesh_data(file_name) - - def _stash_extra_data(self, meshio_grid: meshio._mesh.Mesh) -> meshio._mesh.Mesh: - """Remove points, cells, and cell data related to the extra subdomain and - interface data from given meshio_grid. - - Auxiliary routine in _write. - - Parameters: - meshio_grid (meshio._mesh.Mesh): The meshio grid to be cleaned. - - Returns: - meshio._mesh.Mesh: Cleaned meshio grid. - - """ - - # Empty points and cells deactivate the export of geometric information - # in meshio. For more details, check the method 'write' in meshio.vtu._vtu. - meshio_grid.points = np.empty((0,3)) - meshio_grid.cells = [] - - # Deactivate extra node/edge data, incl. cell_id as added in _export_list_data. 
- extra_node_edge_names = set([ - "cell_id", - "grid_dim", - "grid_node_number", - "grid_edge_number", - "is_mortar", - "mortar_side" - ]) - # Simply delete from meshio grid - for key in extra_node_edge_names: - if key in meshio_grid.cell_data: - del meshio_grid.cell_data[key] - - def _copy_fixed_mesh_data(self, file_name: str) -> None: - """Given two VTK files (in xml format), transfer all fixed geometric - info from a reference to a new file. - - This includes Piece, Points, Cells, and extra subdomain and interface data. - - Parameters: - file_name(str): Incomplete VTK file, to be completed. - - """ - - # Determine the path to the reference file, based on the current - # file_name and the previous time step. - - # Find latest occurence of "." to identify extension. - # Reverse string in order to use index(). Add 1 to cancel - # the effect of te reverse. - dot_index = list(file_name)[::-1].index(".") + 1 - extension = "".join(list(file_name)[-dot_index:]) - if not extension == ".vtu": - raise NotImplementedError("Only support for VTKFile.") - - # Extract file_name without extension and the time_step stamp. - # For this, assume the time stamp occurs after the latest underscore. - # Use a similar approach as for finding the extension. - latest_underscore_index = list(file_name)[::-1].index("_") - file_name_without_time_extension = "".join(list(file_name)[:-latest_underscore_index]) - - # As in _make_file_name, use padding for the time stamp - prev_exported_time = str(self._prev_exported_time_step).zfill(self.padding) - - # Add the time stamp of the previously exported time step and the same extension. - ref_file_name = ( - file_name_without_time_extension - + prev_exported_time - + extension - ) - - # Fetch VTK file in XML format. - tree = ET.parse(file_name) - new_vtkfile = tree.getroot() - - # Fetch reference VTK file in XML format - ref_tree = ET.parse(ref_file_name) - ref_vtkfile = ref_tree.getroot() - - def fetch_child(root: ET.Element, key: str) -> Union[ET.Element, type(None)]: - """Find the unique (nested) child in xml file root with tag key. - """ - # Iterate over all children and count, by always just - # updating the latest counter. - count = 0 - for count, child in enumerate(root.iter(key)): - count += 1 - # Do not accept non-uniqueness - if count > 1: - raise ValueError(f"No child calles {key} present.") - # Return child, if it exists (and is unique) or None if not. - return child if count==1 else None - - def fetch_data_array( - root: ET.Element, - key: str, - check_existence: bool = True - ) -> Union[ET.Element, type(None)]: - """Find the unique DataArray in xml file root with "Name"=key, - where "Name" is an element of the attributed of the DataArray. - """ - # Count how many candidates have been found (hopefully in the end - # it is just one. - count = 0 - - # To search for nested children in xml files, iterate.. - for child in root.iter("DataArray"): - # Check if the child has the correct Name - if child.attrib["Name"] == key: - count += 1 - data_array = child - - # Check existence only if required. - if check_existence: - assert(count > 0) - - # Check uniqueness of candidates for correct . - if count > 1: - raise ValueError(f"DataArray with name {key} not unique.") - - # Return the DataArray if it exists, otherwise return None - return data_array if count > 0 else None - - # Transfer the attributes of the block. It contains - # the geometrical information on the number of points and cells. 
- ref_piece = fetch_child(ref_vtkfile, "Piece") - new_piece = fetch_child(new_vtkfile, "Piece") - new_piece.attrib = ref_piece.attrib - - # Transfer the content of the with "Name"="Points" - # among its attributes. It contains the coordinates of the points. - ref_points = fetch_data_array(ref_vtkfile, "Points") - new_points = fetch_data_array(new_vtkfile, "Points") - new_points.text = ref_points.text - - # Transfer the which contains cell information including - # cell types and the connectivity. is expected to be not included - # in the current VTKFile. It is therefore added as a whole. It is - # assumed to be a child of - ref_cells = fetch_child(ref_vtkfile, "Cells") - new_piece.insert(1, ref_cells) - - # Transfer extra node/edge data with the keys: - extra_node_edge_names = set([ - "cell_id", - "grid_dim", - "grid_node_number", - "grid_edge_number", - "is_mortar", - "mortar_side" - ]) - - # These are separately stored as under . Hence, fetch - # the corrsponding child and search for the . - new_cell_data = fetch_child(new_vtkfile, "CellData") - # If no available, add to and repeat search for . - if new_cell_data == None: - new_cell_data = ET.Element("CellData") - new_piece.append(new_cell_data) - - # Loop over all extra data to check whether it is included in the reference - # file. If so, copy it. - for key in extra_data: - ref_data_array = fetch_data_array(ref_vtkfile, key, check_existence=False) - new_data_array = fetch_data_array(new_vtkfile, key, check_existence=False) - - # Copy data only if it is included in the reference file, but not the current. - if ref_data_array is not None and new_data_array is None: - new_cell_data.append(ref_data_array) - - # Write updates to file - tree.write(file_name) - def _append_folder_name(self, folder_name: Optional[str] = None, name: str = "") -> str: """ Auxiliary method setting up potentially non-existent folder structure and From a4d82601a3d796f48c6a34a46713f45b98c0007e Mon Sep 17 00:00:00 2001 From: Jakub Both Date: Tue, 17 May 2022 19:55:48 +0200 Subject: [PATCH 36/83] MAINT/DOC: Small changes to doc and error messages. --- src/porepy/viz/exporter.py | 64 ++++++++++++++++++++++++-------------- 1 file changed, 40 insertions(+), 24 deletions(-) diff --git a/src/porepy/viz/exporter.py b/src/porepy/viz/exporter.py index 8280a3c355..3a582774de 100644 --- a/src/porepy/viz/exporter.py +++ b/src/porepy/viz/exporter.py @@ -45,7 +45,7 @@ class Exporter: Optional arguments in kwargs: fixed_grid: (optional) in a time dependent simulation specify if the grid changes in time or not. The default is True. - binary: export in binary format, default is True. + binary: (optional) export in binary format, default is True. How to use: If you need to export a single grid: @@ -83,6 +83,7 @@ class Exporter: # Altogether allowed data structures to define data DataInput = Union[ + # Keys for states str, # Subdomain specific data types Tuple[Union[pp.Grid, List[pp.Grid]], str], @@ -338,7 +339,8 @@ def _sort_and_unify_data( The routine has two goals: 1. Splitting data into subdomain and interface data. - 2. Unify the data format into tuples of grid/edge, name, data array. + 2. Unify the data format. Store data in dictionaries, with keys given by + grids/edges and names, and values given by the data arrays. Parameters: data (Union[DataInput, List[DataInput]], optional): data @@ -364,7 +366,8 @@ def _sort_and_unify_data( # Auxiliary function transforming scalar ranged values # to vector ranged values when suitable. 
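# ---- Editor's note (a sketch, not part of the patch) ----------------------
# The helper defined below keeps data with num_cells entries as scalar cell
# data and reshapes anything larger to one column per cell. A standalone
# sketch in the same spirit; the Fortran reshape order is an assumption of
# this illustration:
import numpy as np

def to_vector_format(value: np.ndarray, num_cells: int) -> np.ndarray:
    if np.atleast_2d(value).shape[1] == num_cells:
        return value  # already one (row of) value(s) per cell
    return np.reshape(value, (-1, num_cells), order="F")

num_cells = 4
flat = np.arange(3 * num_cells)  # interleaved (x, y, z) per cell
assert to_vector_format(flat, num_cells).shape == (3, num_cells)
assert to_vector_format(np.ones(num_cells), num_cells).shape == (num_cells,)
# ---- end note --------------------------------------------------------------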
def toVectorFormat(value: np.ndarray, g: pp.Grid) -> np.ndarray: - """Check wether the value array has the right dimension correpsonding + """ + Check wether the value array has the right dimension corresponding to the grid size. If possible, translate the value to a vectorial object, But do nothing if the data naturally can be interpreted as scalar data. @@ -394,7 +397,6 @@ def isinstance_interface(e: Tuple[pp.Grid, pp.Grid]) -> bool: ) def isinstance_Tuple_subdomain_str(t: Tuple[Union[pp.Grid, List[pp.Grid]], str]) -> bool: - # TODO flip the order """ Implementation of isinstance(t, Tuple[Union[pp.Grid, List[pp.Grid]], str]). @@ -412,7 +414,6 @@ def isinstance_Tuple_subdomain_str(t: Tuple[Union[pp.Grid, List[pp.Grid]], str]) return False def isinstance_Tuple_interface_str(t: Tuple[Union[Tuple[pp.Grid, pp.Grid], List[Tuple[pp.Grid, pp.Grid]]], str]) -> bool: - # TODO flip the order """ Implementation of isinstance(t, Tuple[Union[Edge, List[Edge]], str]). @@ -450,10 +451,13 @@ def isinstance_Tuple_str_array(pt) -> bool: return list(map(type, pt)) == [str, np.ndarray] # Loop over all data points and convert them collect them in the format - # (grid/edge, key, data) for single grids etc. + # (grid/edge, key, data) for single grids etc. and store them in two + # dictionaries with keys (grid/egde, key) and value given by data. + # Distinguish here between subdomain and interface data and store + # accordingly. for pt in data: - # Allow for different cases. Distinguish each case separately + # Allow for different cases. Distinguish each case separately. # Case 1: Data provided by the key of a field only - could be both node # and edge data. Idea: Collect all data corresponding to grids and edges @@ -466,7 +470,7 @@ def isinstance_Tuple_str_array(pt) -> bool: # Fetch grids and interfaces as well as data associated to the key has_key = False - # Try to find the key in the data dictionary associated to nodes + # Try to find the key in the data dictionary associated to subdomains for g, d in self.gb.nodes(): if pp.STATE in d and key in d[pp.STATE]: # Mark the key as found @@ -478,7 +482,7 @@ def isinstance_Tuple_str_array(pt) -> bool: # Add data point in correct format to the collection subdomain_data[(g, key)] = value - # Try to find the key in the data dictionary associated to edges + # Try to find the key in the data dictionary associated to interfaces for e, d in self.gb.edges(): if pp.STATE in d and key in d[pp.STATE]: # Mark the key as found @@ -486,7 +490,7 @@ def isinstance_Tuple_str_array(pt) -> bool: # Fetch data and convert to vectorial format if suggested by the size mg = d["mortar_grid"] - value: np.ndarray = toVectorFormat(d[pp.STATE][key], mg) # TODO g? + value: np.ndarray = toVectorFormat(d[pp.STATE][key], mg) # Add data point in correct format to the collection interface_data[(e, key)] = value @@ -502,7 +506,9 @@ def isinstance_Tuple_str_array(pt) -> bool: # By construction, the first component contains grids. # Unify by converting the first component to a list - grids: List[pp.Grid] = pt[0] # if isinstance(pt[0], list) else [pt[0]] + grids: List[pp.Grid] = ( + pt[0] if isinstance(pt[0], list) else [pt[0]] + ) # By construction, the second component contains a key. key: str = pt[1] @@ -511,11 +517,12 @@ def isinstance_Tuple_str_array(pt) -> bool: for g in grids: # Fetch the data dictionary containing the data value - d = self.gb.node_props(g) # TODO type + d = self.gb.node_props(g) # Make sure the data exists. 
if not (pp.STATE in d and key in d[pp.STATE]): - raise ValueError("No state with prescribed key available on selected grids.") + raise ValueError(f"""No state with prescribed key {key} + available on selected subdomains.""") # Fetch data and convert to vectorial format if suitable value: np.ndarray = toVectorFormat(d[pp.STATE][key], g) @@ -530,7 +537,9 @@ def isinstance_Tuple_str_array(pt) -> bool: # By construction, the first component contains grids. # Unify by converting the first component to a list - edges: Union[Interface, List[Interface]] = pt[0] if isinstance(pt[0], list) else [pt[0]] + edges: Union[Interface, List[Interface]] = ( + pt[0] if isinstance(pt[0], list) else [pt[0]] + ) # By construction, the second component contains a key. key: str = pt[1] @@ -539,10 +548,12 @@ def isinstance_Tuple_str_array(pt) -> bool: for e in edges: # Fetch the data dictionary containing the data value - d = self.gb.edge_props(e) # TODO type + d = self.gb.edge_props(e) # Make sure the data exists. - assert(pp.STATE in d and key in d[pp.STATE]) + if not (pp.STATE in d and key in d[pp.STATE]): + raise ValueError(f"""No state with prescribed key {key} + available on selected interfaces.""") # Fetch data and convert to vectorial format if suitable mg = d["mortar_grid"] @@ -557,11 +568,13 @@ def isinstance_Tuple_str_array(pt) -> bool: # Idea: Extract the unique grid and continue as in Case 2. elif isinstance_Tuple_str_array(pt): - # Fetch the correct grid. This option is only supported for grid buckets containing a single grid. + # Fetch the correct grid. This option is only supported for grid + # buckets containing a single grid. grids: List[pp.Grid] = [g for g, _ in self.gb.nodes()] g: pp.Grid = grids[0] if not len(grids)==1: - raise ValueError(f"The data type used for {pt} is only supported if the grid bucket only contains a single grid.") + raise ValueError(f"""The data type used for {pt} is only + supported if the grid bucket only contains a single grid.""") # Fetch remaining ingredients required to define node data element key: str = pt[0] @@ -587,7 +600,7 @@ def isinstance_Tuple_str_array(pt) -> bool: # Data point in correct format e: Interface = pt[0] key: str = pt[1] - d = self.gb.edge_props(e) # TODO type + d = self.gb.edge_props(e) mg = d["mortar_grid"] value: np.ndarray = toVectorFormat(pt[2], mg) @@ -770,7 +783,6 @@ def _export_data_vtu( # Construct the list of fields represented on this dimension. fields: List[Field] = [] for key in keys: - # Collect the values associated to all entities values = [] for e in entities: @@ -778,7 +790,10 @@ def _export_data_vtu( values.append(data[(e,key)]) # Require data for all or none entities of that dimension. - assert(len(values) in [0, len(entities)]) + # FIXME: By changing the export strategy, this could be fixed. + if len(values) not in [0, len(entities)]: + raise ValueError(f"""Insufficient amount of data provided for + key {key} on dimension {dim}.""") # If data has been found, append data to list after stacking # values for different entities @@ -786,14 +801,15 @@ def _export_data_vtu( field = Field(key, np.hstack(values)) fields.append(field) - # Print data for the particular dimension. Since geometric - # info is required distinguish between subdomain and interface data. + # Print data for the particular dimension. Since geometric info + # is required distinguish between subdomain and interface data. 
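# ---- Editor's note (a sketch, not part of the patch) ----------------------
# The dimension-wise gather above stacks the per-entity arrays of one key
# into a single field, and insists that the key is present on either all or
# none of the entities of that dimension, before the selected geometry is
# handed to the write routine below. In isolation (entity names are invented
# stand-ins for grids):
import numpy as np

def gather(entities, data, key):
    values = [data[(e, key)] for e in entities if (e, key) in data]
    if len(values) not in (0, len(entities)):
        raise ValueError(f"Insufficient amount of data provided for key {key}.")
    return np.hstack(values) if values else None

data = {("g0", "p"): np.zeros(2), ("g1", "p"): np.ones(3)}
assert gather(["g0", "g1"], data, "p").size == 5
assert gather(["g0", "g1"], data, "missing") is None
# ---- end note --------------------------------------------------------------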
            meshio_geom = self.meshio_geom[dim] if is_subdomain_data else self.m_meshio_geom[dim]
            if meshio_geom is not None:
                self._write(fields, file_name, meshio_geom)

    def _export_gb_pvd(self, file_name: str, time_step: int) -> None:
-        """Routine to export to pvd format and collect all data scattered over
+        """
+        Routine to export to pvd format and collect all data scattered over
        several files for distinct grid dimensions.

        Parameters:

From bc14d4fb1c2a3ecdb77daa27fef49978f681b197 Mon Sep 17 00:00:00 2001
From: Jakub Both
Date: Tue, 17 May 2022 23:21:25 +0200
Subject: [PATCH 37/83] MAINT/TST: Restructure output of constant data. Cell
 ids are treated as constant mesh-related data. Export again other
 mesh-related constant data as grid dim etc. Adapt tests.

---
 src/porepy/viz/exporter.py |   97 +-
 tests/unit/test_vtk.py | 2199 +++++++++++++++++++++++++++++------
 2 files changed, 1870 insertions(+), 426 deletions(-)

diff --git a/src/porepy/viz/exporter.py b/src/porepy/viz/exporter.py
index 3a582774de..3ab33148d4 100644
--- a/src/porepy/viz/exporter.py
+++ b/src/porepy/viz/exporter.py
@@ -113,8 +113,12 @@ def __init__(
            file_name (str): basis for file names used for storing the output
            folder_name (str, optional): folder name, all files are stored in
            kwargs (optional): Optional keywords;
-                'fixed_grid' (boolean) to control whether the grid(bucket) may be redfined;
-                'binary' (boolean) controling whether data is stored in binary format.
+                'fixed_grid' (boolean) to control whether the grid(bucket) may be redefined
+                    (default True);
+                'binary' (boolean) controlling whether data is stored in binary format
+                    (default True);
+                'export_constants_separately' (boolean) controlling whether
+                    constant data is exported in separate files (default True).
        """
        # Exporter is operating on grid buckets. If a grid is provided, convert to a grid bucket.
        if isinstance(gb, pp.Grid):
@@ -132,13 +136,11 @@ def __init__(
        # Check for optional keywords
        self.fixed_grid: bool = kwargs.pop("fixed_grid", True)
        self.binary: bool = kwargs.pop("binary", True)
+        self.export_constants_separately: bool = kwargs.pop("export_constants_separately", True)
        if kwargs:
            msg = "Exporter() got unexpected keyword argument '{}'"
            raise TypeError(msg.format(kwargs.popitem()[0]))

-        # Hard code keywords
-        self.cell_id_key = "cell_id"
-
        # Generate infrastructure for storing fixed-dimensional grids in
        # meshio format. Include all but the 0-d grids
        self.dims = np.setdiff1d(self.gb.all_dims(), [0])
@@ -252,25 +254,47 @@ def write_vtu(
        # 2. Unify data type.
        subdomain_data, interface_data = self._sort_and_unify_data(data)

+        # Export constant data to separate or standard files
+        if self.export_constants_separately:
+            # Export constant data to vtu when outdated
+            if not self._exported_constant_data_up_to_date:
+                # Export constant subdomain data to vtu
+                self._export_data_vtu(
+                    self._constant_subdomain_data,
+                    time_step,
+                    constant_data = True
+                )
+
+                # Export constant interface data to vtu
+                self._export_data_vtu(
+                    self._constant_interface_data,
+                    time_step,
+                    constant_data = True,
+                    interface_data = True
+                )
+
+                # Store the time step counter for later reference
+                # (required for pvd files)
+                self._time_step_counter_constant_data = time_step
+
+                # Identify the constant data as fixed. Has the effect
+                # that the constant data won't be re-exported unless the
+                # mesh is updated or new constant data is added.
+ self._exported_constant_data_up_to_date = True + else: + # Append constant subdomain and interface data to the + # standard containers for subdomain and interface data. + for key, value in self._constant_subdomain_data.items(): + subdomain_data[key] = value.copy() + for key, value in self._constant_interface_data.items(): + interface_data[key] = value.copy() + # Export subdomain and interface data to vtu format if existing if subdomain_data: self._export_data_vtu(subdomain_data, time_step) if interface_data: self._export_data_vtu(interface_data, time_step, interface_data = True) - # Export constant data to vtu when outdated - if not self._exported_constant_data_up_to_date: - self._export_data_vtu(self._constant_subdomain_data, time_step, constant_data = True) - self._export_data_vtu(self._constant_interface_data, time_step, constant_data = True, interface_data = True) - - # Store the time step counter for later reference (required for pvd files) - self._time_step_counter_constant_data = time_step - - # Identify the constant data as fixed. Has the effect that - # the constant data won't be exported if not the mesh is updated - # or new constant data is added. - self._exported_constant_data_up_to_date = True - # Export grid bucket to pvd format file_name = self._make_file_name(self.file_name, time_step, extension=".pvd") file_name = self._append_folder_name(self.folder_name, file_name) @@ -631,22 +655,16 @@ def _update_constant_mesh_data(self) -> None: if not hasattr(self, "_constant_subdomain_data"): self._constant_subdomain_data = dict() - # Initialize offset - g_num_cells: int = 0 - # Add mesh related, constant subdomain data by direct assignment for g, d in self.gb.nodes(): ones = np.ones(g.num_cells, dtype=int) - self._constant_subdomain_data[(g, "cell_id")] = np.arange(g.num_cells, dtype=int) + g_num_cells + self._constant_subdomain_data[(g, "cell_id")] = np.arange(g.num_cells, dtype=int) self._constant_subdomain_data[(g, "grid_dim")] = g.dim * ones if "node_number" in d: self._constant_subdomain_data[(g, "grid_node_number")] = d["node_number"] * ones self._constant_subdomain_data[(g, "is_mortar")] = 0 * ones self._constant_subdomain_data[(g, "mortar_side")] = pp.grids.mortar_grid.MortarSides.NONE_SIDE.value * ones - # Update offset - g_num_cells += g.num_cells - # Define constant interface data related to the mesh # Initialize container for constant interface data @@ -846,6 +864,32 @@ def _export_gb_pvd(self, file_name: str, time_step: int) -> None: fm % self._make_file_name(self.file_name + "_mortar_", time_step, dim) ) + # If constant data is exported to separate vtu files, also include + # these here. The procedure is similar to the above, but the file names + # incl. the relevant time step has to be adjusted. + if self.export_constants_separately: + # Constant subdomain data. + for dim in self.dims: + if self.meshio_geom[dim] is not None: + o_file.write( + fm % self._make_file_name( + self.file_name + "_constant", + self._time_step_counter_constant_data, + dim + ) + ) + + # Constant interface data. + for dim in self.m_dims: + if self.m_meshio_geom[dim] is not None: + o_file.write( + fm % self._make_file_name( + self.file_name + "_constant_mortar_", + self._time_step_counter_constant_data, + dim + ) + ) + o_file.write("\n" + "") o_file.close() @@ -1520,9 +1564,6 @@ def _write( else: raise ValueError - # Add constant cell_ids. To be entirely moved to constant data soon. 
- cell_data.update({self.cell_id_key: meshio_geom.cell_ids}) - # Create the meshio object meshio_grid_to_export = meshio.Mesh( meshio_geom.pts, meshio_geom.connectivity, cell_data=cell_data diff --git a/tests/unit/test_vtk.py b/tests/unit/test_vtk.py index f3b31e73a0..8ab5e1d1cc 100644 --- a/tests/unit/test_vtk.py +++ b/tests/unit/test_vtk.py @@ -30,7 +30,13 @@ def test_single_grid_1d(self): dummy_scalar = np.ones(g.num_cells) * g.dim dummy_vector = np.ones((3, g.num_cells)) * g.dim - save = pp.Exporter(g, self.file_name, self.folder, binary=False) + save = pp.Exporter( + g, + self.file_name, + self.folder, + binary=False, + export_constants_separately = False + ) save.write_vtu([("dummy_scalar", dummy_scalar), ("dummy_vector", dummy_vector)]) with open(self.folder + self.file_name + "_1.vtu", "r") as content_file: @@ -44,7 +50,13 @@ def test_single_grid_2d_simplex(self): dummy_scalar = np.ones(g.num_cells) * g.dim dummy_vector = np.ones((3, g.num_cells)) * g.dim - save = pp.Exporter(g, self.file_name, self.folder, binary=False) + save = pp.Exporter( + g, + self.file_name, + self.folder, + binary=False, + export_constants_separately = False + ) save.write_vtu([("dummy_scalar", dummy_scalar), ("dummy_vector", dummy_vector)]) with open(self.folder + self.file_name + "_2.vtu", "r") as content_file: @@ -58,7 +70,13 @@ def test_single_grid_2d_cart(self): dummy_scalar = np.ones(g.num_cells) * g.dim dummy_vector = np.ones((3, g.num_cells)) * g.dim - save = pp.Exporter(g, self.file_name, self.folder, binary=False) + save = pp.Exporter( + g, + self.file_name, + self.folder, + binary=False, + export_constants_separately = False + ) save.write_vtu([("dummy_scalar", dummy_scalar), ("dummy_vector", dummy_vector)]) with open(self.folder + self.file_name + "_2.vtu", "r") as content_file: @@ -74,7 +92,13 @@ def test_single_grid_2d_polytop(self): dummy_scalar = np.ones(g.num_cells) * g.dim dummy_vector = np.ones((3, g.num_cells)) * g.dim - save = pp.Exporter(g, self.file_name, self.folder, binary=False) + save = pp.Exporter( + g, + self.file_name, + self.folder, + binary=False, + export_constants_separately = False + ) save.write_vtu([("dummy_scalar", dummy_scalar), ("dummy_vector", dummy_vector)]) with open(self.folder + self.file_name + "_2.vtu", "r") as content_file: @@ -88,7 +112,13 @@ def test_single_grid_3d_simplex(self): dummy_scalar = np.ones(g.num_cells) * g.dim dummy_vector = np.ones((3, g.num_cells)) * g.dim - save = pp.Exporter(g, self.file_name, self.folder, binary=False) + save = pp.Exporter( + g, + self.file_name, + self.folder, + binary=False, + export_constants_separately = False + ) save.write_vtu([("dummy_scalar", dummy_scalar), ("dummy_vector", dummy_vector)]) with open(self.folder + self.file_name + "_3.vtu", "r") as content_file: @@ -102,7 +132,13 @@ def test_single_grid_3d_cart(self): dummy_scalar = np.ones(g.num_cells) * g.dim dummy_vector = np.ones((3, g.num_cells)) * g.dim - save = pp.Exporter(g, self.file_name, self.folder, binary=False) + save = pp.Exporter( + g, + self.file_name, + self.folder, + binary=False, + export_constants_separately = False + ) save.write_vtu([("dummy_scalar", dummy_scalar), ("dummy_vector", dummy_vector)]) with open(self.folder + self.file_name + "_3.vtu", "r") as content_file: @@ -120,7 +156,13 @@ def test_single_grid_3d_polytop(self): dummy_scalar = np.ones(g.num_cells) * g.dim dummy_vector = np.ones((3, g.num_cells)) * g.dim - save = pp.Exporter(g, self.file_name, self.folder, binary=False) + save = pp.Exporter( + g, + self.file_name, + self.folder, 
+ binary=False, + export_constants_separately = False + ) save.write_vtu([("dummy_scalar", dummy_scalar), ("dummy_vector", dummy_vector)]) with open(self.folder + self.file_name + "_3.vtu", "r") as content_file: @@ -142,7 +184,13 @@ def test_gb_1(self): }, ) - save = pp.Exporter(gb, self.file_name, self.folder, binary=False) + save = pp.Exporter( + gb, + self.file_name, + self.folder, + binary=False, + export_constants_separately = False + ) save.write_vtu(["dummy_scalar", "dummy_vector"]) with open(self.folder + self.file_name + "_1.vtu", "r") as content_file: @@ -153,11 +201,11 @@ def test_gb_1(self): content = self.sliceout(content_file.read()) self.assertTrue(content == self._gb_1_grid_2_vtu()) - # The interface contains only implicit/constant data which is currently not exported. - #with open(self.folder + self.file_name + "_mortar_1.vtu", "r") as content_file: - # content = self.sliceout(content_file.read()) - #self.assertTrue(content == self._gb_1_mortar_grid_vtu()) + with open(self.folder + self.file_name + "_mortar_1.vtu", "r") as content_file: + content = self.sliceout(content_file.read()) + self.assertTrue(content == self._gb_1_mortar_grid_vtu()) + # TODO Do we need this test if we have the subsequent one? def test_gb_2(self): gb, _ = pp.grid_buckets_2d.two_intersecting( [4, 4], y_endpoints=[0.25, 0.75], simplex=False @@ -183,7 +231,13 @@ def test_gb_2(self): }, ) - save = pp.Exporter(gb, self.file_name, self.folder, binary=False) + save = pp.Exporter( + gb, + self.file_name, + self.folder, + binary=False, + export_constants_separately = False + ) save.write_vtu(["dummy_scalar", "dummy_vector", "unique_dummy_scalar"]) with open(self.folder + self.file_name + "_1.vtu", "r") as content_file: @@ -228,8 +282,19 @@ def test_gb_3(self): g_2d = subdomains_2d[0] interfaces = [e for e, d in gb.edges() if d["mortar_grid"].dim == 1] - save = pp.Exporter(gb, self.file_name, self.folder, binary=False) - save.write_vtu([(subdomains_1d, "dummy_scalar"), "dummy_vector", "unique_dummy_scalar", (g_2d, "cc", g_2d.cell_centers)]) + save = pp.Exporter( + gb, + self.file_name, + self.folder, + binary=False, + export_constants_separately = False + ) + save.write_vtu([ + (subdomains_1d, "dummy_scalar"), + "dummy_vector", + "unique_dummy_scalar", + (g_2d, "cc", g_2d.cell_centers) + ]) with open(self.folder + self.file_name + "_1.vtu", "r") as content_file: content = self.sliceout(content_file.read()) @@ -342,6 +407,12 @@ def _single_grid_1d_grid_vtu(self): + +0 +1 +2 + + 1.00000000000e+00 1.00000000000e+00 @@ -360,10 +431,22 @@ def _single_grid_1d_grid_vtu(self): 1.00000000000e+00 - -0 + 1 -2 +1 +1 + + + +0 +0 +0 + + + +0 +0 +0 @@ -533,6 +616,27 @@ def _single_grid_2d_simplex_grid_vtu(self): + +0 +1 +2 +3 +4 +5 +6 +7 +8 +9 +10 +11 +12 +13 +14 +15 +16 +17 + + 2.00000000000e+00 2.00000000000e+00 @@ -611,25 +715,67 @@ def _single_grid_2d_simplex_grid_vtu(self): 2.00000000000e+00 - -0 -1 + 2 -3 -4 -5 -6 -7 -8 -9 -10 -11 -12 -13 -14 -15 -16 -17 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 + + + +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 + + + +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 @@ -832,6 +978,25 @@ def _single_grid_2d_cart_grid_vtu(self): + +0 +1 +2 +3 +4 +5 +6 +7 +8 +9 +10 +11 +12 +13 +14 +15 + + 2.00000000000e+00 2.00000000000e+00 @@ -902,23 +1067,61 @@ def _single_grid_2d_cart_grid_vtu(self): 2.00000000000e+00 - -0 -1 + 2 -3 -4 -5 -6 -7 -8 -9 -10 -11 -12 -13 -14 -15 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 + + + +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 + + + 
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 @@ -1001,6 +1204,13 @@ def _single_grid_2d_polytop_grid_vtu(self): + +0 +2 +3 +1 + + 2.00000000000e+00 2.00000000000e+00 @@ -1023,11 +1233,25 @@ def _single_grid_2d_polytop_grid_vtu(self): 2.00000000000e+00 - -0 + 2 -3 -1 +2 +2 +2 + + + +0 +0 +0 +0 + + + +0 +0 +0 +0 @@ -2223,13 +2447,178 @@ def _single_grid_3d_simplex_grid_vtu(self): - -3.00000000000e+00 -3.00000000000e+00 -3.00000000000e+00 -3.00000000000e+00 -3.00000000000e+00 -3.00000000000e+00 + +0 +1 +2 +3 +4 +5 +6 +7 +8 +9 +10 +11 +12 +13 +14 +15 +16 +17 +18 +19 +20 +21 +22 +23 +24 +25 +26 +27 +28 +29 +30 +31 +32 +33 +34 +35 +36 +37 +38 +39 +40 +41 +42 +43 +44 +45 +46 +47 +48 +49 +50 +51 +52 +53 +54 +55 +56 +57 +58 +59 +60 +61 +62 +63 +64 +65 +66 +67 +68 +69 +70 +71 +72 +73 +74 +75 +76 +77 +78 +79 +80 +81 +82 +83 +84 +85 +86 +87 +88 +89 +90 +91 +92 +93 +94 +95 +96 +97 +98 +99 +100 +101 +102 +103 +104 +105 +106 +107 +108 +109 +110 +111 +112 +113 +114 +115 +116 +117 +118 +119 +120 +121 +122 +123 +124 +125 +126 +127 +128 +129 +130 +131 +132 +133 +134 +135 +136 +137 +138 +139 +140 +141 +142 +143 +144 +145 +146 +147 +148 +149 +150 +151 +152 +153 +154 +155 +156 +157 +158 +159 +160 +161 + + + +3.00000000000e+00 +3.00000000000e+00 +3.00000000000e+00 +3.00000000000e+00 +3.00000000000e+00 +3.00000000000e+00 3.00000000000e+00 3.00000000000e+00 3.00000000000e+00 @@ -2877,169 +3266,499 @@ def _single_grid_3d_simplex_grid_vtu(self): 3.00000000000e+00 - -0 -1 -2 + 3 -4 -5 -6 -7 -8 -9 -10 -11 -12 -13 -14 -15 -16 -17 -18 -19 -20 -21 -22 -23 -24 -25 -26 -27 -28 -29 -30 -31 -32 -33 -34 -35 -36 -37 -38 -39 -40 -41 -42 -43 -44 -45 -46 -47 -48 -49 -50 -51 -52 -53 -54 -55 -56 -57 -58 -59 -60 -61 -62 -63 -64 -65 -66 -67 -68 -69 -70 -71 -72 -73 -74 -75 -76 -77 -78 -79 -80 -81 -82 -83 -84 -85 -86 -87 -88 -89 -90 -91 -92 -93 -94 -95 -96 -97 -98 -99 -100 -101 -102 -103 -104 -105 -106 -107 -108 -109 -110 -111 -112 -113 -114 -115 -116 -117 -118 -119 -120 -121 -122 -123 -124 -125 -126 -127 -128 -129 -130 -131 -132 -133 -134 -135 -136 -137 -138 -139 -140 -141 -142 -143 -144 -145 -146 -147 -148 -149 -150 -151 -152 -153 -154 -155 -156 -157 -158 -159 -160 -161 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 + + + +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 + + + +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 
+0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 @@ -4086,6 +4805,73 @@ def _single_grid_3d_cart_grid_vtu(self): + +0 +1 +2 +3 +4 +5 +6 +7 +8 +9 +10 +11 +12 +13 +14 +15 +16 +17 +18 +19 +20 +21 +22 +23 +24 +25 +26 +27 +28 +29 +30 +31 +32 +33 +34 +35 +36 +37 +38 +39 +40 +41 +42 +43 +44 +45 +46 +47 +48 +49 +50 +51 +52 +53 +54 +55 +56 +57 +58 +59 +60 +61 +62 +63 + + 3.00000000000e+00 3.00000000000e+00 @@ -4348,71 +5134,205 @@ def _single_grid_3d_cart_grid_vtu(self): 3.00000000000e+00 - + +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 + + + +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 + + + +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 0 -1 -2 -3 -4 -5 -6 -7 -8 -9 -10 -11 -12 -13 -14 -15 -16 -17 -18 -19 -20 -21 -22 -23 -24 -25 -26 -27 -28 -29 -30 -31 -32 -33 -34 -35 -36 -37 -38 -39 -40 -41 -42 -43 -44 -45 -46 -47 -48 -49 -50 -51 -52 -53 -54 -55 -56 -57 -58 -59 -60 -61 -62 -63 @@ -5080,6 +6000,14 @@ def _single_grid_3d_polytop_grid_vtu(self): + +3 +1 +2 +0 +4 + + 3.00000000000e+00 3.00000000000e+00 @@ -5106,12 +6034,28 @@ def _single_grid_3d_polytop_grid_vtu(self): 3.00000000000e+00 - + 3 -1 -2 +3 +3 +3 +3 + + + +0 +0 +0 +0 +0 + + + +0 +0 +0 +0 0 -4 @@ -5174,6 +6118,13 @@ def _gb_1_grid_1_vtu(self): + +0 +1 +2 +3 + + 1.00000000000e+00 1.00000000000e+00 @@ -5196,11 +6147,32 @@ def _gb_1_grid_1_vtu(self): 1.00000000000e+00 - -0 + +1 +1 +1 1 -2 -3 + + + +1 +1 +1 +1 + + + +0 +0 +0 +0 + + + +0 +0 +0 +0 @@ -5418,6 +6390,25 @@ def _gb_1_grid_2_vtu(self): + +0 +1 +2 +3 +4 +5 +6 +7 +8 +9 +10 +11 +12 +13 +14 +15 + + 2.00000000000e+00 2.00000000000e+00 @@ -5488,23 +6479,80 @@ def _gb_1_grid_2_vtu(self): 2.00000000000e+00 - -0 -1 + 2 -3 -4 -5 -6 -7 -8 -9 -10 -11 -12 -13 -14 -15 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 + + + +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 + + + +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 + + + +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 @@ -5564,14 +6612,14 @@ def _gb_1_mortar_grid_vtu(self): 3 3 4 -4 -5 5 6 6 7 7 8 +8 +9 @@ -5598,6 +6646,28 @@ def _gb_1_mortar_grid_vtu(self): + +0 +1 +2 +3 +4 +5 +6 +7 + + + +1 +1 +1 +1 +1 +1 +1 +1 + + 0 0 @@ -5630,17 +6700,6 @@ def _gb_1_mortar_grid_vtu(self): 2 2 - - -0 -1 -2 -3 -4 -5 -6 -7 - @@ -5725,6 +6784,15 @@ def _gb_2_grid_1_vtu(self): + +0 +1 +2 +3 +0 +1 + + 1.00000000000e+00 1.00000000000e+00 @@ -5755,13 +6823,40 @@ def _gb_2_grid_1_vtu(self): 1.00000000000e+00 - -0 + +1 +1 +1 +1 +1 +1 + + + +1 +1 +1 1 2 -3 -4 -5 +2 + + + +0 +0 +0 +0 +0 +0 + + + +0 +0 +0 +0 +0 +0 @@ -5985,6 +7080,25 @@ def _gb_2_grid_2_vtu(self): + +0 +1 +2 +3 +4 +5 +6 +7 +8 +9 +10 +11 +12 +13 +14 +15 + + 2.00000000000e+00 2.00000000000e+00 @@ -6055,23 +7169,80 @@ def _gb_2_grid_2_vtu(self): 2.00000000000e+00 - -0 -1 + +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 2 -3 -4 -5 -6 -7 -8 -9 -10 -11 -12 -13 -14 -15 + + + +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 + + + +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 + + + +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 @@ -6211,6 +7382,21 @@ def _gb_2_mortar_grid_1_vtu(self): + +0 +1 +2 +3 +4 +5 +6 +7 +0 +1 +2 +3 + + 0.00000000000e+00 
0.00000000000e+00 @@ -6225,6 +7411,66 @@ def _gb_2_mortar_grid_1_vtu(self): 0.00000000000e+00 0.00000000000e+00 + + +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 + + + +0 +0 +0 +0 +0 +0 +0 +0 +1 +1 +1 +1 + + + +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 + + + +1 +1 +1 +1 +2 +2 +2 +2 +1 +1 +2 +2 + 0.00000000000e+00 @@ -6240,21 +7486,6 @@ def _gb_2_mortar_grid_1_vtu(self): 0.00000000000e+00 0.00000000000e+00 - - -0 -1 -2 -3 -4 -5 -6 -7 -8 -9 -10 -11 - @@ -6339,6 +7570,15 @@ def _gb_3_grid_1_vtu(self): + +0 +1 +2 +3 +0 +1 + + 1.00000000000e+00 1.00000000000e+00 @@ -6369,13 +7609,40 @@ def _gb_3_grid_1_vtu(self): 1.00000000000e+00 - -0 + +1 +1 +1 +1 +1 +1 + + + +1 +1 +1 1 2 -3 -4 -5 +2 + + + +0 +0 +0 +0 +0 +0 + + + +0 +0 +0 +0 +0 +0 @@ -6649,6 +7916,25 @@ def _gb_3_grid_2_vtu(self): 8.75000000000e-01 0.00000000000e+00 + + +0 +1 +2 +3 +4 +5 +6 +7 +8 +9 +10 +11 +12 +13 +14 +15 + 2.00000000000e+00 @@ -6701,23 +7987,80 @@ def _gb_3_grid_2_vtu(self): 2.00000000000e+00 - -0 -1 + 2 -3 -4 -5 -6 -7 -8 -9 -10 -11 -12 -13 -14 -15 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 + + + +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 + + + +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 + + + +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 @@ -6857,6 +8200,81 @@ def _gb_3_mortar_grid_1_vtu(self): + +0 +1 +2 +3 +4 +5 +6 +7 +0 +1 +2 +3 + + + +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 + + + +0 +0 +0 +0 +0 +0 +0 +0 +1 +1 +1 +1 + + + +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 + + + +1 +1 +1 +1 +2 +2 +2 +2 +1 +1 +2 +2 + + 0.00000000000e+00 0.00000000000e+00 @@ -6871,21 +8289,6 @@ def _gb_3_mortar_grid_1_vtu(self): 0.00000000000e+00 0.00000000000e+00 - - -0 -1 -2 -3 -4 -5 -6 -7 -8 -9 -10 -11 - From cb8a4f0de32749fd0d207d375dea62a9f706d871 Mon Sep 17 00:00:00 2001 From: Jakub Both Date: Tue, 17 May 2022 23:33:34 +0200 Subject: [PATCH 38/83] ENH/TST: Allow for adding explicit constant data. Add dedicated test. --- src/porepy/viz/exporter.py | 33 ++- tests/unit/test_vtk.py | 577 +++++++++++++++++++++++++++++++++++++ 2 files changed, 606 insertions(+), 4 deletions(-) diff --git a/src/porepy/viz/exporter.py b/src/porepy/viz/exporter.py index 3ab33148d4..70a7069d5c 100644 --- a/src/porepy/viz/exporter.py +++ b/src/porepy/viz/exporter.py @@ -187,10 +187,35 @@ def change_name(self, file_name: str) -> None: """ self.file_name = file_name - # Reset control parameter for the reuse of fixed data in the write routine. - # One could consider also allowing to reuse data from files with different - # names, but at the moment a more light-weight variant has been chosen. - self._prev_exported_time_step = None + def add_constant_data( + self, + data: Optional[Union[DataInput, List[DataInput]]] = None, + ) -> None: + """ + Collect constant data (after unifying). And later export to own files. + + Parameters: + data (Union[DataInput, List[DataInput]], optional): node and + edge data, prescribed through strings, or tuples of + grids/edges, keys and values. If not provided only + geometical infos are exported. + """ + # Identify change in constant data. Has the effect that + # the constant data container will be exported at the + # next application of write_vtu(). + self._exported_constant_data_up_to_date: bool = False + + # Preprocessing of the user-defined constant data + # Has two main goals: + # 1. Sort wrt. whether data is associated to subdomains or interfaces. + # 2. Unify data type. + subdomain_data, interface_data = self._sort_and_unify_data(data) + + # Add the user-defined data to the containers for constant data. 
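+        # Note that copies are stored: later in-place changes to the
+        # user-provided arrays therefore do not alter the constant data
+        # scheduled for export.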
+ for key, value in subdomain_data.items(): + self._constant_subdomain_data[key] = value.copy() + for key, value in interface_data.items(): + self._constant_interface_data[key] = value.copy() def write_vtu( self, diff --git a/tests/unit/test_vtk.py b/tests/unit/test_vtk.py index 8ab5e1d1cc..11b022bf59 100644 --- a/tests/unit/test_vtk.py +++ b/tests/unit/test_vtk.py @@ -308,6 +308,30 @@ def test_gb_3(self): content = self.sliceout(content_file.read()) self.assertTrue(content == self._gb_3_mortar_grid_1_vtu()) + def test_constant_data(self): + g = pp.StructuredTriangleGrid([3] * 2, [1] * 2) + g.compute_geometry() + + dummy_scalar = np.ones(g.num_cells) * g.dim + dummy_vector = np.ones((3, g.num_cells)) * g.dim + + save = pp.Exporter( + g, + self.file_name, + self.folder, + binary=False, + ) + save.add_constant_data([(g, "cc", g.cell_centers)]) + save.write_vtu([("dummy_scalar", dummy_scalar), ("dummy_vector", dummy_vector)]) + + with open(self.folder + self.file_name + "_2.vtu", "r") as content_file: + content = self.sliceout(content_file.read()) + self.assertTrue(content == self._nonconstant_data_grid_vtu()) + + with open(self.folder + self.file_name + "_constant_2.vtu", "r") as content_file: + content = self.sliceout(content_file.read()) + self.assertTrue(content == self._constant_data_grid_vtu()) + # NOTE: This test is not directly testing Exporter, but the capability to export networks. # Is this a used capability? If not, suggest removing the test and the corresponding capability. def test_fractures_2d(self): @@ -8289,6 +8313,559 @@ def _gb_3_mortar_grid_1_vtu(self): 0.00000000000e+00 0.00000000000e+00 + + + + + +""" + + def _nonconstant_data_grid_vtu(self): + return """ + + + + + + +0.00000000000e+00 +0.00000000000e+00 +0.00000000000e+00 +3.33333333333e-01 +0.00000000000e+00 +0.00000000000e+00 +6.66666666667e-01 +0.00000000000e+00 +0.00000000000e+00 +1.00000000000e+00 +0.00000000000e+00 +0.00000000000e+00 +0.00000000000e+00 +3.33333333333e-01 +0.00000000000e+00 +3.33333333333e-01 +3.33333333333e-01 +0.00000000000e+00 +6.66666666667e-01 +3.33333333333e-01 +0.00000000000e+00 +1.00000000000e+00 +3.33333333333e-01 +0.00000000000e+00 +0.00000000000e+00 +6.66666666667e-01 +0.00000000000e+00 +3.33333333333e-01 +6.66666666667e-01 +0.00000000000e+00 +6.66666666667e-01 +6.66666666667e-01 +0.00000000000e+00 +1.00000000000e+00 +6.66666666667e-01 +0.00000000000e+00 +0.00000000000e+00 +1.00000000000e+00 +0.00000000000e+00 +3.33333333333e-01 +1.00000000000e+00 +0.00000000000e+00 +6.66666666667e-01 +1.00000000000e+00 +0.00000000000e+00 +1.00000000000e+00 +1.00000000000e+00 +0.00000000000e+00 + + + + + +0 +1 +5 +0 +4 +5 +1 +2 +6 +1 +5 +6 +2 +3 +7 +2 +6 +7 +4 +5 +9 +4 +8 +9 +5 +6 +10 +5 +9 +10 +6 +7 +11 +6 +10 +11 +8 +9 +13 +8 +12 +13 +9 +10 +14 +9 +13 +14 +10 +11 +15 +10 +14 +15 + + + +3 +6 +9 +12 +15 +18 +21 +24 +27 +30 +33 +36 +39 +42 +45 +48 +51 +54 + + + +5 +5 +5 +5 +5 +5 +5 +5 +5 +5 +5 +5 +5 +5 +5 +5 +5 +5 + + + + + +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 + + + +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 
+2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 +2.00000000000e+00 + + + + + + +""" + + def _constant_data_grid_vtu(self): + return """ + + + + + + +0.00000000000e+00 +0.00000000000e+00 +0.00000000000e+00 +3.33333333333e-01 +0.00000000000e+00 +0.00000000000e+00 +6.66666666667e-01 +0.00000000000e+00 +0.00000000000e+00 +1.00000000000e+00 +0.00000000000e+00 +0.00000000000e+00 +0.00000000000e+00 +3.33333333333e-01 +0.00000000000e+00 +3.33333333333e-01 +3.33333333333e-01 +0.00000000000e+00 +6.66666666667e-01 +3.33333333333e-01 +0.00000000000e+00 +1.00000000000e+00 +3.33333333333e-01 +0.00000000000e+00 +0.00000000000e+00 +6.66666666667e-01 +0.00000000000e+00 +3.33333333333e-01 +6.66666666667e-01 +0.00000000000e+00 +6.66666666667e-01 +6.66666666667e-01 +0.00000000000e+00 +1.00000000000e+00 +6.66666666667e-01 +0.00000000000e+00 +0.00000000000e+00 +1.00000000000e+00 +0.00000000000e+00 +3.33333333333e-01 +1.00000000000e+00 +0.00000000000e+00 +6.66666666667e-01 +1.00000000000e+00 +0.00000000000e+00 +1.00000000000e+00 +1.00000000000e+00 +0.00000000000e+00 + + + + + +0 +1 +5 +0 +4 +5 +1 +2 +6 +1 +5 +6 +2 +3 +7 +2 +6 +7 +4 +5 +9 +4 +8 +9 +5 +6 +10 +5 +9 +10 +6 +7 +11 +6 +10 +11 +8 +9 +13 +8 +12 +13 +9 +10 +14 +9 +13 +14 +10 +11 +15 +10 +14 +15 + + + +3 +6 +9 +12 +15 +18 +21 +24 +27 +30 +33 +36 +39 +42 +45 +48 +51 +54 + + + +5 +5 +5 +5 +5 +5 +5 +5 +5 +5 +5 +5 +5 +5 +5 +5 +5 +5 + + + + + +2.22222222222e-01 +1.11111111111e-01 +0.00000000000e+00 +1.11111111111e-01 +2.22222222222e-01 +0.00000000000e+00 +5.55555555556e-01 +1.11111111111e-01 +0.00000000000e+00 +4.44444444444e-01 +2.22222222222e-01 +0.00000000000e+00 +8.88888888889e-01 +1.11111111111e-01 +0.00000000000e+00 +7.77777777778e-01 +2.22222222222e-01 +0.00000000000e+00 +2.22222222222e-01 +4.44444444444e-01 +0.00000000000e+00 +1.11111111111e-01 +5.55555555556e-01 +0.00000000000e+00 +5.55555555556e-01 +4.44444444444e-01 +0.00000000000e+00 +4.44444444444e-01 +5.55555555556e-01 +0.00000000000e+00 +8.88888888889e-01 +4.44444444444e-01 +0.00000000000e+00 +7.77777777778e-01 +5.55555555556e-01 +0.00000000000e+00 +2.22222222222e-01 +7.77777777778e-01 +0.00000000000e+00 +1.11111111111e-01 +8.88888888889e-01 +0.00000000000e+00 +5.55555555556e-01 +7.77777777778e-01 +0.00000000000e+00 +4.44444444444e-01 +8.88888888889e-01 +0.00000000000e+00 +8.88888888889e-01 +7.77777777778e-01 +0.00000000000e+00 +7.77777777778e-01 +8.88888888889e-01 +0.00000000000e+00 + + + +0 +1 +2 +3 +4 +5 +6 +7 +8 +9 +10 +11 +12 +13 +14 +15 +16 +17 + + + +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 + + + +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 + + + +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 + From db8612653c27dc9d00df44b571302b9d7a354122 Mon Sep 17 00:00:00 2001 From: Jakub Both Date: Wed, 18 May 2022 00:43:28 +0200 Subject: [PATCH 39/83] 
ENH: write_pvd now prints both subdomain and interface data plus their
 constants.

---
 src/porepy/viz/exporter.py | 119 +++++++++++++++++++++++++++----------
 1 file changed, 87 insertions(+), 32 deletions(-)

diff --git a/src/porepy/viz/exporter.py b/src/porepy/viz/exporter.py
index 70a7069d5c..2041f1388e 100644
--- a/src/porepy/viz/exporter.py
+++ b/src/porepy/viz/exporter.py
@@ -162,10 +162,13 @@ def __init__(
         self._time_step_counter: int = 0
 
         # Storage for file name extensions for time steps
-        self._exported_time_step_file_names: List[int] = []
+        self._exported_timesteps: List[int] = []
 
-        # Reference to the last time step used for exporting constant data.
-        self._time_step_counter_constant_data: int = 0
+        # Reference to the last time step used for exporting constant data.
+        self._time_step_constants: int = 0
+        # Storage for file name extensions for time steps, regarding constant data.
+        self._exported_timesteps_constants: List[int] = []
+        # Identifier for whether constant data is up-to-date.
         self._exported_constant_data_up_to_date: bool = False
 
         # Parameter to be used in several occasions for adding time stamps.
@@ -175,7 +178,6 @@ def __init__(
         if "numba" not in sys.modules:
             raise NotImplementedError("The sorting algorithm requires numba to be installed.")
 
-    # TODO isn't this method obsolete?
     def change_name(self, file_name: str) -> None:
         """
         Change the root name of the files, useful when different keywords are
@@ -220,7 +222,7 @@ def add_constant_data(
     def write_vtu(
         self,
         data: Optional[Union[DataInput, List[DataInput]]] = None,
-        time_dependent: Optional[bool] = False, # TODO in use? e.g., when not each time step is printed? or is time_step usually used?
+        time_dependent: Optional[bool] = False,
         time_step: Optional[int] = None,
         gb: Optional[Union[pp.Grid, pp.GridBucket]] = None,
     ) -> None:
@@ -272,7 +274,7 @@ def write_vtu(
 
         # If time step is prescribed, store it.
         if time_step is not None:
-            self._exported_time_step_file_names.append(time_step)
+            self._exported_timesteps.append(time_step)
 
         # Preprocessing step with two main goals:
         # 1. Sort wrt. whether data is associated to subdomains or interfaces.
@@ -300,12 +302,15 @@ def write_vtu(
 
             # Store the time step counter for later reference
             # (required for pvd files)
-            self._time_step_counter_constant_data = time_step
+            self._time_step_constants = time_step
 
             # Identify the constant data as fixed. Has the effect
             # that the constant data won't be exported if not the
             # mesh is updated or new constant data is added.
             self._exported_constant_data_up_to_date = True
+
+            # Store the time step referring to the origin of the constant data
+            self._exported_timesteps_constants.append(self._time_step_constants)
         else:
             # Append constant subdomain and interface data to the
             # standard containers for subdomain and interface data.
@@ -323,13 +328,11 @@ def write_vtu(
         # Export grid bucket to pvd format
         file_name = self._make_file_name(self.file_name, time_step, extension=".pvd")
         file_name = self._append_folder_name(self.folder_name, file_name)
-        # TODO include the constant data in _export_gb_pvd
        self._export_gb_pvd(file_name, time_step)
 
-    # TODO does not contain all keywords as in write_vtu. make consistent?
     def write_pvd(
         self,
-        timestep: np.ndarray,
+        times: np.ndarray,
         file_extension: Optional[Union[np.ndarray, List[int]]] = None,
     ) -> None:
         """
@@ -340,22 +343,37 @@ def write_pvd(
         We assume that the VTU associated files are in the same folder.
 
         Parameters:
-            timestep: numpy of times to be exported. 
These will be the time associated with - indivdiual time steps in, say, Paraview. By default, the times will be - associated with the order in which the time steps were exported. This can - be overridden by the file_extension argument. + times (np.ndarray): array of times to be exported. These will be the times + associated with indivdiual time steps in, say, Paraview. By default, + the times will be associated with the order in which the time steps were + exported. This can be overridden by the file_extension argument. file_extension (np.array-like, optional): End of file names used in the export of individual time steps, see self.write_vtu(). If provided, it should have the same length as time. If not provided, the file names will be picked from those used when writing individual time steps. - """ if file_extension is None: - file_extension = self._exported_time_step_file_names + file_extension = self._exported_timesteps + file_extension_constants = self._exported_timesteps_constants + indices = [self._exported_timesteps.index(e) for e in file_extension] + file_extension_constants = [self._exported_timesteps_constants[i] for i in indices] elif isinstance(file_extension, np.ndarray): file_extension = file_extension.tolist() - assert file_extension is not None # make mypy happy + # Make sure that the inputs are consistent + assert(len(file_extension) == times.shape[0]) + + # Extract the time steps related to constant data and + # complying with file_extension. Implicitly test wheter + # file_extension is a subset of _exported_timesteps. + indices = [self._exported_timesteps.index(e) for e in file_extension] + file_extension_constants = [self._exported_timesteps_constants[i] for i in indices] + + # Make mypy happy + assert file_extension is not None + + # Perform the same procedure as in _export_gb_pvd + # but looping over all designated time steps. o_file = open(self._append_folder_name(self.folder_name, self.file_name) + ".pvd", "w") b = "LittleEndian" if sys.byteorder == "little" else "BigEndian" @@ -369,11 +387,51 @@ def write_pvd( o_file.write(header) fm = '\t\n' - for time, fn in zip(timestep, file_extension): + for time, fn, fn_constants in zip( + times, + file_extension, + file_extension_constants + ): + # Go through all possible data types, analogously to + # _export_gb_pvd. + + # Subdomain data for dim in self.dims: - o_file.write( - fm % (time, self._make_file_name(self.file_name, fn, dim)) - ) + if self.meshio_geom[dim] is not None: + o_file.write( + fm % (time, self._make_file_name(self.file_name, fn, dim)) + ) + + # Interface data. + for dim in self.m_dims: + if self.m_meshio_geom[dim] is not None: + o_file.write( + fm % (time, self._make_file_name(self.file_name + "_mortar", fn, dim)) + ) + + # Constant data. + if self.export_constants_separately: + # Constant subdomain data. + for dim in self.dims: + if self.meshio_geom[dim] is not None: + o_file.write( + fm % (time, self._make_file_name( + self.file_name + "_constant", + fn_constants, + dim + )) + ) + + # Constant interface data. + for dim in self.m_dims: + if self.m_meshio_geom[dim] is not None: + o_file.write( + fm % (time, self._make_file_name( + self.file_name + "_constant_mortar", + fn_constants, + dim + )) + ) o_file.write("\n" + "") o_file.close() @@ -874,19 +932,18 @@ def _export_gb_pvd(self, file_name: str, time_step: int) -> None: o_file.write(header) fm = '\t\n' - # Include the grids of all dimensions, but only if the grids of this - # dimension are included in the vtk export + # Subdomain data. 
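+        # Include a grid dimension only if a meshio geometry has actually
+        # been generated for it.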
for dim in self.dims: if self.meshio_geom[dim] is not None: o_file.write( fm % self._make_file_name(self.file_name, time_step, dim) ) - # Same for mortar grids. + # Interface data. for dim in self.m_dims: if self.m_meshio_geom[dim] is not None: o_file.write( - fm % self._make_file_name(self.file_name + "_mortar_", time_step, dim) + fm % self._make_file_name(self.file_name + "_mortar", time_step, dim) ) # If constant data is exported to separate vtu files, also include @@ -899,7 +956,7 @@ def _export_gb_pvd(self, file_name: str, time_step: int) -> None: o_file.write( fm % self._make_file_name( self.file_name + "_constant", - self._time_step_counter_constant_data, + self._time_step_constants, dim ) ) @@ -909,8 +966,8 @@ def _export_gb_pvd(self, file_name: str, time_step: int) -> None: if self.m_meshio_geom[dim] is not None: o_file.write( fm % self._make_file_name( - self.file_name + "_constant_mortar_", - self._time_step_counter_constant_data, + self.file_name + "_constant_mortar", + self._time_step_constants, dim ) ) @@ -1561,9 +1618,7 @@ def _write( file_name (str): name of final file of export meshio_geom (Meshio_Geom): Namedtuple of points, connectivity information, and cell ids in - meshio format. - - for a single dimension + meshio format (for a single dimension). """ # Initialize empty cell data dictinary cell_data = {} @@ -1572,7 +1627,7 @@ def _write( # Utilize meshio_geom for this. for field in fields: - # FIXME as implemented now, field.values should never be None. + # TODO RM? as implemented now, field.values should never be None. if field.values is None: continue From 820d5e7b9dd73d8884c1149c663ba2d3934ef4f3 Mon Sep 17 00:00:00 2001 From: Jakub Both Date: Wed, 18 May 2022 00:45:46 +0200 Subject: [PATCH 40/83] MAINT: Remove redundant typing (input only occurs as int). --- src/porepy/viz/exporter.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/porepy/viz/exporter.py b/src/porepy/viz/exporter.py index 2041f1388e..35ee97f68f 100644 --- a/src/porepy/viz/exporter.py +++ b/src/porepy/viz/exporter.py @@ -1680,7 +1680,7 @@ def _append_folder_name(self, folder_name: Optional[str] = None, name: str = "") def _make_file_name( self, file_name: str, - time_step: Optional[Union[float,int]] = None, + time_step: Optional[int] = None, dim: Optional[int] = None, extension: str = ".vtu", ) -> str: From f2e0cf18d7224392d952c8679b033c1471addfb9 Mon Sep 17 00:00:00 2001 From: Jakub Both Date: Wed, 18 May 2022 09:55:28 +0200 Subject: [PATCH 41/83] DOC: Update the general description of Exporter. --- src/porepy/viz/exporter.py | 43 +++++++++++++++++++++++--------------- 1 file changed, 26 insertions(+), 17 deletions(-) diff --git a/src/porepy/viz/exporter.py b/src/porepy/viz/exporter.py index 35ee97f68f..47c79f3707 100644 --- a/src/porepy/viz/exporter.py +++ b/src/porepy/viz/exporter.py @@ -32,7 +32,6 @@ MD_Meshio_Geom = Dict[int, Meshio_Geom] class Exporter: - # TODO DOC """ Class for exporting data to vtu files. @@ -46,34 +45,43 @@ class Exporter: fixed_grid: (optional) in a time dependent simulation specify if the grid changes in time or not. The default is True. binary: (optional) export in binary format, default is True. + export_constants_separately: (optional) export constants in separate files + to potentially reduce memory footprint, default is True. How to use: - If you need to export a single grid: + + The Exporter allows for various way to express which state variables, + on which grids, and which extra data should be exported. 
A thorough + demonstration is available as a dedicated tutorial. Check out + tutorials/exporter.ipynb. + + Here, merely a brief demonstration of the use of Exporter is presented. + + If you need to export the state with key "pressure" on a single grid: save = Exporter(g, "solution", folder_name="results") - save.write_vtu({"cells_id": cells_id, "pressure": pressure}) + save.write_vtu(["pressure"]) - In a time loop: + In a time loop, if you need to export states with keys "pressure" and + "displacement" stored in a grid bucket. save = Exporter(gb, "solution", folder_name="results") while time: - save.write_vtu({"conc": conc}, time_step=i) - save.write_pvd(steps*deltaT) - - if you need to export the state of variables as stored in the GridBucket: - save = Exporter(gb, "solution", folder_name="results") - # export the field stored in data[pp.STATE]["pressure"] - save.write_vtu(gb, ["pressure"]) + save.write_vtu(["pressure", "displacement"], time_step=i) + save.write_pvd(times) - In a time loop: - while time: - save.write_vtu(["conc"], time_step=i) - save.write_pvd(steps*deltaT) + where times, is a list of actual times (not time steps), associated + to the previously exported time steps. + + In general, pvd files gather data exported in separate files, including + data on differently dimensioned grids, constant data, and finally time steps. + + + deltaT is a scalar, denoting the time step size, and steps In the case of different keywords, change the file name with "change_name". NOTE: the following names are reserved for data exporting: grid_dim, - is_mortar, mortar_side, cell_id - + is_mortar, mortar_side, cell_id, grid_node_number, grid_edge_number """ # Introduce types used below @@ -178,6 +186,7 @@ def __init__( if "numba" not in sys.modules: raise NotImplementedError("The sorting algorithm requires numba to be installed.") + # TODO in use by anyone? def change_name(self, file_name: str) -> None: """ Change the root name of the files, useful when different keywords are From 210b44092abd2dd1fe5e96f3cbb3e55b6a9e7011 Mon Sep 17 00:00:00 2001 From: Jakub Both Date: Wed, 18 May 2022 10:21:09 +0200 Subject: [PATCH 42/83] ENH: Add tutorial demonstrating the use of Exporter. --- tutorials/exporter.ipynb | 426 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 426 insertions(+) create mode 100644 tutorials/exporter.ipynb diff --git a/tutorials/exporter.ipynb b/tutorials/exporter.ipynb new file mode 100644 index 0000000000..62db92f95e --- /dev/null +++ b/tutorials/exporter.ipynb @@ -0,0 +1,426 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Introduction\n", + "Currently, the standard procedure within PorePy is to export data to vtu and pvd format for visualization with ParaView. This tutorial explains how to use the 'Exporter'. In particular, it will showcase different ways to address data, how constant-in-time data is handled, and how pvd-files are managed. \n", + "\n", + "First, an example data set is defined, then the actual exporter is defined, before currently all possible ways to export data are demonstrated.\n", + "\n", + "NOTE: Related but not necessary for this tutorial: it is highly recommended to read the ParaView documentation. " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Example contact mechanics model for a mixed-dimensional geometry\n", + "In order to illustrate the capability and explain the use of the Exporter, we consider a ContactMechanicsBiot model for a two-dimensional fractured geometry. 
The mixed-dimensional geometry consists of a 2D square and two crossing 1D fractures." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n", + "Bad key text.latex.preview in file /home/jakub/anaconda3/lib/python3.7/site-packages/matplotlib/mpl-data/stylelib/_classic_test.mplstyle, line 123 ('text.latex.preview : False')\n", + "You probably need to get an updated matplotlibrc file from\n", + "https://github.com/matplotlib/matplotlib/blob/v3.5.1/matplotlibrc.template\n", + "or from the matplotlib source distribution\n", + "\n", + "Bad key mathtext.fallback_to_cm in file /home/jakub/anaconda3/lib/python3.7/site-packages/matplotlib/mpl-data/stylelib/_classic_test.mplstyle, line 155 ('mathtext.fallback_to_cm : True # When True, use symbols from the Computer Modern')\n", + "You probably need to get an updated matplotlibrc file from\n", + "https://github.com/matplotlib/matplotlib/blob/v3.5.1/matplotlibrc.template\n", + "or from the matplotlib source distribution\n", + "\n", + "Bad key savefig.jpeg_quality in file /home/jakub/anaconda3/lib/python3.7/site-packages/matplotlib/mpl-data/stylelib/_classic_test.mplstyle, line 418 ('savefig.jpeg_quality: 95 # when a jpeg is saved, the default quality parameter.')\n", + "You probably need to get an updated matplotlibrc file from\n", + "https://github.com/matplotlib/matplotlib/blob/v3.5.1/matplotlibrc.template\n", + "or from the matplotlib source distribution\n", + "\n", + "Bad key keymap.all_axes in file /home/jakub/anaconda3/lib/python3.7/site-packages/matplotlib/mpl-data/stylelib/_classic_test.mplstyle, line 466 ('keymap.all_axes : a # enable all axes')\n", + "You probably need to get an updated matplotlibrc file from\n", + "https://github.com/matplotlib/matplotlib/blob/v3.5.1/matplotlibrc.template\n", + "or from the matplotlib source distribution\n", + "\n", + "Bad key animation.avconv_path in file /home/jakub/anaconda3/lib/python3.7/site-packages/matplotlib/mpl-data/stylelib/_classic_test.mplstyle, line 477 ('animation.avconv_path: avconv # Path to avconv binary. Without full path')\n", + "You probably need to get an updated matplotlibrc file from\n", + "https://github.com/matplotlib/matplotlib/blob/v3.5.1/matplotlibrc.template\n", + "or from the matplotlib source distribution\n", + "\n", + "Bad key animation.avconv_args in file /home/jakub/anaconda3/lib/python3.7/site-packages/matplotlib/mpl-data/stylelib/_classic_test.mplstyle, line 479 ('animation.avconv_args: # Additional arguments to pass to avconv')\n", + "You probably need to get an updated matplotlibrc file from\n", + "https://github.com/matplotlib/matplotlib/blob/v3.5.1/matplotlibrc.template\n", + "or from the matplotlib source distribution\n" + ] + } + ], + "source": [ + "import numpy as np\n", + "import porepy as pp\n", + "\n", + "class BiotFractured(pp.ContactMechanicsBiot):\n", + "\n", + " def create_grid(self) -> None:\n", + " # Define domain\n", + " self.box: Dict = pp.geometry.bounding_box.from_points(\n", + " np.array([[0, 1], [0, 1]])\n", + " )\n", + "\n", + " # Define fracture network\n", + " # ... 
pts\n", + " frac_a = np.array([[1 / 2.0, 0.0], [1 / 2.0, 0.5]]).T\n", + " frac_b = np.array([[0.16, 0.2], [1, 0.5]]).T\n", + "\n", + " intersecting_fractures = True\n", + "\n", + " if intersecting_fractures:\n", + " frac_pts = np.hstack((frac_a, frac_b))\n", + " frac_connectivity = np.array([[0, 1], [2,3]]).T\n", + " else:\n", + " frac_pts = frac_a\n", + " frac_connectivity = np.array([[0], [1]])\n", + "\n", + " # ... network\n", + " network_2d = pp.FractureNetwork2d(frac_pts, frac_connectivity, self.box)\n", + "\n", + " # Generate mixed-dimensional mesh\n", + " mesh_size_frac = self.params.get(\"mesh_size_frac\", 1e-1)\n", + " mesh_size_bound = self.params.get(\"mesh_size_bound\", 1e-1)\n", + " mesh_args = {\n", + " \"mesh_size_frac\": mesh_size_frac,\n", + " \"mesh_size_bound\": mesh_size_bound\n", + " }\n", + " self.gb = network_2d.mesh(mesh_args)\n", + "\n", + " pp.contact_conditions.set_projections(self.gb)\n", + "\n", + "params = {\"use_ad\": True}\n", + "#model = pp.ContactMechanicsBiot(params)\n", + "model = BiotFractured(params)\n", + "model.prepare_simulation()\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The default data of the model is stored as pp.STATE in the grid bucket. Let's have a look:" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Keys of the states defined on subdomains: {'p', 'u', 'contact_traction'}\n", + "Keys of the states defined on interfaces: {'mortar_u', 'mortar_p'}\n" + ] + } + ], + "source": [ + "# Determine all keys of all states on all subdomains\n", + "subdomain_states = []\n", + "for g, d in model.gb.nodes():\n", + " subdomain_states += d[pp.STATE].keys()\n", + "# The key pp.ITERATE is no actual state.\n", + "subdomain_states = set(subdomain_states) - set([pp.ITERATE])\n", + "print(\"Keys of the states defined on subdomains:\", subdomain_states)\n", + "\n", + "# Determine all keys of all states on all interfaces\n", + "interface_states = []\n", + "for g, d in model.gb.edges():\n", + " interface_states += d[pp.STATE].keys()\n", + "# The key pp.ITERATE is no actual state.\n", + "interface_states = set(interface_states) - set([pp.ITERATE])\n", + "print(\"Keys of the states defined on interfaces:\", set(interface_states)) " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Defining the exporter\n", + "The definition of an object of type pp.Exporter, merely a grid bucket as well as a target is required for the output." + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "exporter = pp.Exporter(model.gb, file_name=\"file\", folder_name=\"folder\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In the following, we showcase how to use the main subroutines for exporting data:\n", + "- write_vtu()\n", + "- write_pvd()\n", + "The first, addresses the export of data for a specific time step, while the latter, gather the previous exports and collects them in a single file. This allows an easier analysis in ParaView." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Example 1: Exporting states\n", + "Data stored in the grid bucket under 'pp.STATE' can be simply exported by addressing their keys using the routine 'write_vtu()'. We define a dedicated exporter for this task." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "exporter_1 = pp.Exporter(model.gb, file_name=\"example-1\", folder_name=\"exporter-tutorial\")\n", + "exporter_1.write_vtu([\"p\", \"u\", \"mortar_p\"])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Note, that here all available representation (i.e., on all dimensions) of the states will be exported." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Example 2: Exporting states on specified grids\n", + "Similar to Example 1, we will again export states by addressing their keys, but target only a subset of grids. For instance, we fetch the grids for the subdomains and interface.\n", + "\n", + "NOTE: For now, one has to make sure that subsets of the mixed-dimensional grid contain all grids of a particular dimension." + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "grids_1d = model.gb.get_grids(lambda g: g.dim == 1).tolist()\n", + "grids_2d = model.gb.get_grids(lambda g: g.dim == 2).tolist()\n", + "edges_1d = [e for e, d in model.gb.edges() if d[\"mortar_grid\"].dim == 1]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "And as a simple example extract the 2D subdomain:" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "g_2d = grids_2d[0]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We export pressure on all 1D subdomains, displacements on a single 2D subdomain, and the mortars on all interfaces. For this, we use tuples of grid(s) and keys. In order to not overwrite the previous data, we define a new exporter." + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "exporter_2 = pp.Exporter(model.gb, \"example-2\", \"exporter-tutorial\")\n", + "exporter_2.write_vtu([(grids_1d, \"p\"), (g_2d, \"u\"), (edges_1d, \"mortar_p\")])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Example 3: Exporting explicitly defined data\n", + "There exists the possibility to export data which is not stored in the grid bucket under 'pp.STATE'. This capability requires defining tuples of (1) a single grid, (2) a key, and (3) the data vector. For example, let's export the cell centers of the 2D subdomain 'g_2d', as well as all interfaces. Again, we define a dedicated exporter for this task.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [], + "source": [ + "subdomain_data = [(g_2d, \"cc\", g_2d.cell_centers)]\n", + "interface_data = [\n", + " (edges_1d[i], \"cc_e\", model.gb.edge_props(edges_1d[i])[\"mortar_grid\"].cell_centers)\n", + " for i in range(len(edges_1d))\n", + "]\n", + "\n", + "exporter_3 = pp.Exporter(model.gb, \"example-3\", \"exporter-tutorial\")\n", + "exporter_3.write_vtu(subdomain_data + interface_data)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Example 4: Flexibility in the input arguments\n", + "The export allows for an arbitrary combination of all previous ways to export data." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [], + "source": [ + "exporter_4 = pp.Exporter(model.gb, \"example-4\", \"exporter-tutorial\")\n", + "exporter_4.write_vtu([(g_2d, \"cc\", g_2d.cell_centers), (grids_1d, \"p\"), \"u\"] + interface_data)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Example 5: Exporting data in a time series\n", + "Data can also be exported in a time series, and the Exporter takes care of managing the file names. The user will only have to prescribe the time step number. Consider a time series consisting of 5 steps. For simplicity, we consider an analogous situation as in Example 1." + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [], + "source": [ + "exporter_5 = pp.Exporter(model.gb, \"example-5\", \"exporter-tutorial\")\n", + "for step in range(5):\n", + " # Data may change\n", + " exporter_5.write_vtu([\"p\", \"u\", \"mortar_p\"], time_step=step)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Example 6: Exporting constant data\n", + "The export of both grid and geometry related data as well as heterogeneous material parameters may be of interest. However, these often do only change very seldomly in time or are even constant in time. In order to save storage space, constant data is stored separately. A multirate approach is used to address slowly changing \"constant\" data, which results in an extra set of output files. Everytime constant data has to be updated (in a time series), the output files are updated as well. " + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [], + "source": [ + "exporter_6_a = pp.Exporter(model.gb, \"example-6-a\", \"exporter-tutorial\")\n", + "# Define some 'constant' data\n", + "exporter_6_a.add_constant_data([(g_2d, \"cc\", g_2d.cell_centers)])\n", + "for step in range(5):\n", + " # Update the previously defined 'constant' data\n", + " if step == 2:\n", + " exporter_6_a.add_constant_data([(g_2d, \"cc\", -g_2d.cell_centers)])\n", + " # All constant data will be exported also if not specified\n", + " exporter_6_a.write_vtu([\"p\", \"u\", \"mortar_p\"], time_step=step)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The default is that constant data is always printed to extra files. Since the vtu format requires geometrical and topoligical information on the mesh (points, connectivity etc.), this type of constant data is exported to each vtu file. Depending on the situation, this overhead can be in fact significant. Thus, one can also choose to print the constant data to the same files as the standard data, by setting a keyword when defining the exporter. With a similar setup as in part A, the same output is generated, but managed differently among files." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [], + "source": [ + "exporter_6_b = pp.Exporter(model.gb, \"example-6-b\", \"exporter-tutorial\", export_constants_separately = False)\n", + "exporter_6_b.add_constant_data([(g_2d, \"cc\", g_2d.cell_centers)])\n", + "for step in range(5):\n", + " # Update the previously defined 'constant' data\n", + " if step == 2:\n", + " exporter_6_b.add_constant_data([(g_2d, \"cc\", -g_2d.cell_centers)])\n", + " # All constant data will be exported also if not specified\n", + " exporter_6_b.write_vtu([\"p\", \"u\", \"mortar_p\"], time_step=step)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Example 5 revisisted: PVD format\n", + "The pvd format collects previously exported data. At every application of 'write_vtu' a corresponding pvd file is generated, gather all 'vtu' files correpsonding to this time step. It is recommended to use the 'pvd' file for analyzing the data in ParaView.\n", + "\n", + "In addition, when considering a time series, it is possible to gather data connected to multiple time steps, and assign the actual time to each time step. Assume, Example 5 corresponds to an adaptive time stepping. We define the actual times, and write collect the exported data from Example 5 in a single pvd file." + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [], + "source": [ + "times_5 = [0., 0.1, 1., 2., 10.]\n", + "exporter_5.write_pvd(times_5)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Example 7: Exporting data on a single grid\n", + "It is also possible to export data without prescribing a grid bucket, but a single grid. In this case, one has to assign the data when writing to vtu. For this, a key and a data array (with suitable size) has to be provided." + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": {}, + "outputs": [], + "source": [ + "exporter_7 = pp.Exporter(g_2d, \"example-7\", \"exporter-tutorial\")\n", + "exporter_7.write_vtu([(\"cc\", g_2d.cell_centers)])" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.4" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} From 354b989614f7717b063b088532836f6a55614726 Mon Sep 17 00:00:00 2001 From: Jakub Both Date: Thu, 19 May 2022 09:21:59 +0200 Subject: [PATCH 43/83] MAINT: Clean-up exporter tutorial. 
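
Remove the recorded matplotlib warnings and stale commented-out code,
renumber the execution counts, and let Example 3 export the interface
cell centers with differing signs so the two interfaces are
distinguishable.

The export pattern demonstrated by the tutorial is unchanged. As a
rough sketch (assuming an existing grid bucket `gb` carrying a state
with key "p"):

    import numpy as np
    import porepy as pp

    # Export the state "p" for a single time step, then gather the
    # resulting vtu files in a pvd file.
    exporter = pp.Exporter(gb, file_name="solution", folder_name="results")
    exporter.write_vtu(["p"], time_step=0)
    exporter.write_pvd(np.array([0.0]))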
--- tutorials/exporter.ipynb | 71 +++++++--------------------------------- 1 file changed, 12 insertions(+), 59 deletions(-) diff --git a/tutorials/exporter.ipynb b/tutorials/exporter.ipynb index 62db92f95e..20ac352d73 100644 --- a/tutorials/exporter.ipynb +++ b/tutorials/exporter.ipynb @@ -22,46 +22,9 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 1, "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n", - "Bad key text.latex.preview in file /home/jakub/anaconda3/lib/python3.7/site-packages/matplotlib/mpl-data/stylelib/_classic_test.mplstyle, line 123 ('text.latex.preview : False')\n", - "You probably need to get an updated matplotlibrc file from\n", - "https://github.com/matplotlib/matplotlib/blob/v3.5.1/matplotlibrc.template\n", - "or from the matplotlib source distribution\n", - "\n", - "Bad key mathtext.fallback_to_cm in file /home/jakub/anaconda3/lib/python3.7/site-packages/matplotlib/mpl-data/stylelib/_classic_test.mplstyle, line 155 ('mathtext.fallback_to_cm : True # When True, use symbols from the Computer Modern')\n", - "You probably need to get an updated matplotlibrc file from\n", - "https://github.com/matplotlib/matplotlib/blob/v3.5.1/matplotlibrc.template\n", - "or from the matplotlib source distribution\n", - "\n", - "Bad key savefig.jpeg_quality in file /home/jakub/anaconda3/lib/python3.7/site-packages/matplotlib/mpl-data/stylelib/_classic_test.mplstyle, line 418 ('savefig.jpeg_quality: 95 # when a jpeg is saved, the default quality parameter.')\n", - "You probably need to get an updated matplotlibrc file from\n", - "https://github.com/matplotlib/matplotlib/blob/v3.5.1/matplotlibrc.template\n", - "or from the matplotlib source distribution\n", - "\n", - "Bad key keymap.all_axes in file /home/jakub/anaconda3/lib/python3.7/site-packages/matplotlib/mpl-data/stylelib/_classic_test.mplstyle, line 466 ('keymap.all_axes : a # enable all axes')\n", - "You probably need to get an updated matplotlibrc file from\n", - "https://github.com/matplotlib/matplotlib/blob/v3.5.1/matplotlibrc.template\n", - "or from the matplotlib source distribution\n", - "\n", - "Bad key animation.avconv_path in file /home/jakub/anaconda3/lib/python3.7/site-packages/matplotlib/mpl-data/stylelib/_classic_test.mplstyle, line 477 ('animation.avconv_path: avconv # Path to avconv binary. 
Without full path')\n", - "You probably need to get an updated matplotlibrc file from\n", - "https://github.com/matplotlib/matplotlib/blob/v3.5.1/matplotlibrc.template\n", - "or from the matplotlib source distribution\n", - "\n", - "Bad key animation.avconv_args in file /home/jakub/anaconda3/lib/python3.7/site-packages/matplotlib/mpl-data/stylelib/_classic_test.mplstyle, line 479 ('animation.avconv_args: # Additional arguments to pass to avconv')\n", - "You probably need to get an updated matplotlibrc file from\n", - "https://github.com/matplotlib/matplotlib/blob/v3.5.1/matplotlibrc.template\n", - "or from the matplotlib source distribution\n" - ] - } - ], + "outputs": [], "source": [ "import numpy as np\n", "import porepy as pp\n", @@ -103,7 +66,6 @@ " pp.contact_conditions.set_projections(self.gb)\n", "\n", "params = {\"use_ad\": True}\n", - "#model = pp.ContactMechanicsBiot(params)\n", "model = BiotFractured(params)\n", "model.prepare_simulation()\n" ] @@ -117,18 +79,9 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 2, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Keys of the states defined on subdomains: {'p', 'u', 'contact_traction'}\n", - "Keys of the states defined on interfaces: {'mortar_u', 'mortar_p'}\n" - ] - } - ], + "outputs": [], "source": [ "# Determine all keys of all states on all subdomains\n", "subdomain_states = []\n", @@ -157,7 +110,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 3, "metadata": {}, "outputs": [], "source": [ @@ -184,7 +137,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 4, "metadata": {}, "outputs": [], "source": [ @@ -211,7 +164,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 5, "metadata": {}, "outputs": [], "source": [ @@ -229,7 +182,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 6, "metadata": {}, "outputs": [], "source": [ @@ -245,7 +198,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 7, "metadata": {}, "outputs": [], "source": [ @@ -258,18 +211,18 @@ "metadata": {}, "source": [ "## Example 3: Exporting explicitly defined data\n", - "There exists the possibility to export data which is not stored in the grid bucket under 'pp.STATE'. This capability requires defining tuples of (1) a single grid, (2) a key, and (3) the data vector. For example, let's export the cell centers of the 2D subdomain 'g_2d', as well as all interfaces. Again, we define a dedicated exporter for this task.\n" + "There exists the possibility to export data which is not stored in the grid bucket under 'pp.STATE'. This capability requires defining tuples of (1) a single grid, (2) a key, and (3) the data vector. For example, let's export the cell centers of the 2D subdomain 'g_2d', as well as all interfaces (with different signs for the sake of the example). 
Again, we define a dedicated exporter for this task.\n" ] }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 8, "metadata": {}, "outputs": [], "source": [ "subdomain_data = [(g_2d, \"cc\", g_2d.cell_centers)]\n", "interface_data = [\n", - " (edges_1d[i], \"cc_e\", model.gb.edge_props(edges_1d[i])[\"mortar_grid\"].cell_centers)\n", + " (edges_1d[i], \"cc_e\", (-1) ** i * model.gb.edge_props(edges_1d[i])[\"mortar_grid\"].cell_centers)\n", " for i in range(len(edges_1d))\n", "]\n", "\n", From 1e71ee5a07cb1be509245b7d07901232dd54c042 Mon Sep 17 00:00:00 2001 From: Jakub Both Date: Thu, 19 May 2022 09:39:19 +0200 Subject: [PATCH 44/83] STY: black, flake8, isort. --- src/porepy/viz/exporter.py | 535 +++++++++++++++++++++---------------- 1 file changed, 309 insertions(+), 226 deletions(-) diff --git a/src/porepy/viz/exporter.py b/src/porepy/viz/exporter.py index 236988afb8..020a366624 100644 --- a/src/porepy/viz/exporter.py +++ b/src/porepy/viz/exporter.py @@ -10,27 +10,48 @@ import logging import os import sys -from typing import Dict, Generator, Iterable, List, Optional, Tuple, Union -from xml.etree import ElementTree as ET from collections import namedtuple +from typing import Dict, Iterable, List, Optional, Tuple, Union import meshio import numpy as np -import scipy.sparse as sps import porepy as pp # Module-wide logger logger = logging.getLogger(__name__) +# Introduce types used below + # Object type to store data to export. -Field = namedtuple('Field', ['name', 'values']) +Field = namedtuple("Field", ["name", "values"]) # Object for managing meshio-relevant data, as well as a container # for its storage, taking dimensions as inputs. -Meshio_Geom = namedtuple('Meshio_Geom', ['pts', 'connectivity', 'cell_ids']) +Meshio_Geom = namedtuple("Meshio_Geom", ["pts", "connectivity", "cell_ids"]) MD_Meshio_Geom = Dict[int, Meshio_Geom] +# Interface between subdomains +Interface = Tuple[pp.Grid, pp.Grid] + +# Altogether allowed data structures to define data for exporting +DataInput = Union[ + # Keys for states + str, + # Subdomain specific data types + Tuple[Union[pp.Grid, List[pp.Grid]], str], + Tuple[pp.Grid, str, np.ndarray], + Tuple[str, np.ndarray], + # Interface specific data types + Tuple[Union[Interface, List[Interface]], str], + Tuple[Interface, str, np.ndarray], +] + +# Data structure in which data is stored after preprocessing +SubdomainData = Dict[Tuple[pp.Grid, str], np.ndarray] +InterfaceData = Dict[Tuple[Interface, str], np.ndarray] + + class Exporter: """ Class for exporting data to vtu files. @@ -69,12 +90,12 @@ class Exporter: save.write_pvd(times) where times, is a list of actual times (not time steps), associated - to the previously exported time steps. - + to the previously exported time steps. + In general, pvd files gather data exported in separate files, including data on differently dimensioned grids, constant data, and finally time steps. 
- - + + deltaT is a scalar, denoting the time step size, and steps In the case of different keywords, change the file name with @@ -84,28 +105,6 @@ class Exporter: is_mortar, mortar_side, cell_id, grid_node_number, grid_edge_number """ - # Introduce types used below - - # Interface between subdomains - Interface = Tuple[pp.Grid, pp.Grid] - - # Altogether allowed data structures to define data - DataInput = Union[ - # Keys for states - str, - # Subdomain specific data types - Tuple[Union[pp.Grid, List[pp.Grid]], str], - Tuple[pp.Grid, str, np.ndarray], - Tuple[str, np.ndarray], - # Interface specific data types - Tuple[Union[Interface, List[Interface]], str], - Tuple[Interface, str, np.ndarray], - ] - - # Data structure in which data is stored after preprocessing - SubdomainData = Dict[Tuple[pp.Grid, str], np.ndarray] - InterfaceData = Dict[Tuple[Interface, str], np.ndarray] - def __init__( self, gb: Union[pp.Grid, pp.GridBucket], @@ -128,10 +127,10 @@ def __init__( 'export_constants_separately' (boolean) controling whether constant data is exported in separate files (default True). """ - # Exporter is operating on grid buckets. If a grid is provided, convert to a grid bucket. + # Exporter is operating on grid buckets. Convert to grid bucket if grid is provided. if isinstance(gb, pp.Grid): self.gb = pp.GridBucket() - self.gb.add_nodes(gb) + self.gb.add_nodes(gb) elif isinstance(gb, pp.GridBucket): self.gb = gb else: @@ -144,7 +143,9 @@ def __init__( # Check for optional keywords self.fixed_grid: bool = kwargs.pop("fixed_grid", True) self.binary: bool = kwargs.pop("binary", True) - self.export_constants_separately: bool = kwargs.pop("export_constants_separately", True) + self.export_constants_separately: bool = kwargs.pop( + "export_constants_separately", True + ) if kwargs: msg = "Exporter() got unexpected keyword argument '{}'" raise TypeError(msg.format(kwargs.popitem()[0])) @@ -159,7 +160,9 @@ def __init__( # in meshio format. self.m_dims = np.unique([d["mortar_grid"].dim for _, d in self.gb.edges()]) num_m_dims = self.m_dims.size - self.m_meshio_geom: MD_Meshio_Geom = dict(zip(self.m_dims, [tuple()] * num_m_dims)) + self.m_meshio_geom: MD_Meshio_Geom = dict( + zip(self.m_dims, [tuple()] * num_m_dims) + ) # Generate geometrical information in meshio format self._update_meshio_geom() @@ -184,7 +187,9 @@ def __init__( # Require numba if "numba" not in sys.modules: - raise NotImplementedError("The sorting algorithm requires numba to be installed.") + raise NotImplementedError( + "The sorting algorithm requires numba to be installed." + ) # TODO in use by anyone? def change_name(self, file_name: str) -> None: @@ -201,7 +206,7 @@ def change_name(self, file_name: str) -> None: def add_constant_data( self, data: Optional[Union[DataInput, List[DataInput]]] = None, - ) -> None: + ) -> None: """ Collect constant data (after unifying). And later export to own files. @@ -212,7 +217,7 @@ def add_constant_data( geometical infos are exported. """ # Identify change in constant data. Has the effect that - # the constant data container will be exported at the + # the constant data container will be exported at the # next application of write_vtu(). self._exported_constant_data_up_to_date: bool = False @@ -267,7 +272,7 @@ def write_vtu( if isinstance(gb, pp.Grid): # Create a new grid bucket solely with a single grid as nodes. 
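+            # The grid itself is not copied; the bucket merely wraps it so
+            # that the export logic below can assume a GridBucket.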
self.gb = pp.GridBucket() - self.gb.add_nodes(gb) + self.gb.add_nodes(gb) else: self.gb = gb @@ -296,17 +301,15 @@ def write_vtu( if not self._exported_constant_data_up_to_date: # Export constant subdomain data to vtu self._export_data_vtu( - self._constant_subdomain_data, - time_step, - constant_data = True + self._constant_subdomain_data, time_step, constant_data=True ) # Export constant interface data to vtu self._export_data_vtu( self._constant_interface_data, time_step, - constant_data = True, - interface_data = True + constant_data=True, + interface_data=True, ) # Store the time step counter for later reference @@ -332,7 +335,7 @@ def write_vtu( if subdomain_data: self._export_data_vtu(subdomain_data, time_step) if interface_data: - self._export_data_vtu(interface_data, time_step, interface_data = True) + self._export_data_vtu(interface_data, time_step, interface_data=True) # Export grid bucket to pvd format file_name = self._make_file_name(self.file_name, time_step, extension=".pvd") @@ -365,18 +368,22 @@ def write_pvd( file_extension = self._exported_timesteps file_extension_constants = self._exported_timesteps_constants indices = [self._exported_timesteps.index(e) for e in file_extension] - file_extension_constants = [self._exported_timesteps_constants[i] for i in indices] + file_extension_constants = [ + self._exported_timesteps_constants[i] for i in indices + ] elif isinstance(file_extension, np.ndarray): file_extension = file_extension.tolist() # Make sure that the inputs are consistent - assert(len(file_extension) == times.shape[0]) + assert len(file_extension) == times.shape[0] # Extract the time steps related to constant data and # complying with file_extension. Implicitly test wheter # file_extension is a subset of _exported_timesteps. indices = [self._exported_timesteps.index(e) for e in file_extension] - file_extension_constants = [self._exported_timesteps_constants[i] for i in indices] + file_extension_constants = [ + self._exported_timesteps_constants[i] for i in indices + ] # Make mypy happy assert file_extension is not None @@ -384,7 +391,9 @@ def write_pvd( # Perform the same procedure as in _export_gb_pvd # but looping over all designated time steps. - o_file = open(self._append_folder_name(self.folder_name, self.file_name) + ".pvd", "w") + o_file = open( + self._append_folder_name(self.folder_name, self.file_name) + ".pvd", "w" + ) b = "LittleEndian" if sys.byteorder == "little" else "BigEndian" c = ' compressor="vtkZLibDataCompressor"' header = ( @@ -397,9 +406,7 @@ def write_pvd( fm = '\t\n' for time, fn, fn_constants in zip( - times, - file_extension, - file_extension_constants + times, file_extension, file_extension_constants ): # Go through all possible data types, analogously to # _export_gb_pvd. @@ -410,12 +417,16 @@ def write_pvd( o_file.write( fm % (time, self._make_file_name(self.file_name, fn, dim)) ) - + # Interface data. for dim in self.m_dims: if self.m_meshio_geom[dim] is not None: o_file.write( - fm % (time, self._make_file_name(self.file_name + "_mortar", fn, dim)) + fm + % ( + time, + self._make_file_name(self.file_name + "_mortar", fn, dim), + ) ) # Constant data. @@ -424,22 +435,28 @@ def write_pvd( for dim in self.dims: if self.meshio_geom[dim] is not None: o_file.write( - fm % (time, self._make_file_name( - self.file_name + "_constant", - fn_constants, - dim - )) + fm + % ( + time, + self._make_file_name( + self.file_name + "_constant", fn_constants, dim + ), + ) ) # Constant interface data. 
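+                # Mirrors the constant subdomain data above; only the
+                # "_constant_mortar" file name stem differs.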
for dim in self.m_dims: if self.m_meshio_geom[dim] is not None: o_file.write( - fm % (time, self._make_file_name( - self.file_name + "_constant_mortar", - fn_constants, - dim - )) + fm + % ( + time, + self._make_file_name( + self.file_name + "_constant_mortar", + fn_constants, + dim, + ), + ) ) o_file.write("\n" + "") @@ -448,21 +465,22 @@ def write_pvd( # Some auxiliary routines used in write_vtu() def _sort_and_unify_data( - self, data: Optional[Union[DataInput, List[DataInput]]] = None, - ) -> Tuple[SubdomainData, InterfaceData]: + self, + data: Optional[Union[DataInput, List[DataInput]]] = None, + ) -> Tuple[SubdomainData, InterfaceData]: """ Preprocess data. - + The routine has two goals: 1. Splitting data into subdomain and interface data. 2. Unify the data format. Store data in dictionaries, with keys given by grids/edges and names, and values given by the data arrays. - + Parameters: data (Union[DataInput, List[DataInput]], optional): data provided by the user in the form of strings and/or tuples of grids/edges. - + Returns: Tuple[SubdomainData, InterfaceData]: Subdomain and edge data decomposed and brought into unified format. @@ -476,8 +494,8 @@ def _sort_and_unify_data( data = [data] # Initialize container for data associated to nodes - subdomain_data: Dict[SubdomainDataKey, np.ndarray] = dict() - interface_data: Dict[InterfaceDataKey, np.ndarray] = dict() + subdomain_data: SubdomainData = dict() + interface_data: InterfaceData = dict() # Auxiliary function transforming scalar ranged values # to vector ranged values when suitable. @@ -491,28 +509,32 @@ def toVectorFormat(value: np.ndarray, g: pp.Grid) -> np.ndarray: # Make some checks if not value.size % g.num_cells == 0: raise ValueError("The data array is not compatible with the grid.") - + # Convert to vectorial data if more data provided than grid cells available, # and the value array is not already in vectorial format - if not value.size == g.num_cells and not (len(value.shape) > 1 and value.shape[1] == g.num_cells): + if not value.size == g.num_cells and not ( + len(value.shape) > 1 and value.shape[1] == g.num_cells + ): value = np.reshape(value, (-1, g.num_cells), "F") return value # Auxiliary functions for type checking - + def isinstance_interface(e: Tuple[pp.Grid, pp.Grid]) -> bool: """ Implementation of isinstance(e, Tuple[pp.Grid, pp.Grid]). """ return ( - isinstance(e, tuple) and - len(e) == 2 and - isinstance(e[0], pp.Grid) and - isinstance(e[1], pp.Grid) + isinstance(e, tuple) + and len(e) == 2 + and isinstance(e[0], pp.Grid) + and isinstance(e[1], pp.Grid) ) - def isinstance_Tuple_subdomain_str(t: Tuple[Union[pp.Grid, List[pp.Grid]], str]) -> bool: + def isinstance_Tuple_subdomain_str( + t: Tuple[Union[pp.Grid, List[pp.Grid]], str] + ) -> bool: """ Implementation of isinstance(t, Tuple[Union[pp.Grid, List[pp.Grid]], str]). @@ -529,7 +551,9 @@ def isinstance_Tuple_subdomain_str(t: Tuple[Union[pp.Grid, List[pp.Grid]], str]) else: return False - def isinstance_Tuple_interface_str(t: Tuple[Union[Tuple[pp.Grid, pp.Grid], List[Tuple[pp.Grid, pp.Grid]]], str]) -> bool: + def isinstance_Tuple_interface_str( + t: Tuple[Union[Tuple[pp.Grid, pp.Grid], List[Tuple[pp.Grid, pp.Grid]]], str] + ) -> bool: """ Implementation of isinstance(t, Tuple[Union[Edge, List[Edge]], str]). 
@@ -545,18 +569,26 @@ def isinstance_Tuple_interface_str(t: Tuple[Union[Tuple[pp.Grid, pp.Grid], List[ else: return False - def isinstance_Tuple_subdomain_str_array(t: Tuple[pp.Grid, str, np.ndarray]) -> bool: + def isinstance_Tuple_subdomain_str_array( + t: Tuple[pp.Grid, str, np.ndarray] + ) -> bool: """ Implementation of isinstance(t, Tuple[pp.Grid, str, np.ndarray]). """ types = list(map(type, t)) return isinstance(t[0], pp.Grid) and types[1:] == [str, np.ndarray] - def isinstance_Tuple_interface_str_array(t: Tuple[Tuple[pp.Grid, pp.Grid], str, np.ndarray]) -> bool: + def isinstance_Tuple_interface_str_array( + t: Tuple[Tuple[pp.Grid, pp.Grid], str, np.ndarray] + ) -> bool: """ Implementation of isinstance(t, Tuple[Interface, str, np.ndarray]). """ - return list(map(type, t)) == [tuple, str, np.ndarray] and isinstance_interface(t[0]) + return list(map(type, t)) == [ + tuple, + str, + np.ndarray, + ] and isinstance_interface(t[0]) def isinstance_Tuple_str_array(pt) -> bool: """ @@ -579,7 +611,7 @@ def isinstance_Tuple_str_array(pt) -> bool: # and edge data. Idea: Collect all data corresponding to grids and edges # related to the key. if isinstance(pt, str): - + # The data point is simply a string addressing a state using a key key = pt @@ -613,7 +645,9 @@ def isinstance_Tuple_str_array(pt) -> bool: # Make sure the key exists if not has_key: - raise ValueError(f"No data with provided key {key} present in the grid bucket.") + raise ValueError( + f"No data with provided key {key} present in the grid bucket." + ) # Case 2a: Data provided by a tuple (g, key). # Here, g is a single grid or a list of grids. @@ -622,9 +656,7 @@ def isinstance_Tuple_str_array(pt) -> bool: # By construction, the first component contains grids. # Unify by converting the first component to a list - grids: List[pp.Grid] = ( - pt[0] if isinstance(pt[0], list) else [pt[0]] - ) + grids: List[pp.Grid] = pt[0] if isinstance(pt[0], list) else [pt[0]] # By construction, the second component contains a key. key: str = pt[1] @@ -637,8 +669,10 @@ def isinstance_Tuple_str_array(pt) -> bool: # Make sure the data exists. if not (pp.STATE in d and key in d[pp.STATE]): - raise ValueError(f"""No state with prescribed key {key} - available on selected subdomains.""") + raise ValueError( + f"""No state with prescribed key {key} + available on selected subdomains.""" + ) # Fetch data and convert to vectorial format if suitable value: np.ndarray = toVectorFormat(d[pp.STATE][key], g) @@ -668,8 +702,10 @@ def isinstance_Tuple_str_array(pt) -> bool: # Make sure the data exists. if not (pp.STATE in d and key in d[pp.STATE]): - raise ValueError(f"""No state with prescribed key {key} - available on selected interfaces.""") + raise ValueError( + f"""No state with prescribed key {key} + available on selected interfaces.""" + ) # Fetch data and convert to vectorial format if suitable mg = d["mortar_grid"] @@ -688,9 +724,11 @@ def isinstance_Tuple_str_array(pt) -> bool: # buckets containing a single grid. 
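
# Aside: a runnable toy (names local to this sketch, not part of the module)
# of the structural type checks above, where list(map(type, ...)) stands in
# for isinstance on tuple contents, as in isinstance_Tuple_interface_str_array:
import numpy as np

def is_key_value(pt) -> bool:
    # A (str, ndarray) pair, i.e. the shape of Case 3 below.
    return list(map(type, pt)) == [str, np.ndarray]

print(is_key_value(("pressure", np.zeros(3))))  # True
print(is_key_value(("pressure", [0.0, 0.0])))   # False: plain list, not ndarray

# Back to Case 3: the single grid is fetched next.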
grids: List[pp.Grid] = [g for g, _ in self.gb.nodes()] g: pp.Grid = grids[0] - if not len(grids)==1: - raise ValueError(f"""The data type used for {pt} is only - supported if the grid bucket only contains a single grid.""") + if not len(grids) == 1: + raise ValueError( + f"""The data type used for {pt} is only + supported if the grid bucket only contains a single grid.""" + ) # Fetch remaining ingredients required to define node data element key: str = pt[0] @@ -724,7 +762,9 @@ def isinstance_Tuple_str_array(pt) -> bool: interface_data[(e, key)] = value else: - raise ValueError(f"The provided data type used for {pt} is not supported.") + raise ValueError( + f"The provided data type used for {pt} is not supported." + ) return subdomain_data, interface_data @@ -737,7 +777,7 @@ def _update_constant_mesh_data(self) -> None: the same format as the output of _sort_and_unify_data. """ # Identify change in constant data. Has the effect that - # the constant data container will be exported at the + # the constant data container will be exported at the # next application of write_vtu(). self._exported_constant_data_up_to_date: bool = False @@ -750,12 +790,18 @@ def _update_constant_mesh_data(self) -> None: # Add mesh related, constant subdomain data by direct assignment for g, d in self.gb.nodes(): ones = np.ones(g.num_cells, dtype=int) - self._constant_subdomain_data[(g, "cell_id")] = np.arange(g.num_cells, dtype=int) + self._constant_subdomain_data[(g, "cell_id")] = np.arange( + g.num_cells, dtype=int + ) self._constant_subdomain_data[(g, "grid_dim")] = g.dim * ones if "node_number" in d: - self._constant_subdomain_data[(g, "grid_node_number")] = d["node_number"] * ones + self._constant_subdomain_data[(g, "grid_node_number")] = ( + d["node_number"] * ones + ) self._constant_subdomain_data[(g, "is_mortar")] = 0 * ones - self._constant_subdomain_data[(g, "mortar_side")] = pp.grids.mortar_grid.MortarSides.NONE_SIDE.value * ones + self._constant_subdomain_data[(g, "mortar_side")] = ( + pp.grids.mortar_grid.MortarSides.NONE_SIDE.value * ones + ) # Define constant interface data related to the mesh @@ -772,7 +818,9 @@ def _update_constant_mesh_data(self) -> None: # Construct empty arrays for all extra edge data self._constant_interface_data[(e, "grid_dim")] = np.empty(0, dtype=int) self._constant_interface_data[(e, "cell_id")] = np.empty(0, dtype=int) - self._constant_interface_data[(e, "grid_edge_number")] = np.empty(0, dtype=int) + self._constant_interface_data[(e, "grid_edge_number")] = np.empty( + 0, dtype=int + ) self._constant_interface_data[(e, "is_mortar")] = np.empty(0, dtype=int) self._constant_interface_data[(e, "mortar_side")] = np.empty(0, dtype=int) @@ -785,17 +833,14 @@ def _update_constant_mesh_data(self) -> None: # Grid dimension of the mortar grid self._constant_interface_data[(e, "grid_dim")] = np.hstack( - ( - self._constant_interface_data[(e, "grid_dim")], - g.dim * ones - ) + (self._constant_interface_data[(e, "grid_dim")], g.dim * ones) ) # Cell ids of the mortar grid self._constant_interface_data[(e, "cell_id")] = np.hstack( ( self._constant_interface_data[(e, "cell_id")], - np.arange(g.num_cells, dtype=int) + mg_num_cells + np.arange(g.num_cells, dtype=int) + mg_num_cells, ) ) @@ -803,23 +848,20 @@ def _update_constant_mesh_data(self) -> None: self._constant_interface_data[(e, "grid_edge_number")] = np.hstack( ( self._constant_interface_data[(e, "grid_edge_number")], - d["edge_number"] * ones + d["edge_number"] * ones, ) ) # Whether the edge is mortar 
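
# Aside: the side-by-side accumulation used for all these mortar fields
# follows a plain hstack pattern; a runnable toy (toy sizes, names local to
# this sketch):
import numpy as np

cell_id = np.empty(0, dtype=int)
offset = 0
for side_num_cells in [3, 2]:  # e.g. two mortar sides with 3 and 2 cells
    cell_id = np.hstack((cell_id, np.arange(side_num_cells) + offset))
    offset += side_num_cells
print(cell_id)  # [0 1 2 3 4]

# The mortar flag announced above is stacked the same way: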
self._constant_interface_data[(e, "is_mortar")] = np.hstack( - ( - self._constant_interface_data[(e, "is_mortar")], - ones - ) + (self._constant_interface_data[(e, "is_mortar")], ones) ) # Side of the mortar self._constant_interface_data[(e, "mortar_side")] = np.hstack( ( self._constant_interface_data[(e, "mortar_side")], - side.value * ones + side.value * ones, ) ) @@ -827,15 +869,15 @@ def _update_constant_mesh_data(self) -> None: mg_num_cells += g.num_cells def _export_data_vtu( - self, - data: Union[SubdomainData, InterfaceData], - time_step: int, - **kwargs, - ) -> None: + self, + data: Union[SubdomainData, InterfaceData], + time_step: int, + **kwargs, + ) -> None: """ Collect data associated to a single grid dimension and passing further to the final writing routine. - + For each fixed dimension, all subdomains of that dimension and the data related to that subdomains will be exported simultaneously. Analogously for interfaces. @@ -872,7 +914,7 @@ def _export_data_vtu( file_name_base = self._append_folder_name(self.folder_name, file_name_base) # Collect unique keys, and for unique sorting, sort by alphabet - keys = list(set([key for _,key in data])) + keys = list(set([key for _, key in data])) keys.sort() # Collect the data and extra data in a single stack for each dimension @@ -883,12 +925,14 @@ def _export_data_vtu( # Get all geometrical entities of dimension dim: # subdomains with correct grid dimension for subdomain data, and # interfaces with correct mortar grid dimension for interface data. - # TODO update and simplify when new GridTree structure is in place. + # TODO update and simplify when new GridTree structure is in place. entities: Union[List[pp.Grid], List[Tuple[pp.Grid, pp.Grid]]] = [] if is_subdomain_data: entities = self.gb.get_grids(lambda g: g.dim == dim) else: - entities = [e for e, d in self.gb.edges() if d["mortar_grid"].dim == dim] + entities = [ + e for e, d in self.gb.edges() if d["mortar_grid"].dim == dim + ] # Construct the list of fields represented on this dimension. fields: List[Field] = [] @@ -896,14 +940,16 @@ def _export_data_vtu( # Collect the values associated to all entities values = [] for e in entities: - if (e,key) in data: - values.append(data[(e,key)]) + if (e, key) in data: + values.append(data[(e, key)]) # Require data for all or none entities of that dimension. # FIXME: By changing the export strategy, this could be fixed. if len(values) not in [0, len(entities)]: - raise ValueError(f"""Insufficient amount of data provided for - key {key} on dimension {dim}.""") + raise ValueError( + f"""Insufficient amount of data provided for + key {key} on dimension {dim}.""" + ) # If data has been found, append data to list after stacking # values for different entities @@ -913,7 +959,9 @@ def _export_data_vtu( # Print data for the particular dimension. Since geometric info # is required distinguish between subdomain and interface data. - meshio_geom = self.meshio_geom[dim] if is_subdomain_data else self.m_meshio_geom[dim] + meshio_geom = ( + self.meshio_geom[dim] if is_subdomain_data else self.m_meshio_geom[dim] + ) if meshio_geom is not None: self._write(fields, file_name, meshio_geom) @@ -944,15 +992,14 @@ def _export_gb_pvd(self, file_name: str, time_step: int) -> None: # Subdomain data. for dim in self.dims: if self.meshio_geom[dim] is not None: - o_file.write( - fm % self._make_file_name(self.file_name, time_step, dim) - ) + o_file.write(fm % self._make_file_name(self.file_name, time_step, dim)) # Interface data. 
for dim in self.m_dims: if self.m_meshio_geom[dim] is not None: o_file.write( - fm % self._make_file_name(self.file_name + "_mortar", time_step, dim) + fm + % self._make_file_name(self.file_name + "_mortar", time_step, dim) ) # If constant data is exported to separate vtu files, also include @@ -963,10 +1010,9 @@ def _export_gb_pvd(self, file_name: str, time_step: int) -> None: for dim in self.dims: if self.meshio_geom[dim] is not None: o_file.write( - fm % self._make_file_name( - self.file_name + "_constant", - self._time_step_constants, - dim + fm + % self._make_file_name( + self.file_name + "_constant", self._time_step_constants, dim ) ) @@ -974,10 +1020,11 @@ def _export_gb_pvd(self, file_name: str, time_step: int) -> None: for dim in self.m_dims: if self.m_meshio_geom[dim] is not None: o_file.write( - fm % self._make_file_name( + fm + % self._make_file_name( self.file_name + "_constant_mortar", self._time_step_constants, - dim + dim, ) ) @@ -1032,8 +1079,7 @@ def _export_grid( raise ValueError(f"Unknown dimension {dim}") def _export_1d( - self, - gs: Iterable[pp.Grid] + self, gs: Iterable[pp.Grid] ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: """ Export the geometrical data (point coordinates) and connectivity @@ -1051,9 +1097,7 @@ def _export_1d( cell_type = "line" # Dictionary storing cell->nodes connectivity information - cell_to_nodes: Dict[str, np.ndarray] = { - cell_type: np.empty((0, 2), dtype=int) - } + cell_to_nodes: Dict[str, np.ndarray] = {cell_type: np.empty((0, 2), dtype=int)} # Dictionary collecting all cell ids for each cell type. # Since each cell is a line, the list of cell ids is trivial @@ -1102,10 +1146,7 @@ def _export_1d( return Meshio_Geom(meshio_pts, meshio_cells, meshio_cell_id) def _simplex_cell_to_nodes( - self, - n: int, - g: pp.Grid, - cells: Optional[np.ndarray] = None + self, n: int, g: pp.Grid, cells: Optional[np.ndarray] = None ) -> np.ndarray: """ Determine cell to node connectivity for a general n-simplex mesh. @@ -1120,22 +1161,27 @@ def _simplex_cell_to_nodes( (associated to cells) all associated nodes are marked. """ # Determine cell-node ptr - cn_indptr = g.cell_nodes().indptr[:-1] if cells is None else g.cell_nodes().indptr[cells] - + cn_indptr = ( + g.cell_nodes().indptr[:-1] + if cells is None + else g.cell_nodes().indptr[cells] + ) + # Collect the indptr to all nodes of the cell; each n-simplex cell contains n nodes - expanded_cn_indptr = np.vstack([cn_indptr + i for i in range(n)]).reshape(-1, order="F") - + expanded_cn_indptr = np.vstack([cn_indptr + i for i in range(n)]).reshape( + -1, order="F" + ) + # Detect all corresponding nodes by applying the expanded mask to the indices expanded_cn_indices = g.cell_nodes().indices[expanded_cn_indptr] - + # Convert to right format. - cn_indices = np.reshape(expanded_cn_indices, (-1,n), order="C") + cn_indices = np.reshape(expanded_cn_indices, (-1, n), order="C") return cn_indices def _export_2d( - self, - gs: Iterable[pp.Grid] + self, gs: Iterable[pp.Grid] ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: """ Export the geometrical data (point coordinates) and connectivity @@ -1151,7 +1197,7 @@ def _export_2d( # Use standard names for simple object types: in this routine only triangle # and quad cells are treated in a special manner. - polygon_map = {"polygon3": "triangle", "polygon4": "quad"} + polygon_map = {"polygon3": "triangle", "polygon4": "quad"} # Dictionary storing cell->nodes connectivity information for all # cell types. 
For this, the nodes have to be sorted such that @@ -1206,7 +1252,7 @@ def _export_2d( # Check if cell_type already defined in cell_to_nodes, otherwise construct if cell_type not in cell_to_nodes: - cell_to_nodes[cell_type] = np.empty((0,n), dtype=int) + cell_to_nodes[cell_type] = np.empty((0, n), dtype=int) # Special case: Triangle cells, i.e., n=3. if cell_type == "triangle": @@ -1220,27 +1266,30 @@ def _export_2d( # Quad and polygon cells. else: - # For quads/polygons, g.cell_nodes cannot be blindly used as for triangles, since the - # ordering of the nodes may define a cell of the type (here specific for quads) - # x--x and not x--x + # For quads/polygons, g.cell_nodes cannot be blindly used as for + # triangles, since the ordering of the nodes may define a cell of + # the type (here specific for quads) x--x and not x--x # \/ | | # /\ | | # x--x x--x . - # Therefore, use both g.cell_faces and g.face_nodes to make use of face information - # and sort those correctly to retrieve the correct connectivity., + # Therefore, use both g.cell_faces and g.face_nodes to make use of face + # information and sort those to retrieve the correct connectivity. - # Strategy: Collect all cell nodes including their connectivity in a matrix of - # double num cell size. The goal will be to gather starting and end points for - # all faces of each cell, sort those faces, such that they form a circlular graph, - # and then choose the resulting starting points of all faces to define the connectivity. + # Strategy: Collect all cell nodes including their connectivity in a + # matrix of double num cell size. The goal will be to gather starting + # and end points for all faces of each cell, sort those faces, such that + # they form a circlular graph, and then choose the resulting starting + # points of all faces to define the connectivity. # Fetch corresponding cells cells = g_cell_map[cell_type] - # Determine all faces of all cells. Use an analogous approach as used to determine - # all cell nodes for triangle cells. And use that a polygon with n nodes has also - # n faces. + # Determine all faces of all cells. Use an analogous approach as used to + # determine all cell nodes for triangle cells. And use that a polygon with + # n nodes has also n faces. cf_indptr = g.cell_faces.indptr[cells] - expanded_cf_indptr = np.vstack([cf_indptr + i for i in range(n)]).reshape(-1, order="F") + expanded_cf_indptr = np.vstack( + [cf_indptr + i for i in range(n)] + ).reshape(-1, order="F") cf_indices = g.cell_faces.indices[expanded_cf_indptr] # Determine the associated (two) nodes of all faces for each cell. @@ -1248,20 +1297,23 @@ def _export_2d( # Extract nodes for first and second node of each face; reshape such # that all first nodes of all faces for each cell are stored in one row, # i.e., end up with an array of size num_cells (for this cell type) x n. - cfn_indices = [g.face_nodes.indices[fn_indptr + i].reshape(-1,n) for i in range(2)] + cfn_indices = [ + g.face_nodes.indices[fn_indptr + i].reshape(-1, n) + for i in range(2) + ] # Group first and second nodes, with alternating order of rows. # By this, each cell is respresented by two rows. The first and second # rows contain first and second nodes of faces. And each column stands # for one face. - cfn = np.ravel(cfn_indices, order="F").reshape(n,-1).T + cfn = np.ravel(cfn_indices, order="F").reshape(n, -1).T # Sort faces for each cell such that they form a chain. Use a function # compiled with Numba. This step is the bottleneck of this routine. 
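
# Aside: a runnable pure-Python toy (no numba; names local to this sketch)
# of the chain sorting performed next for a single cell. Faces are given as
# (start, end) node pairs in arbitrary order and orientation, and are
# reordered, flipping where needed, so that they form a closed loop:
import numpy as np

def sort_chain(pairs: np.ndarray) -> np.ndarray:
    # pairs: (2, n) array; column j holds the two node ids of face j.
    sorted_pairs = np.zeros_like(pairs)
    sorted_pairs[:, 0] = pairs[:, 0]
    used = {0}
    for i in range(1, pairs.shape[1]):
        prev = sorted_pairs[1, i - 1]
        for j in range(1, pairs.shape[1]):
            if j in used:
                continue
            if pairs[0, j] == prev:
                sorted_pairs[:, i] = pairs[:, j]
                used.add(j)
                break
            if pairs[1, j] == prev:
                sorted_pairs[:, i] = pairs[::-1, j]  # flip orientation
                used.add(j)
                break
    return sorted_pairs

# A quad with shuffled faces (0,1), (3,0), (2,3), (1,2):
print(sort_chain(np.array([[0, 3, 2, 1], [1, 0, 3, 2]])))
# -> columns (0,1), (1,2), (2,3), (3,0); row 0 is the node chain 0,1,2,3.

# The numba-compiled routine is invoked next: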
cfn = self._sort_point_pairs_numba(cfn).astype(int) - # For each cell pick the sorted nodes such that they form a chain and thereby - # define the connectivity, i.e., skip every second row. - cn_indices = cfn[::2,:] + # For each cell pick the sorted nodes such that they form a chain + # and thereby define the connectivity, i.e., skip every second row. + cn_indices = cfn[::2, :] # Add offset to account for previous grids, and store cell_to_nodes[cell_type] = np.vstack( @@ -1281,12 +1333,11 @@ def _export_2d( # the corresponding cells with ids from cell_id. for block, (cell_type, cell_block) in enumerate(cell_to_nodes.items()): - # Meshio requires the keyword "polgon" for general polygons. + # Meshio requires the keyword "polgon" for general polygons. # Thus, remove the number of nodes associated to polygons. cell_type_meshio_format = "polygon" if "polygon" in cell_type else cell_type meshio_cells[block] = meshio.CellBlock( - cell_type_meshio_format, - cell_block.astype(int) + cell_type_meshio_format, cell_block.astype(int) ) meshio_cell_id[block] = np.array(cell_id[cell_type]) @@ -1327,55 +1378,62 @@ def _function_to_compile(lines): # Implicitly expect that all chains have the same length num_chains, chain_length = lines.shape # Since for each chain lines includes two rows, take the half - num_chains = int(num_chains/2) + num_chains = int(num_chains / 2) # Initialize array of sorted lines to be the final output - sorted_lines = np.zeros((2*num_chains,chain_length)) + sorted_lines = np.zeros((2 * num_chains, chain_length)) # Fix the first line segment for each chain and identify # it as in place regarding the sorting. sorted_lines[:, 0] = lines[:, 0] # Keep track of which lines have been fixed and which are still candidates found = np.zeros(chain_length) found[0] = 1 - + # Loop over chains and Consider each chain separately. for c in range(num_chains): # Initialize found making any line segment aside of the first a candidate found[1:] = 0 - - # Define the end point of the previous and starting point for the next line segment - prev = sorted_lines[2*c+1, 0] - + + # Define the end point of the previous and starting point for the next + # line segment + prev = sorted_lines[2 * c + 1, 0] + # The sorting algorithm: Loop over all position in the chain to be set next. # Find the right candidate to be moved to this position and possibly flipped # if needed. A candidate is identified as fitting if it contains one point # equal to the current starting point. This algorithm uses a double loop, # which is the most naive approach. However, assume chain_length is in # general small. - for i in range(1, chain_length): # The first line has already been found - for j in range(1, chain_length): # The first line has already been found + for i in range( + 1, chain_length + ): # The first line has already been found + for j in range( + 1, chain_length + ): # The first line has already been found # A candidate line segment with matching start and end point # in the first component of the point pair. - if np.abs(found[j]) < 1e-6 and lines[2*c, j] == prev: + if np.abs(found[j]) < 1e-6 and lines[2 * c, j] == prev: # Copy the segment to the right place - sorted_lines[2*c:2*c+2, i] = lines[2*c:2*c+2, j] + sorted_lines[2 * c : 2 * c + 2, i] = lines[ + 2 * c : 2 * c + 2, j + ] # Mark as used found[j] = 1 # Define the starting point for the next line segment - prev = lines[2*c+1, j] + prev = lines[2 * c + 1, j] break # A candidate line segment with matching start and end point # in the second component of the point pair. 
- elif np.abs(found[j])<1e-6 and lines[2*c+1, j] == prev: + elif np.abs(found[j]) < 1e-6 and lines[2 * c + 1, j] == prev: # Flip and copy the segment to the right place - sorted_lines[2*c, i] = lines[2*c+1, j] - sorted_lines[2*c+1, i] = lines[2*c, j] + sorted_lines[2 * c, i] = lines[2 * c + 1, j] + sorted_lines[2 * c + 1, i] = lines[2 * c, j] # Mark as used found[j] = 1 # Define the starting point for the next line segment - prev = lines[2*c, j] + prev = lines[2 * c, j] break - + # Return the sorted lines defining chains. return sorted_lines @@ -1406,7 +1464,7 @@ def _export_3d( # and general polyhedra. meshio does not allow for a mix of cell types # in 3d within a single grid. Thus, if a grid contains cells of varying # types, all cells will be casted as polyhedra. - + # Dictionaries storing cell->faces and cell->nodes connectivity # information for all cell types. For the special meshio geometric # types "tetra" and "hexahedron", cell->nodes will be used; the @@ -1453,9 +1511,13 @@ def _export_3d( # hence checking solely the type is not sufficient); all other # grids will be identified as "polyehdron" grids, even if they # contain single "tetra" cells. - if n==4 and len(set(num_faces_per_cell)) == 1: + if n == 4 and len(set(num_faces_per_cell)) == 1: cell_type = "tetra" - elif isinstance(g, pp.CartGrid) and n==6 and len(set(num_faces_per_cell)) == 1: + elif ( + isinstance(g, pp.CartGrid) + and n == 6 + and len(set(num_faces_per_cell)) == 1 + ): cell_type = "hexahedron" else: cell_type = f"polyhedron{n}" @@ -1473,25 +1535,29 @@ def _export_3d( # Determine local connectivity for all cells. Due to differnt # treatment of special and general cell types and to conform - # with array sizes (for polyhedra), treat each cell type + # with array sizes (for polyhedra), treat each cell type # separately. for n in np.unique(num_faces_per_cell): # Define the cell type as above - if n==4 and len(set(num_faces_per_cell)) == 1: + if n == 4 and len(set(num_faces_per_cell)) == 1: cell_type = "tetra" - elif isinstance(g, pp.CartGrid) and n==6 and len(set(num_faces_per_cell)) == 1: + elif ( + isinstance(g, pp.CartGrid) + and n == 6 + and len(set(num_faces_per_cell)) == 1 + ): cell_type = "hexahedron" else: cell_type = f"polyhedron{n}" # Special case: Tetrahedra if cell_type == "tetra": - # Since tetra is a meshio-known cell type, cell_to_nodes connectivity information - # is provided, i.e., for each cell the nodes in the meshio-defined local numbering - # of nodes is generated. Since tetra is a simplex, the nodes do not need to be - # ordered in any specific type, as the geometrical object is invariant under - # permutations. + # Since tetra is a meshio-known cell type, cell_to_nodes connectivity + # information is provided, i.e., for each cell the nodes in the + # meshio-defined local numbering of nodes is generated. Since tetra + # is a simplex, the nodes do not need to be ordered in any specific + # type, as the geometrical object is invariant under permutations. 
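
# Aside: a runnable toy of the face-count classification above, with a
# scipy CSC matrix standing in for g.cell_faces (an assumption made for
# this sketch; the real object is a PorePy grid attribute):
import numpy as np
import scipy.sparse as sps

# faces x cells incidence; two cells with 4 faces each.
rows = np.array([0, 1, 2, 3, 2, 3, 4, 5])
cols = np.array([0, 0, 0, 0, 1, 1, 1, 1])
cell_faces = sps.csc_matrix((np.ones(8), (rows, cols)))
num_faces_per_cell = cell_faces.getnnz(axis=0)
print(np.unique(num_faces_per_cell))  # [4] -> all cells qualify as "tetra"

# Back to the export, starting with the tetra branch: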
# Fetch tetra cells cells = g_cell_map[cell_type] @@ -1501,9 +1567,10 @@ def _export_3d( # Initialize data structure if not available yet if cell_type not in cell_to_nodes: - cell_to_nodes[cell_type] = np.empty((0,4), dtype=int) + cell_to_nodes[cell_type] = np.empty((0, 4), dtype=int) - # Store cell-node connectivity, and add offset taking into account previous grids + # Store cell-node connectivity, and add offset taking into account + # previous grids cell_to_nodes[cell_type] = np.vstack( (cell_to_nodes[cell_type], cn_indices + nodes_offset) ) @@ -1530,16 +1597,23 @@ def _export_3d( cells = g_cell_map[cell_type] # Determine cell-node ptr - cn_indptr = g.cell_nodes().indptr[:-1] if cells is None else g.cell_nodes().indptr[cells] - - # Collect the indptr to all nodes of the cell; each n-simplex cell contains n nodes - expanded_cn_indptr = np.vstack([cn_indptr + i for i in range(8)]).reshape(-1, order="F") - - # Detect all corresponding nodes by applying the expanded mask to the indices + cn_indptr = ( + g.cell_nodes().indptr[:-1] + if cells is None + else g.cell_nodes().indptr[cells] + ) + + # Collect the indptr to all nodes of the cell; + # each n-simplex cell contains n nodes + expanded_cn_indptr = np.vstack( + [cn_indptr + i for i in range(8)] + ).reshape(-1, order="F") + + # Detect all corresponding nodes by applying the expanded mask to indices expanded_cn_indices = g.cell_nodes().indices[expanded_cn_indptr] - + # Convert to right format. - cn_indices = np.reshape(expanded_cn_indices, (-1,8), order="C") + cn_indices = np.reshape(expanded_cn_indices, (-1, 8), order="C") # Reorder nodes to comply with the node numbering convention of meshio # regarding hexahedron cells. @@ -1547,9 +1621,10 @@ def _export_3d( # Initialize data structure if not available yet if cell_type not in cell_to_nodes: - cell_to_nodes[cell_type] = np.empty((0,8), dtype=int) + cell_to_nodes[cell_type] = np.empty((0, 8), dtype=int) - # Store cell-node connectivity, and add offset taking into account previous grids + # Store cell-node connectivity, and add offset taking into account + # previous grids cell_to_nodes[cell_type] = np.vstack( (cell_to_nodes[cell_type], cn_indices + nodes_offset) ) @@ -1557,11 +1632,12 @@ def _export_3d( else: # Identify remaining cells as polyhedra - # The general strategy is to define the connectivity as cell-face information, - # where the faces are defined by nodes. Hence, this information is significantly - # larger than the info provided for tetra cells. Here, we make use of the fact - # that g.face_nodes provides nodes ordered wrt. the right-hand rule. - + # The general strategy is to define the connectivity as cell-face + # information, where the faces are defined by nodes. Hence, this + # information is significantly larger than the info provided for + # tetra cells. Here, we make use of the fact that g.face_nodes + # provides nodes ordered wrt. the right-hand rule. + # Fetch cells with n faces cells = g_cell_map[cell_type] @@ -1580,9 +1656,12 @@ def _export_3d( cell_to_faces[cell_type] = [] cell_to_faces[cell_type] += [ [ - fn_indices[fn_indptr[f] : fn_indptr[f+1]] # nodes - for f in cf_indices[cf_indptr[c]:cf_indptr[c+1]] # faces - ] for c in cells # cells + fn_indices[fn_indptr[f] : fn_indptr[f + 1]] # nodes + for f in cf_indices[ + cf_indptr[c] : cf_indptr[c + 1] + ] # faces + ] + for c in cells # cells ] # Update offset @@ -1662,7 +1741,9 @@ def _write( # Write mesh information and data to VTK format. 
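
# Aside: a minimal hedged sketch of the meshio call pattern used here
# (a single triangle; file name and data are illustrative):
import meshio
import numpy as np

points = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
cells = [("triangle", np.array([[0, 1, 2]]))]
mesh = meshio.Mesh(points, cells, cell_data={"cell_id": [np.array([0])]})
meshio.write("single_triangle.vtu", mesh, binary=True)

# The exporter's own call is analogous: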
meshio.write(file_name, meshio_grid_to_export, binary=self.binary) - def _append_folder_name(self, folder_name: Optional[str] = None, name: str = "") -> str: + def _append_folder_name( + self, folder_name: Optional[str] = None, name: str = "" + ) -> str: """ Auxiliary method setting up potentially non-existent folder structure and setting up a path for exporting. @@ -1713,7 +1794,9 @@ def _make_file_name( """ # Define non-empty time step extension including zero padding. - time_extension = "" if time_step is None else "_" + str(time_step).zfill(self.padding) + time_extension = ( + "" if time_step is None else "_" + str(time_step).zfill(self.padding) + ) # Define non-empty dim extension dim_extension = "" if dim is None else "_" + str(dim) From a5f37bc44ec65f8527b44a1eccb31b08fbb0bd5d Mon Sep 17 00:00:00 2001 From: Jakub Both Date: Thu, 19 May 2022 12:21:21 +0200 Subject: [PATCH 45/83] STY: First attempt to make mypy happy. --- src/porepy/viz/exporter.py | 75 +++++++++++++++++++------------------- 1 file changed, 37 insertions(+), 38 deletions(-) diff --git a/src/porepy/viz/exporter.py b/src/porepy/viz/exporter.py index 020a366624..cdd3c72a27 100644 --- a/src/porepy/viz/exporter.py +++ b/src/porepy/viz/exporter.py @@ -154,15 +154,13 @@ def __init__( # meshio format. Include all but the 0-d grids self.dims = np.setdiff1d(self.gb.all_dims(), [0]) num_dims = self.dims.size - self.meshio_geom: MD_Meshio_Geom = dict(zip(self.dims, [tuple()] * num_dims)) + self.meshio_geom: MD_Meshio_Geom = dict() # Generate infrastructure for storing fixed-dimensional mortar grids # in meshio format. self.m_dims = np.unique([d["mortar_grid"].dim for _, d in self.gb.edges()]) num_m_dims = self.m_dims.size - self.m_meshio_geom: MD_Meshio_Geom = dict( - zip(self.m_dims, [tuple()] * num_m_dims) - ) + self.m_meshio_geom: MD_Meshio_Geom = dict() # Generate geometrical information in meshio format self._update_meshio_geom() @@ -178,7 +176,7 @@ def __init__( # Reference to the last time step used for exporting constant data. self._time_step_constants: int = 0 # Storage for file name extensions for time steps, regarding constant data. - self._exported_timesteps_constants: List[int] = [] + self._exported_timesteps_constants: List[int] = list() # Identifier for whether constant data is up-to-date. self._exported_constant_data_up_to_date: bool = False @@ -219,7 +217,7 @@ def add_constant_data( # Identify change in constant data. Has the effect that # the constant data container will be exported at the # next application of write_vtu(). - self._exported_constant_data_up_to_date: bool = False + self._exported_constant_data_up_to_date = False # Preprocessing of the user-defined constant data # Has two main goals: @@ -228,10 +226,10 @@ def add_constant_data( subdomain_data, interface_data = self._sort_and_unify_data(data) # Add the user-defined data to the containers for constant data. 
- for key, value in subdomain_data.items(): - self._constant_subdomain_data[key] = value.copy() - for key, value in interface_data.items(): - self._constant_interface_data[key] = value.copy() + for key_s, value in subdomain_data.items(): + self._constant_subdomain_data[key_s] = value.copy() + for key_i, value in interface_data.items(): + self._constant_interface_data[key_i] = value.copy() def write_vtu( self, @@ -322,14 +320,15 @@ def write_vtu( self._exported_constant_data_up_to_date = True # Store the timestep refering to the origin of the constant data - self._exported_timesteps_constants.append(self._time_step_constant_data) + if self._time_step_constant_data: + self._exported_timesteps_constants.append(self._time_step_constant_data) else: # Append constant subdomain and interface data to the # standard containers for subdomain and interface data. - for key, value in self._constant_subdomain_data.items(): - subdomain_data[key] = value.copy() - for key, value in self._constant_interface_data.items(): - interface_data[key] = value.copy() + for key_s, value in self._constant_subdomain_data.items(): + subdomain_data[key_s] = value.copy() + for key_i, value in self._constant_interface_data.items(): + interface_data[key_i] = value.copy() # Export subdomain and interface data to vtu format if existing if subdomain_data: @@ -521,9 +520,9 @@ def toVectorFormat(value: np.ndarray, g: pp.Grid) -> np.ndarray: # Auxiliary functions for type checking - def isinstance_interface(e: Tuple[pp.Grid, pp.Grid]) -> bool: + def isinstance_interface(e: Interface) -> bool: """ - Implementation of isinstance(e, Tuple[pp.Grid, pp.Grid]). + Implementation of isinstance(e, Interface). """ return ( isinstance(e, tuple) @@ -552,7 +551,7 @@ def isinstance_Tuple_subdomain_str( return False def isinstance_Tuple_interface_str( - t: Tuple[Union[Tuple[pp.Grid, pp.Grid], List[Tuple[pp.Grid, pp.Grid]]], str] + t: Tuple[Union[Interface, List[Interface]], str] ) -> bool: """ Implementation of isinstance(t, Tuple[Union[Edge, List[Edge]], str]). @@ -579,7 +578,7 @@ def isinstance_Tuple_subdomain_str_array( return isinstance(t[0], pp.Grid) and types[1:] == [str, np.ndarray] def isinstance_Tuple_interface_str_array( - t: Tuple[Tuple[pp.Grid, pp.Grid], str, np.ndarray] + t: Tuple[Interface, str, np.ndarray] ) -> bool: """ Implementation of isinstance(t, Tuple[Interface, str, np.ndarray]). @@ -638,7 +637,7 @@ def isinstance_Tuple_str_array(pt) -> bool: # Fetch data and convert to vectorial format if suggested by the size mg = d["mortar_grid"] - value: np.ndarray = toVectorFormat(d[pp.STATE][key], mg) + value = toVectorFormat(d[pp.STATE][key], mg) # Add data point in correct format to the collection interface_data[(e, key)] = value @@ -659,7 +658,7 @@ def isinstance_Tuple_str_array(pt) -> bool: grids: List[pp.Grid] = pt[0] if isinstance(pt[0], list) else [pt[0]] # By construction, the second component contains a key. - key: str = pt[1] + key= pt[1] # Loop over grids and fetch the states corresponding to the key for g in grids: @@ -675,7 +674,7 @@ def isinstance_Tuple_str_array(pt) -> bool: ) # Fetch data and convert to vectorial format if suitable - value: np.ndarray = toVectorFormat(d[pp.STATE][key], g) + value = toVectorFormat(d[pp.STATE][key], g) # Add data point in correct format to collection subdomain_data[(g, key)] = value @@ -692,7 +691,7 @@ def isinstance_Tuple_str_array(pt) -> bool: ) # By construction, the second component contains a key. 
- key: str = pt[1] + key = pt[1] # Loop over edges and fetch the states corresponding to the key for e in edges: @@ -709,7 +708,7 @@ def isinstance_Tuple_str_array(pt) -> bool: # Fetch data and convert to vectorial format if suitable mg = d["mortar_grid"] - value: np.ndarray = toVectorFormat(d[pp.STATE][key], mg) + value = toVectorFormat(d[pp.STATE][key], mg) # Add data point in correct format to collection interface_data[(e, key)] = value @@ -722,8 +721,8 @@ def isinstance_Tuple_str_array(pt) -> bool: # Fetch the correct grid. This option is only supported for grid # buckets containing a single grid. - grids: List[pp.Grid] = [g for g, _ in self.gb.nodes()] - g: pp.Grid = grids[0] + grids = [g for g, _ in self.gb.nodes()] + g = grids[0] if not len(grids) == 1: raise ValueError( f"""The data type used for {pt} is only @@ -731,8 +730,8 @@ def isinstance_Tuple_str_array(pt) -> bool: ) # Fetch remaining ingredients required to define node data element - key: str = pt[0] - value: np.ndarray = toVectorFormat(pt[1], g) + key = pt[0] + value = toVectorFormat(pt[1], g) # Add data point in correct format to collection subdomain_data[(g, key)] = value @@ -741,9 +740,9 @@ def isinstance_Tuple_str_array(pt) -> bool: # Idea: Since this is already the desired format, process naturally. elif isinstance_Tuple_subdomain_str_array(pt): # Data point in correct format - g: pp.Grid = pt[0] - key: str = pt[1] - value: np.ndarray = toVectorFormat(pt[2], g) + g = pt[0] + key = pt[1] + value = toVectorFormat(pt[2], g) # Add data point in correct format to collection subdomain_data[(g, key)] = value @@ -752,11 +751,11 @@ def isinstance_Tuple_str_array(pt) -> bool: # Idea: Since this is already the desired format, process naturally. elif isinstance_Tuple_interface_str_array(pt): # Data point in correct format - e: Interface = pt[0] - key: str = pt[1] + e = pt[0] + key = pt[1] d = self.gb.edge_props(e) mg = d["mortar_grid"] - value: np.ndarray = toVectorFormat(pt[2], mg) + value = toVectorFormat(pt[2], mg) # Add data point in correct format to collection interface_data[(e, key)] = value @@ -779,7 +778,7 @@ def _update_constant_mesh_data(self) -> None: # Identify change in constant data. Has the effect that # the constant data container will be exported at the # next application of write_vtu(). - self._exported_constant_data_up_to_date: bool = False + self._exported_constant_data_up_to_date = False # Define constant subdomain data related to the mesh @@ -871,7 +870,7 @@ def _update_constant_mesh_data(self) -> None: def _export_data_vtu( self, data: Union[SubdomainData, InterfaceData], - time_step: int, + time_step: Optional[int], **kwargs, ) -> None: """ @@ -926,7 +925,7 @@ def _export_data_vtu( # subdomains with correct grid dimension for subdomain data, and # interfaces with correct mortar grid dimension for interface data. # TODO update and simplify when new GridTree structure is in place. - entities: Union[List[pp.Grid], List[Tuple[pp.Grid, pp.Grid]]] = [] + entities: Union[List[pp.Grid], List[Tuple[pp.Grid, pp.Grid]]] = list() if is_subdomain_data: entities = self.gb.get_grids(lambda g: g.dim == dim) else: @@ -965,7 +964,7 @@ def _export_data_vtu( if meshio_geom is not None: self._write(fields, file_name, meshio_geom) - def _export_gb_pvd(self, file_name: str, time_step: int) -> None: + def _export_gb_pvd(self, file_name: str, time_step: Optional[int]) -> None: """ Routine to export to pvd format and collect all data scattered over several files for distinct grid dimensions. 
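
# Aside (between patches): the .pvd files assembled by write_pvd and
# _export_gb_pvd above are plain XML collections. A hedged, runnable sketch
# of the structure they emit (file names and time stamps are illustrative;
# the attribute layout is reconstructed from the VTK collection format, not
# quoted from the patch):
pvd = """<?xml version="1.0"?>
<VTKFile type="Collection" version="0.1" byte_order="LittleEndian">
<Collection>
\t<DataSet group="" part="" timestep="0.0" file="sol_2_000000.vtu"/>
\t<DataSet group="" part="" timestep="0.0" file="sol_mortar_1_000000.vtu"/>
</Collection>
</VTKFile>"""
with open("sol.pvd", "w") as f:
    f.write(pvd)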
From b5b1eceffd8f18a3af681c9ac5fc9628e3d97d80 Mon Sep 17 00:00:00 2001 From: Jakub Both Date: Thu, 19 May 2022 13:00:34 +0200 Subject: [PATCH 46/83] STY: flake8 --- src/porepy/viz/exporter.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/porepy/viz/exporter.py b/src/porepy/viz/exporter.py index cdd3c72a27..0da3f2e82e 100644 --- a/src/porepy/viz/exporter.py +++ b/src/porepy/viz/exporter.py @@ -153,13 +153,11 @@ def __init__( # Generate infrastructure for storing fixed-dimensional grids in # meshio format. Include all but the 0-d grids self.dims = np.setdiff1d(self.gb.all_dims(), [0]) - num_dims = self.dims.size self.meshio_geom: MD_Meshio_Geom = dict() # Generate infrastructure for storing fixed-dimensional mortar grids # in meshio format. self.m_dims = np.unique([d["mortar_grid"].dim for _, d in self.gb.edges()]) - num_m_dims = self.m_dims.size self.m_meshio_geom: MD_Meshio_Geom = dict() # Generate geometrical information in meshio format @@ -658,7 +656,7 @@ def isinstance_Tuple_str_array(pt) -> bool: grids: List[pp.Grid] = pt[0] if isinstance(pt[0], list) else [pt[0]] # By construction, the second component contains a key. - key= pt[1] + key = pt[1] # Loop over grids and fetch the states corresponding to the key for g in grids: From a8d84ff0ea58aadca0a4d9e069e4412d031db000 Mon Sep 17 00:00:00 2001 From: Jakub Both Date: Wed, 25 May 2022 11:39:24 +0200 Subject: [PATCH 47/83] STY: mypy. --- src/porepy/viz/exporter.py | 107 +++++++++++++++---------------------- 1 file changed, 42 insertions(+), 65 deletions(-) diff --git a/src/porepy/viz/exporter.py b/src/porepy/viz/exporter.py index 0da3f2e82e..be275c03c0 100644 --- a/src/porepy/viz/exporter.py +++ b/src/porepy/viz/exporter.py @@ -29,7 +29,7 @@ # Object for managing meshio-relevant data, as well as a container # for its storage, taking dimensions as inputs. Meshio_Geom = namedtuple("Meshio_Geom", ["pts", "connectivity", "cell_ids"]) -MD_Meshio_Geom = Dict[int, Meshio_Geom] +MD_Meshio_Geom = Dict[int, Optional[Meshio_Geom]] # Interface between subdomains Interface = Tuple[pp.Grid, pp.Grid] @@ -372,6 +372,7 @@ def write_pvd( file_extension = file_extension.tolist() # Make sure that the inputs are consistent + assert isinstance(file_extension, list) assert len(file_extension) == times.shape[0] # Extract the time steps related to constant data and @@ -463,7 +464,7 @@ def write_pvd( def _sort_and_unify_data( self, - data: Optional[Union[DataInput, List[DataInput]]] = None, + data=None, # ignore type which is essentially Union[DataInput, List[DataInput]] ) -> Tuple[SubdomainData, InterfaceData]: """ Preprocess data. @@ -498,7 +499,7 @@ def _sort_and_unify_data( # to vector ranged values when suitable. def toVectorFormat(value: np.ndarray, g: pp.Grid) -> np.ndarray: """ - Check wether the value array has the right dimension corresponding + Check whether the value array has the right dimension corresponding to the grid size. If possible, translate the value to a vectorial object, But do nothing if the data naturally can be interpreted as scalar data. @@ -529,42 +530,29 @@ def isinstance_interface(e: Interface) -> bool: and isinstance(e[1], pp.Grid) ) - def isinstance_Tuple_subdomain_str( - t: Tuple[Union[pp.Grid, List[pp.Grid]], str] - ) -> bool: + def isinstance_Tuple_subdomains_str(t: Tuple[List[pp.Grid], str]) -> bool: """ - Implementation of isinstance(t, Tuple[Union[pp.Grid, List[pp.Grid]], str]). + Implementation of isinstance(t, Tuple[List[pp.Grid], str]). 
- Detects data input of type (g1, "name) and ([g1, g2, ...], "name"), + Detects data input of type ([g1, g2, ...], "name"), where g1, g2, ... are subdomains. """ - # Implementation of isinstance(Tuple[pp.Grid, str], t) - types = list(map(type, t)) - if isinstance(t[0], pp.Grid) and types[1:] == [str]: - return True # Implementation of isinstance(Tuple[List[pp.Grid], str], t) - elif types == [list, str]: - return all([isinstance(g, pp.Grid) for g in t[0]]) - else: - return False + return list(map(type, t)) == [list, str] and all( + [isinstance(g, pp.Grid) for g in t[0]] + ) - def isinstance_Tuple_interface_str( - t: Tuple[Union[Interface, List[Interface]], str] - ) -> bool: + def isinstance_Tuple_interfaces_str(t: Tuple[List[Interface], str]) -> bool: """ - Implementation of isinstance(t, Tuple[Union[Edge, List[Edge]], str]). + Implementation of isinstance(t, Tuple[List[Interface], str]). - Detects data input of type (e1, "name) and ([e1, e2, ...], "name"), + Detects data input of type ([e1, e2, ...], "name"), where e1, e2, ... are interfaces. """ - # Implementation of isinstance(t, Tuple[Tuple[pp.Grid, pp.Grid], str]) - if list(map(type, t)) == [tuple, str]: - return isinstance_interface(t[0]) # Implementation of isinstance(t, Tuple[List[Tuple[pp.Grid, pp.Grid]], str]) - elif list(map(type, t)) == [list, str]: - return all([isinstance_interface(g) for g in pt[0]]) - else: - return False + return list(map(type, t)) == [list, str] and all( + [isinstance_interface(g) for g in t[0]] + ) def isinstance_Tuple_subdomain_str_array( t: Tuple[pp.Grid, str, np.ndarray] @@ -649,11 +637,10 @@ def isinstance_Tuple_str_array(pt) -> bool: # Case 2a: Data provided by a tuple (g, key). # Here, g is a single grid or a list of grids. # Idea: Collect the data only among the grids specified. - elif isinstance_Tuple_subdomain_str(pt): + elif isinstance_Tuple_subdomains_str(pt): - # By construction, the first component contains grids. - # Unify by converting the first component to a list - grids: List[pp.Grid] = pt[0] if isinstance(pt[0], list) else [pt[0]] + # By construction, the first component contains is a list of grids. + grids: List[pp.Grid] = pt[0] # By construction, the second component contains a key. key = pt[1] @@ -678,15 +665,12 @@ def isinstance_Tuple_str_array(pt) -> bool: subdomain_data[(g, key)] = value # Case 2b: Data provided by a tuple (e, key) - # Here, e is a single edge or a list of edges. + # Here, e is a list of edges. # Idea: Collect the data only among the grids specified. - elif isinstance_Tuple_interface_str(pt): + elif isinstance_Tuple_interfaces_str(pt): - # By construction, the first component contains grids. - # Unify by converting the first component to a list - edges: Union[Interface, List[Interface]] = ( - pt[0] if isinstance(pt[0], list) else [pt[0]] - ) + # By construction, the first component contains a list of interfaces. + edges: List[Interface] = pt[0] # By construction, the second component contains a key. key = pt[1] @@ -895,7 +879,7 @@ def _export_data_vtu( msg = "_export_data_vtu got unexpected keyword argument '{}'" raise TypeError(msg.format(kwargs.popitem()[0])) - # Fetch the dimnensions to be traversed. For subdomains, fetch the dimensions + # Fetch the dimensions to be traversed. For subdomains, fetch the dimensions # of the available grids, and for interfaces fetch the dimensions of the available # mortar grids. 
is_subdomain_data: bool = not self.interface_data @@ -923,9 +907,8 @@ def _export_data_vtu( # subdomains with correct grid dimension for subdomain data, and # interfaces with correct mortar grid dimension for interface data. # TODO update and simplify when new GridTree structure is in place. - entities: Union[List[pp.Grid], List[Tuple[pp.Grid, pp.Grid]]] = list() if is_subdomain_data: - entities = self.gb.get_grids(lambda g: g.dim == dim) + entities = self.gb.grids_of_dimension(dim).tolist() else: entities = [ e for e, d in self.gb.edges() if d["mortar_grid"].dim == dim @@ -1049,9 +1032,7 @@ def _update_meshio_geom(self) -> None: # Export and store self.m_meshio_geom[dim] = self._export_grid(mg, dim) - def _export_grid( - self, gs: Iterable[pp.Grid], dim: int - ) -> Union[None, Tuple[np.ndarray, np.ndarray, np.ndarray]]: + def _export_grid(self, gs: Iterable[pp.Grid], dim: int) -> Union[None, Meshio_Geom]: """ Wrapper function to export grids of dimension dim. Calls the appropriate dimension specific export function. @@ -1061,8 +1042,8 @@ def _export_grid( dim (int): Dimension of the subdomains. Returns: - Tuple[np.ndarray, np.ndarray, np.ndarray]: Points, cells (storing the - connectivity), and cell ids in correct meshio format. + Meshio_Geom: Points, cells (storing the connectivity), and cell ids + in correct meshio format. """ if dim == 0: return None @@ -1075,9 +1056,7 @@ def _export_grid( else: raise ValueError(f"Unknown dimension {dim}") - def _export_1d( - self, gs: Iterable[pp.Grid] - ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + def _export_1d(self, gs: Iterable[pp.Grid]) -> Meshio_Geom: """ Export the geometrical data (point coordinates) and connectivity information from the 1d PorePy grids to meshio. @@ -1086,8 +1065,8 @@ def _export_1d( gs (Iterable[pp.Grid]): 1d grids. Returns: - Tuple[np.ndarray, np.ndarray, np.ndarray]: Points, 1d cells (storing the - connectivity), and cell ids in correct meshio format. + Meshio_Geom: Points, 1d cells (storing the connectivity), + and cell ids in correct meshio format. """ # In 1d each cell is a line @@ -1177,9 +1156,7 @@ def _simplex_cell_to_nodes( return cn_indices - def _export_2d( - self, gs: Iterable[pp.Grid] - ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + def _export_2d(self, gs: Iterable[pp.Grid]) -> Meshio_Geom: """ Export the geometrical data (point coordinates) and connectivity information from the 2d PorePy grids to meshio. @@ -1188,8 +1165,8 @@ def _export_2d( gs (Iterable[pp.Grid]): 2d grids. Returns: - Tuple[np.ndarray, np.ndarray, np.ndarray]: Points, 2d cells (storing the - connectivity), and cell ids in correct meshio format. + Meshio_Geom: Points, 2d cells (storing the connectivity), and + cell ids in correct meshio format. """ # Use standard names for simple object types: in this routine only triangle @@ -1437,9 +1414,7 @@ def _function_to_compile(lines): # Run numba compiled function return _function_to_compile(lines) - def _export_3d( - self, gs: Iterable[pp.Grid] - ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + def _export_3d(self, gs: Iterable[pp.Grid]) -> Meshio_Geom: """ Export the geometrical data (point coordinates) and connectivity information from 3d PorePy grids to meshio. @@ -1448,8 +1423,8 @@ def _export_3d( gs (Iterable[pp.Grid]): 3d grids. Returns: - Tuple[np.ndarray, np.ndarray, np.ndarray]: Points, 3d cells (storing the - connectivity), and cell ids in correct meshio format. + Meshio_Geom: Points, 3d cells (storing the connectivity), and + cell ids in correct meshio format. 
""" # FIXME The current implementation on first sight could suggest that @@ -1676,9 +1651,11 @@ def _export_3d( meshio_cell_id = np.empty(num_blocks, dtype=object) # Store cells with special cell types. - for block, (cell_type, cell_block) in enumerate(cell_to_nodes.items()): - meshio_cells[block] = meshio.CellBlock(cell_type, cell_block.astype(int)) - meshio_cell_id[block] = np.array(cell_id[cell_type]) + for block_s, (cell_type_s, cell_block_s) in enumerate(cell_to_nodes.items()): + meshio_cells[block_s] = meshio.CellBlock( + cell_type_s, cell_block_s.astype(int) + ) + meshio_cell_id[block_s] = np.array(cell_id[cell_type_s]) # Store general polyhedra cells. for block, (cell_type, cell_block) in enumerate(cell_to_faces.items()): From 71cc3b1acc2d4e60045fc33e2535f8b8081ef278 Mon Sep 17 00:00:00 2001 From: Jakub Both Date: Wed, 25 May 2022 12:40:29 +0200 Subject: [PATCH 48/83] MAINT: Refactor export of 3d grids. meshio does not allow for multiple cell types in one vtk file. Hence, a 3d grid should be identified as a pure tetrahedron, hexahedron or polyhedron grid. This distinction is now better represented in the code structure by spearating the export. --- src/porepy/viz/exporter.py | 434 +++++++++++++++++++++---------------- 1 file changed, 253 insertions(+), 181 deletions(-) diff --git a/src/porepy/viz/exporter.py b/src/porepy/viz/exporter.py index be275c03c0..edf51f8c3f 100644 --- a/src/porepy/viz/exporter.py +++ b/src/porepy/viz/exporter.py @@ -11,7 +11,7 @@ import os import sys from collections import namedtuple -from typing import Dict, Iterable, List, Optional, Tuple, Union +from typing import Dict, Iterable, List, Optional, Set, Tuple, Union import meshio import numpy as np @@ -95,9 +95,6 @@ class Exporter: In general, pvd files gather data exported in separate files, including data on differently dimensioned grids, constant data, and finally time steps. - - deltaT is a scalar, denoting the time step size, and steps - In the case of different keywords, change the file name with "change_name". @@ -211,6 +208,10 @@ def add_constant_data( edge data, prescribed through strings, or tuples of grids/edges, keys and values. If not provided only geometical infos are exported. + + NOTE: The user has to make sure that each unique key has to + associated data values for all or no grids of each specific + dimension. """ # Identify change in constant data. Has the effect that # the constant data container will be exported at the @@ -248,6 +249,10 @@ def write_vtu( edge data, prescribed through strings, or tuples of grids/edges, keys and values. If not provided only geometical infos are exported. + + NOTE: The user has to make sure that each unique key has to + associated data values for all or no grids of each specific + dimension. time_dependent (boolean, optional): If False, file names will not be appended with an index that markes the time step. Can be overwritten by giving a value to time_step; if not, @@ -924,7 +929,9 @@ def _export_data_vtu( values.append(data[(e, key)]) # Require data for all or none entities of that dimension. - # FIXME: By changing the export strategy, this could be fixed. + # NOTE: As implemented now, for any grid dimension and any prescribed + # key, one has to provide data for all available grids of that + # dimension. if len(values) not in [0, len(entities)]: raise ValueError( f"""Insufficient amount of data provided for @@ -1427,26 +1434,213 @@ def _export_3d(self, gs: Iterable[pp.Grid]) -> Meshio_Geom: cell ids in correct meshio format. 
""" - # FIXME The current implementation on first sight could suggest that - # grids with varying cell types are allowed. However, this is not the - # case. Rewriting the code makeing this distinction more clear would - # allow for better overview. - # Three different cell types will be distinguished: Tetrahedra, hexahedra, # and general polyhedra. meshio does not allow for a mix of cell types - # in 3d within a single grid. Thus, if a grid contains cells of varying - # types, all cells will be casted as polyhedra. - - # Dictionaries storing cell->faces and cell->nodes connectivity - # information for all cell types. For the special meshio geometric - # types "tetra" and "hexahedron", cell->nodes will be used; the - # local node ordering for each cell has to comply with the convention - # in meshio. For the general "polyhedron" type, instead the connectivity - # will be described by prescribing cell->faces->nodes connectivity, i.e., - # for each cell, all faces are listed, and for all these faces, its - # nodes are identified, ordered such that they describe a circular chain. + # in 3d within a single grid (and we export all 3d grids at once). Thus, + # if the 3d grids contains cells of varying types, all cells will be casted + # as polyhedra. + + # Determine the cell types present among all grids. + cell_types: Set[str] = set() + for g in gs: + + # The number of faces per cell wil be later used to determining + # the cell types + num_faces_per_cell = np.unique(g.cell_faces.getnnz(axis=0)) + + if num_faces_per_cell.shape[0] == 1: + n = num_faces_per_cell[0] + if n == 4: + cell_types.add("tetra") + elif n == 6 and isinstance(g, pp.CartGrid): + cell_types.add("hexahedron") + else: + cell_types.add("polyhedron") + else: + cell_types.add("polyhedron") + + # Use dedicated export for each grid type + if len(cell_types) == 1 and "tetra" in cell_types: + return self._export_simplex_3d(gs) + elif len(cell_types) == 1 and "hexahedron" in cell_types: + return self._export_hexahedron_3d(gs) + else: + return self._export_polyhedron_3d(gs) + + def _export_simplex_3d(self, gs: Iterable[pp.Grid]) -> Meshio_Geom: + """ + Export the geometrical data (point coordinates) and connectivity + information from 3d PorePy simplex grids to meshio. + + Parameters: + gs (Iterable[pp.Grid]): 3d simplex grids. + + Returns: + Meshio_Geom: Points, 3d cells (storing the connectivity), and + cell ids in correct meshio format. + """ + # For the special meshio geometric type "tetra", cell->nodes will + # be used to store connectivity information. Each simplex has 4 nodes. + cell_to_nodes = np.empty((0, 4), dtype=int) + + # Dictionary collecting all cell ids for each cell type. + cell_id: List[int] = [] + + # Data structure for storing node coordinates of all 3d grids. + num_pts = np.sum([g.num_nodes for g in gs]) + meshio_pts = np.empty((num_pts, 3)) # type: ignore + + # Initialize offsets. Required taking into account multiple 3d grids. + nodes_offset = 0 + cell_offset = 0 + + # Treat each 3d grid separately. + for g in gs: + + # Store node coordinates + sl = slice(nodes_offset, nodes_offset + g.num_nodes) + meshio_pts[sl, :] = g.nodes.T + + # Identify all cells as tetrahedra. + cells = np.arange(g.num_cells) + + # Add offset taking into account previous grids + cell_id += (cells + cell_offset).tolist() + + # Determine local connectivity for all cells. Simplices are invariant + # under permuatations. Thus, no specific ordering of nodes is required. 
+            cn_indices = self._simplex_cell_to_nodes(4, g, cells)
+
+            # Store cell-node connectivity, and add offset taking into account
+            # previous grids
+            cell_to_nodes = np.vstack((cell_to_nodes, cn_indices + nodes_offset))
+
+            # Update offset
+            nodes_offset += g.num_nodes
+            cell_offset += g.num_cells
+
+        # Initialize the meshio data structure for the connectivity and cell ids.
+        # There is only one cell type.
+        meshio_cells = np.empty(1, dtype=object)
+        meshio_cell_id = np.empty(1, dtype=object)
+        meshio_cells[0] = meshio.CellBlock("tetra", cell_to_nodes.astype(int))
+        meshio_cell_id[0] = np.array(cell_id)
+
+        # Return final meshio data: points, cell (connectivity), cell ids
+        return Meshio_Geom(meshio_pts, meshio_cells, meshio_cell_id)
+
+    def _export_hexahedron_3d(self, gs: Iterable[pp.Grid]) -> Meshio_Geom:
+        """
+        Export the geometrical data (point coordinates) and connectivity
+        information from 3d PorePy hexahedron grids to meshio.
+
+        NOTE: Optimally, a StructuredGrid would be exported in this case.
+        However, meshio solely handles unstructured grids and cannot
+        for instance write to *.vtr format.
+
+        Parameters:
+            gs (Iterable[pp.Grid]): 3d hexahedron grids.
+
+        Returns:
+            Meshio_Geom: Points, 3d cells (storing the connectivity), and
+            cell ids in correct meshio format.
+        """
+        # For the special meshio geometric type "hexahedron", cell->nodes will
+        # be used to store connectivity information. Each hexahedron has 8 nodes.
+        cell_to_nodes = np.empty((0, 8), dtype=int)
+
+        # List collecting all cell ids.
+        cell_id: List[int] = []
+
+        # Data structure for storing node coordinates of all 3d grids.
+        num_pts = np.sum([g.num_nodes for g in gs])
+        meshio_pts = np.empty((num_pts, 3))  # type: ignore
+
+        # Initialize offsets. Required taking into account multiple 3d grids.
+        nodes_offset = 0
+        cell_offset = 0
+
+        # Treat each 3d grid separately.
+        for g in gs:
+
+            # Store node coordinates
+            sl = slice(nodes_offset, nodes_offset + g.num_nodes)
+            meshio_pts[sl, :] = g.nodes.T
+
+            # Identify all cells as hexahedra.
+            cells = np.arange(g.num_cells)
+
+            # Add offset taking into account previous grids
+            cell_id += (cells + cell_offset).tolist()
+
+            # Determine local connectivity for all cells. In contrast to simplices,
+            # hexahedra are not invariant under permutations; a specific ordering
+            # of the nodes is required and is adjusted below.
+
+            # After all, the procedure is fairly similar to the treatment of
+            # tetra cells. Most of the following code is analogous to
+            # _simplex_cell_to_nodes(). However, for hexahedron cells the order
+            # of the nodes is important and has to be adjusted. Based on the
+            # specific definition of TensorGrids however, the node numbering
+            # can be hardcoded, which results in better performance compared
+            # to a modular procedure.
+
+            # Determine cell-node ptr
+            cn_indptr = (
+                g.cell_nodes().indptr[:-1]
+                if cells is None
+                else g.cell_nodes().indptr[cells]
+            )
+
+            # Collect the indptr to all nodes of the cell;
+            # each hexahedron cell contains 8 nodes
+            expanded_cn_indptr = np.vstack([cn_indptr + i for i in range(8)]).reshape(
+                -1, order="F"
+            )
+
+            # Detect all corresponding nodes by applying the expanded mask to indices
+            expanded_cn_indices = g.cell_nodes().indices[expanded_cn_indptr]
+
+            # Convert to right format.
+            cn_indices = np.reshape(expanded_cn_indices, (-1, 8), order="C")
+
+            # Reorder nodes to comply with the node numbering convention of meshio
+            # regarding hexahedron cells.
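+            # (As a reading of the code, not a documented contract: with PorePy's
+            # lexicographic node numbering for Cartesian cells, the hardcoded
+            # permutation below gathers the two opposite quadrilateral faces of
+            # each cell in the bottom-then-top order expected by meshio/VTK.)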
+            cn_indices = cn_indices[:, [0, 2, 6, 4, 1, 3, 7, 5]]
+
+            # Store cell-node connectivity, and add offset taking into account
+            # previous grids
+            cell_to_nodes = np.vstack((cell_to_nodes, cn_indices + nodes_offset))
+
+            # Update offset
+            nodes_offset += g.num_nodes
+            cell_offset += g.num_cells
+
+        # Initialize the meshio data structure for the connectivity and cell ids.
+        # There is only one cell type.
+        meshio_cells = np.empty(1, dtype=object)
+        meshio_cell_id = np.empty(1, dtype=object)
+        meshio_cells[0] = meshio.CellBlock("hexahedron", cell_to_nodes.astype(int))
+        meshio_cell_id[0] = np.array(cell_id)
+
+        # Return final meshio data: points, cell (connectivity), cell ids
+        return Meshio_Geom(meshio_pts, meshio_cells, meshio_cell_id)
+
+    def _export_polyhedron_3d(self, gs: Iterable[pp.Grid]) -> Meshio_Geom:
+        """
+        Export the geometrical data (point coordinates) and connectivity
+        information from 3d PorePy polyhedron grids to meshio.
+
+        Parameters:
+            gs (Iterable[pp.Grid]): 3d polyhedron grids.
+
+        Returns:
+            Meshio_Geom: Points, 3d cells (storing the connectivity), and
+            cell ids in correct meshio format.
+        """
+        # For the general geometric type "polyhedron", cell->faces->nodes will
+        # be used to store connectivity information, which can become significantly
+        # larger than cell->nodes information used for special type grids.
         cell_to_faces: Dict[str, List[List[int]]] = {}
-        cell_to_nodes: Dict[str, np.ndarray] = {}

         # Dictionary collecting all cell ids for each cell type.
         cell_id: Dict[str, List[int]] = {}
@@ -1470,29 +1664,13 @@ def _export_3d(self, gs: Iterable[pp.Grid]) -> Meshio_Geom:
             # the cell types
             num_faces_per_cell = g.cell_faces.getnnz(axis=0)

-            # Loop over all available cell types and group cells of one type.
+            # Categorize all polyhedron cells by their number of faces.
+            # Each category will be treated separately, allowing for the use
+            # of fitting data structures.
             g_cell_map = dict()
             for n in np.unique(num_faces_per_cell):

-                # Define the cell type.
-                # Make sure that either all cells are identified as "tetra",
-                # "hexahedron", or "polyehdron". "tetra" grids have the unique
-                # property that all cells have 4 faces; "hexahdron" grids in
-                # PorePy are solely of CartGrid type, and all cells have 6
-                # faces (NOTE that coarsening a grid does not change its type,
-                # hence checking solely the type is not sufficient); all other
-                # grids will be identified as "polyehdron" grids, even if they
-                # contain single "tetra" cells.
-                if n == 4 and len(set(num_faces_per_cell)) == 1:
-                    cell_type = "tetra"
-                elif (
-                    isinstance(g, pp.CartGrid)
-                    and n == 6
-                    and len(set(num_faces_per_cell)) == 1
-                ):
-                    cell_type = "hexahedron"
-                else:
-                    cell_type = f"polyhedron{n}"
+                cell_type = f"polyhedron{n}"

                 # Find all cells with n faces, and store for later use
                 cells = np.nonzero(num_faces_per_cell == n)[0]
@@ -1509,158 +1687,52 @@ def _export_3d(self, gs: Iterable[pp.Grid]) -> Meshio_Geom:
            # treatment of special and general cell types and to conform
            # with array sizes (for polyhedra), treat each cell type
            # separately.
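            # (For instance, a grid mixing cells with five and six faces yields
            # the two categories "polyhedron5" and "polyhedron6".)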
- for n in np.unique(num_faces_per_cell): - - # Define the cell type as above - if n == 4 and len(set(num_faces_per_cell)) == 1: - cell_type = "tetra" - elif ( - isinstance(g, pp.CartGrid) - and n == 6 - and len(set(num_faces_per_cell)) == 1 - ): - cell_type = "hexahedron" - else: - cell_type = f"polyhedron{n}" - - # Special case: Tetrahedra - if cell_type == "tetra": - # Since tetra is a meshio-known cell type, cell_to_nodes connectivity - # information is provided, i.e., for each cell the nodes in the - # meshio-defined local numbering of nodes is generated. Since tetra - # is a simplex, the nodes do not need to be ordered in any specific - # type, as the geometrical object is invariant under permutations. - - # Fetch tetra cells - cells = g_cell_map[cell_type] - - # Tetrahedra are simplices and have a trivial connectivity. - cn_indices = self._simplex_cell_to_nodes(4, g, cells) - - # Initialize data structure if not available yet - if cell_type not in cell_to_nodes: - cell_to_nodes[cell_type] = np.empty((0, 4), dtype=int) - - # Store cell-node connectivity, and add offset taking into account - # previous grids - cell_to_nodes[cell_type] = np.vstack( - (cell_to_nodes[cell_type], cn_indices + nodes_offset) - ) - - elif cell_type == "hexahedron": - # NOTE: Optimally, a StructuredGrid would be exported in this case. - # However, meshio does solely handle unstructured grids and cannot - # for instance write to *.vtr format. - - # Similar to "tetra", "hexahedron" is a meshio-known cell type. - # Thus, the local connectivity is prescribed by cell-nodes - # information. - - # After all, the procedure is fairly similar to the treatment of - # tetra cells. Most of the following code is analogous to - # _simplex_cell_to_nodes(). However, for hexaedron cells the order - # of the nodes is important and has to be adjusted. Based on the - # specific definition of TensorGrids however, the node numbering - # can be hardcoded, which results in better performance compared - # to a modular procedure. - - # Need to sort the nodes according to the convention within meshio. - # Fetch hexahedron cells - cells = g_cell_map[cell_type] - - # Determine cell-node ptr - cn_indptr = ( - g.cell_nodes().indptr[:-1] - if cells is None - else g.cell_nodes().indptr[cells] - ) - - # Collect the indptr to all nodes of the cell; - # each n-simplex cell contains n nodes - expanded_cn_indptr = np.vstack( - [cn_indptr + i for i in range(8)] - ).reshape(-1, order="F") - - # Detect all corresponding nodes by applying the expanded mask to indices - expanded_cn_indices = g.cell_nodes().indices[expanded_cn_indptr] - - # Convert to right format. - cn_indices = np.reshape(expanded_cn_indices, (-1, 8), order="C") - - # Reorder nodes to comply with the node numbering convention of meshio - # regarding hexahedron cells. - cn_indices = cn_indices[:, [0, 2, 6, 4, 1, 3, 7, 5]] - - # Initialize data structure if not available yet - if cell_type not in cell_to_nodes: - cell_to_nodes[cell_type] = np.empty((0, 8), dtype=int) - - # Store cell-node connectivity, and add offset taking into account - # previous grids - cell_to_nodes[cell_type] = np.vstack( - (cell_to_nodes[cell_type], cn_indices + nodes_offset) - ) - - else: - # Identify remaining cells as polyhedra - - # The general strategy is to define the connectivity as cell-face - # information, where the faces are defined by nodes. Hence, this - # information is significantly larger than the info provided for - # tetra cells. 
Here, we make use of the fact that g.face_nodes - # provides nodes ordered wrt. the right-hand rule. - - # Fetch cells with n faces - cells = g_cell_map[cell_type] - - # Store short cuts to cell-face and face-node information - cf_indptr = g.cell_faces.indptr - cf_indices = g.cell_faces.indices - fn_indptr = g.face_nodes.indptr - fn_indices = g.face_nodes.indices - - # Determine the cell-face connectivity (with faces described by their - # nodes ordered such that they form a chain and are identified by the - # face boundary. The final data format is a List[List[np.ndarray]]. - # The outer list, loops over all cells. Each cell entry contains a - # list over faces, and each face entry is given by the face nodes. - if cell_type not in cell_to_faces: - cell_to_faces[cell_type] = [] - cell_to_faces[cell_type] += [ - [ - fn_indices[fn_indptr[f] : fn_indptr[f + 1]] # nodes - for f in cf_indices[ - cf_indptr[c] : cf_indptr[c + 1] - ] # faces - ] - for c in cells # cells + for cell_type in g_cell_map.keys(): + + # The general strategy is to define the connectivity as cell-face-nodes + # information, where the faces are defined by nodes. Hence, this + # information is significantly larger than the info provided for + # tetra cells. Here, we make use of the fact that g.face_nodes + # provides nodes ordered wrt. the right-hand rule. + + # Fetch cells with n faces + cells = g_cell_map[cell_type] + + # Store short cuts to cell-face and face-node information + cf_indptr = g.cell_faces.indptr + cf_indices = g.cell_faces.indices + fn_indptr = g.face_nodes.indptr + fn_indices = g.face_nodes.indices + + # Determine the cell-face connectivity (with faces described by their + # nodes ordered such that they form a chain and are identified by the + # face boundary. The final data format is a List[List[np.ndarray]]. + # The outer list, loops over all cells. Each cell entry contains a + # list over faces, and each face entry is given by the face nodes. + if cell_type not in cell_to_faces: + cell_to_faces[cell_type] = [] + cell_to_faces[cell_type] += [ + [ + fn_indices[fn_indptr[f] : fn_indptr[f + 1]] # nodes + for f in cf_indices[cf_indptr[c] : cf_indptr[c + 1]] # faces ] + for c in cells # cells + ] # Update offset nodes_offset += g.num_nodes cell_offset += g.num_cells - # Determine the total number of blocks. Recall that special cell types - # and general polyhedron{n} cells are stored differently. - num_special_blocks = len(cell_to_nodes) - num_polyhedron_blocks = len(cell_to_faces) - num_blocks = num_special_blocks + num_polyhedron_blocks + # Determine the total number of blocks / cell types. + num_blocks = len(cell_to_faces) # Initialize the meshio data structure for the connectivity and cell ids. meshio_cells = np.empty(num_blocks, dtype=object) meshio_cell_id = np.empty(num_blocks, dtype=object) - # Store cells with special cell types. - for block_s, (cell_type_s, cell_block_s) in enumerate(cell_to_nodes.items()): - meshio_cells[block_s] = meshio.CellBlock( - cell_type_s, cell_block_s.astype(int) - ) - meshio_cell_id[block_s] = np.array(cell_id[cell_type_s]) - - # Store general polyhedra cells. + # Store the cells in meshio format for block, (cell_type, cell_block) in enumerate(cell_to_faces.items()): # Adapt the block number taking into account of previous cell types. 
- block += num_special_blocks meshio_cells[block] = meshio.CellBlock(cell_type, cell_block) meshio_cell_id[block] = np.array(cell_id[cell_type]) From 51565154fe8330daa2b7969bbd3218bfdbbaecfb Mon Sep 17 00:00:00 2001 From: Jakub Both Date: Wed, 25 May 2022 13:10:35 +0200 Subject: [PATCH 49/83] FIX: Only export constant data in pvd format if intended. --- src/porepy/viz/exporter.py | 33 +++++++++++++++++---------------- 1 file changed, 17 insertions(+), 16 deletions(-) diff --git a/src/porepy/viz/exporter.py b/src/porepy/viz/exporter.py index edf51f8c3f..73ad7bedd4 100644 --- a/src/porepy/viz/exporter.py +++ b/src/porepy/viz/exporter.py @@ -368,11 +368,6 @@ def write_pvd( """ if file_extension is None: file_extension = self._exported_timesteps - file_extension_constants = self._exported_timesteps_constants - indices = [self._exported_timesteps.index(e) for e in file_extension] - file_extension_constants = [ - self._exported_timesteps_constants[i] for i in indices - ] elif isinstance(file_extension, np.ndarray): file_extension = file_extension.tolist() @@ -380,17 +375,23 @@ def write_pvd( assert isinstance(file_extension, list) assert len(file_extension) == times.shape[0] - # Extract the time steps related to constant data and - # complying with file_extension. Implicitly test wheter - # file_extension is a subset of _exported_timesteps. + # Make mypy happy + assert file_extension is not None + + # Extract the time steps related to constant data and + # complying with file_extension. Implicitly test wheter + # file_extension is a subset of _exported_timesteps. + # Only if constant data has been exported. + include_constant_data = ( + self.export_constants_separately + and len(self._exported_timesteps_constants) > 0 + ) + if include_constant_data: indices = [self._exported_timesteps.index(e) for e in file_extension] file_extension_constants = [ self._exported_timesteps_constants[i] for i in indices ] - # Make mypy happy - assert file_extension is not None - # Perform the same procedure as in _export_gb_pvd # but looping over all designated time steps. @@ -408,9 +409,8 @@ def write_pvd( o_file.write(header) fm = '\t\n' - for time, fn, fn_constants in zip( - times, file_extension, file_extension_constants - ): + # Gather all data, and assign the actual time. + for time, fn in zip(times, file_extension): # Go through all possible data types, analogously to # _export_gb_pvd. @@ -432,8 +432,9 @@ def write_pvd( ) ) - # Constant data. - if self.export_constants_separately: + # Optionally, do the same for constant data. + if include_constant_data: + for time, fn_constants in zip(times, file_extension_constants): # Constant subdomain data. for dim in self.dims: if self.meshio_geom[dim] is not None: From 107e4ab0fdbbab364c6b2b0c06bea23e54114f01 Mon Sep 17 00:00:00 2001 From: Jakub Both Date: Wed, 25 May 2022 13:16:58 +0200 Subject: [PATCH 50/83] MAINT: Adapt specified data type after deprecated the use of some allowed inputs. 
--- src/porepy/viz/exporter.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/porepy/viz/exporter.py b/src/porepy/viz/exporter.py index 73ad7bedd4..49645bac53 100644 --- a/src/porepy/viz/exporter.py +++ b/src/porepy/viz/exporter.py @@ -39,11 +39,11 @@ # Keys for states str, # Subdomain specific data types - Tuple[Union[pp.Grid, List[pp.Grid]], str], + Tuple[List[pp.Grid], str], Tuple[pp.Grid, str, np.ndarray], Tuple[str, np.ndarray], # Interface specific data types - Tuple[Union[Interface, List[Interface]], str], + Tuple[List[Interface], str], Tuple[Interface, str, np.ndarray], ] From 23933358c08125996d7e4336887fa78cc65a22bd Mon Sep 17 00:00:00 2001 From: Ivar Stefansson Date: Wed, 25 May 2022 15:30:30 +0200 Subject: [PATCH 51/83] Update exporter.ipynb --- tutorials/exporter.ipynb | 96 ++++++++++++++++++++++++++++++---------- 1 file changed, 73 insertions(+), 23 deletions(-) diff --git a/tutorials/exporter.ipynb b/tutorials/exporter.ipynb index 20ac352d73..d80c6bded1 100644 --- a/tutorials/exporter.ipynb +++ b/tutorials/exporter.ipynb @@ -5,9 +5,9 @@ "metadata": {}, "source": [ "# Introduction\n", - "Currently, the standard procedure within PorePy is to export data to vtu and pvd format for visualization with ParaView. This tutorial explains how to use the 'Exporter'. In particular, it will showcase different ways to address data, how constant-in-time data is handled, and how pvd-files are managed. \n", + "Currently, the standard procedure within PorePy is to export data to vtu and pvd format for visualization with ParaView. This tutorial explains how to use the `Exporter`. In particular, it will showcase different ways to address data, how constant-in-time data is handled, and how pvd-files are managed. \n", "\n", - "First, an example data set is defined, then the actual exporter is defined, before currently all possible ways to export data are demonstrated.\n", + "First, an example data set is defined, then the actual exporter is defined, before all supported ways to export data are demonstrated.\n", "\n", "NOTE: Related but not necessary for this tutorial: it is highly recommended to read the ParaView documentation. " ] @@ -24,7 +24,15 @@ "cell_type": "code", "execution_count": 1, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n" + ] + } + ], "source": [ "import numpy as np\n", "import porepy as pp\n", @@ -81,7 +89,16 @@ "cell_type": "code", "execution_count": 2, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Keys of the states defined on subdomains: {'u', 'contact_traction', 'p'}\n", + "Keys of the states defined on interfaces: {'mortar_u', 'mortar_p'}\n" + ] + } + ], "source": [ "# Determine all keys of all states on all subdomains\n", "subdomain_states = []\n", @@ -124,7 +141,8 @@ "In the following, we showcase how to use the main subroutines for exporting data:\n", "- write_vtu()\n", "- write_pvd()\n", - "The first, addresses the export of data for a specific time step, while the latter, gather the previous exports and collects them in a single file. This allows an easier analysis in ParaView." + "\n", + "The first addresses the export of data for a specific time step, while the latter gathers the previous exports and collects them in a single file. This allows an easier analysis in ParaView." 
] }, { @@ -182,9 +200,27 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 9, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Mixed dimensional grid. \n", + "Maximum dimension present: 2 \n", + "Minimum dimension present: 0 \n", + "Size of highest dimensional grid: Cells: 280. Nodes: 178\n", + "In lower dimensions: \n", + "2 grids of dimension 1, with in total 16 cells and 20 nodes. \n", + "1 grids of dimension 0, with in total 1 cells and 0 nodes. \n", + "Total number of interfaces: 4\n", + "2 interfaces between grids of dimension 2 and 1\n", + "2 interfaces between grids of dimension 1 and 0\n", + "\n" + ] + } + ], "source": [ "g_2d = grids_2d[0]" ] @@ -200,7 +236,21 @@ "cell_type": "code", "execution_count": 7, "metadata": {}, - "outputs": [], + "outputs": [ + { + "ename": "ValueError", + "evalue": "The provided data type used for (Grid history: Compute geometry\nNumber of cells 280\nNumber of faces 456\nNumber of nodes 178\nDimension 2, 'u') is not supported.", + "output_type": "error", + "traceback": [ + "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[1;31mValueError\u001b[0m Traceback (most recent call last)", + "\u001b[1;32m~\\AppData\\Local\\Temp/ipykernel_14264/4193921876.py\u001b[0m in \u001b[0;36m\u001b[1;34m\u001b[0m\n\u001b[0;32m 1\u001b[0m \u001b[0mexporter_2\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mpp\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mExporter\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mmodel\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mgb\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;34m\"example-2\"\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;34m\"exporter-tutorial\"\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 2\u001b[1;33m \u001b[0mexporter_2\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mwrite_vtu\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mgrids_1d\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;34m\"p\"\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m(\u001b[0m\u001b[0mg_2d\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;34m\"u\"\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m(\u001b[0m\u001b[0medges_1d\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;34m\"mortar_p\"\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m", + "\u001b[1;32mc:\\users\\ivar\\onedrive - university of bergen\\documents\\github\\porepy\\src\\porepy\\viz\\exporter.py\u001b[0m in \u001b[0;36mwrite_vtu\u001b[1;34m(self, data, time_dependent, time_step, gb)\u001b[0m\n\u001b[0;32m 295\u001b[0m \u001b[1;31m# 1. Sort wrt. whether data is associated to subdomains or interfaces.\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 296\u001b[0m \u001b[1;31m# 2. 
Unify data type.\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 297\u001b[1;33m \u001b[0msubdomain_data\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0minterface_data\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_sort_and_unify_data\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mdata\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 298\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 299\u001b[0m \u001b[1;31m# Export constant data to separate or standard files\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", + "\u001b[1;32mc:\\users\\ivar\\onedrive - university of bergen\\documents\\github\\porepy\\src\\porepy\\viz\\exporter.py\u001b[0m in \u001b[0;36m_sort_and_unify_data\u001b[1;34m(self, data)\u001b[0m\n\u001b[0;32m 750\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 751\u001b[0m \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 752\u001b[1;33m raise ValueError(\n\u001b[0m\u001b[0;32m 753\u001b[0m \u001b[1;34mf\"The provided data type used for {pt} is not supported.\"\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 754\u001b[0m )\n", + "\u001b[1;31mValueError\u001b[0m: The provided data type used for (Grid history: Compute geometry\nNumber of cells 280\nNumber of faces 456\nNumber of nodes 178\nDimension 2, 'u') is not supported." + ] + } + ], "source": [ "exporter_2 = pp.Exporter(model.gb, \"example-2\", \"exporter-tutorial\")\n", "exporter_2.write_vtu([(grids_1d, \"p\"), (g_2d, \"u\"), (edges_1d, \"mortar_p\")])" @@ -211,12 +261,12 @@ "metadata": {}, "source": [ "## Example 3: Exporting explicitly defined data\n", - "There exists the possibility to export data which is not stored in the grid bucket under 'pp.STATE'. This capability requires defining tuples of (1) a single grid, (2) a key, and (3) the data vector. For example, let's export the cell centers of the 2D subdomain 'g_2d', as well as all interfaces (with different signs for the sake of the example). Again, we define a dedicated exporter for this task.\n" + "We can also export data which is not stored in the grid bucket under 'pp.STATE'. This capability requires defining tuples of (1) a single grid, (2) a key, and (3) the data vector. For example, let's export the cell centers of the 2D subdomain 'g_2d', as well as all interfaces (with different signs for the sake of the example). Again, we define a dedicated exporter for this task.\n" ] }, { "cell_type": "code", - "execution_count": 8, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -240,7 +290,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -258,7 +308,7 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -273,12 +323,12 @@ "metadata": {}, "source": [ "## Example 6: Exporting constant data\n", - "The export of both grid and geometry related data as well as heterogeneous material parameters may be of interest. However, these often do only change very seldomly in time or are even constant in time. In order to save storage space, constant data is stored separately. A multirate approach is used to address slowly changing \"constant\" data, which results in an extra set of output files. 
Everytime constant data has to be updated (in a time series), the output files are updated as well. " + "The export of both grid and geometry related data as well as heterogeneous material parameters may be of interest. However, these often do only change very seldomly in time or are even constant in time. In order to save storage space, constant data is stored separately. A multirate approach is used to address slowly changing \"constant\" data, which results in an extra set of output files. Every time constant data has to be updated (in a time series), the output files are updated as well. " ] }, { "cell_type": "code", - "execution_count": 12, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -297,12 +347,12 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "The default is that constant data is always printed to extra files. Since the vtu format requires geometrical and topoligical information on the mesh (points, connectivity etc.), this type of constant data is exported to each vtu file. Depending on the situation, this overhead can be in fact significant. Thus, one can also choose to print the constant data to the same files as the standard data, by setting a keyword when defining the exporter. With a similar setup as in part A, the same output is generated, but managed differently among files." + "The default is that constant data is always printed to extra files. Since the vtu format requires geometrical and topoligical information on the mesh (points, connectivity etc.), this type of constant data is exported to each vtu file. Depending on the situation, this overhead can be significant. Thus, one can also choose to print the constant data to the same files as the standard data, by setting a keyword when defining the exporter. With a similar setup as in part A, the same output is generated, but managed differently among files." ] }, { "cell_type": "code", - "execution_count": 13, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -321,14 +371,14 @@ "metadata": {}, "source": [ "## Example 5 revisisted: PVD format\n", - "The pvd format collects previously exported data. At every application of 'write_vtu' a corresponding pvd file is generated, gather all 'vtu' files correpsonding to this time step. It is recommended to use the 'pvd' file for analyzing the data in ParaView.\n", + "The pvd format collects previously exported data. At every application of 'write_vtu' a corresponding pvd file is generated, which gathers all 'vtu' files correpsonding to this time step. It is recommended to use the 'pvd' file for analyzing the data in ParaView.\n", "\n", - "In addition, when considering a time series, it is possible to gather data connected to multiple time steps, and assign the actual time to each time step. Assume, Example 5 corresponds to an adaptive time stepping. We define the actual times, and write collect the exported data from Example 5 in a single pvd file." + "In addition, when considering a time series, it is possible to gather data connected to multiple time steps, and assign the actual time to each time step. Assume, Example 5 corresponds to an adaptive time stepping. We define the actual times, and collect the exported data from Example 5 in a single pvd file." 
] }, { "cell_type": "code", - "execution_count": 16, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -341,12 +391,12 @@ "metadata": {}, "source": [ "## Example 7: Exporting data on a single grid\n", - "It is also possible to export data without prescribing a grid bucket, but a single grid. In this case, one has to assign the data when writing to vtu. For this, a key and a data array (with suitable size) has to be provided." + "It is also possible to export data without prescribing a grid bucket, but a single grid. In this case, one has to assign the data when writing to vtu. For this, a key and a data array (with suitable size) have to be provided." ] }, { "cell_type": "code", - "execution_count": 17, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -357,7 +407,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -371,7 +421,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.4" + "version": "3.9.7" } }, "nbformat": 4, From 0812dea370a119e219b1b745586aefeb80fb15ce Mon Sep 17 00:00:00 2001 From: jwboth <73653591+jwboth@users.noreply.github.com> Date: Fri, 3 Jun 2022 12:48:09 +0200 Subject: [PATCH 52/83] Apply suggestions from code review DOC: Fix typos. Co-authored-by: Ivar Stefansson --- src/porepy/viz/exporter.py | 50 +++++++++++++++++++------------------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/src/porepy/viz/exporter.py b/src/porepy/viz/exporter.py index 49645bac53..e8c142d678 100644 --- a/src/porepy/viz/exporter.py +++ b/src/porepy/viz/exporter.py @@ -34,7 +34,7 @@ # Interface between subdomains Interface = Tuple[pp.Grid, pp.Grid] -# Altogether allowed data structures to define data for exporting +# All allowed data structures to define data for exporting DataInput = Union[ # Keys for states str, @@ -89,7 +89,7 @@ class Exporter: save.write_vtu(["pressure", "displacement"], time_step=i) save.write_pvd(times) - where times, is a list of actual times (not time steps), associated + where times is a list of actual times (not time steps), associated to the previously exported time steps. In general, pvd files gather data exported in separate files, including @@ -115,13 +115,13 @@ def __init__( gb (Union[pp.Grid, pp.Gridbucket]): grid or gridbucket containing all mesh information to be exported. file_name (str): basis for file names used for storing the output - folder_name (str, optional): folder name, all files are stored in + folder_name (str, optional): name of the folder in which files are stored kwargs (optional): Optional keywords; 'fixed_grid' (boolean) to control whether the grid(bucket) may be redfined (default True); - 'binary' (boolean) controling whether data is stored in binary format + 'binary' (boolean) controlling whether data is stored in binary format (default True); - 'export_constants_separately' (boolean) controling whether + 'export_constants_separately' (boolean) controlling whether constant data is exported in separate files (default True). """ # Exporter is operating on grid buckets. Convert to grid bucket if grid is provided. @@ -209,7 +209,7 @@ def add_constant_data( grids/edges, keys and values. If not provided only geometical infos are exported. - NOTE: The user has to make sure that each unique key has to + NOTE: The user has to make sure that each unique key has associated data values for all or no grids of each specific dimension. 
""" @@ -248,16 +248,16 @@ def write_vtu( data (Union[DataInput, List[DataInput]], optional): node and edge data, prescribed through strings, or tuples of grids/edges, keys and values. If not provided only - geometical infos are exported. + geometrical infos are exported. - NOTE: The user has to make sure that each unique key has to + NOTE: The user has to make sure that each unique key has associated data values for all or no grids of each specific dimension. time_dependent (boolean, optional): If False, file names will - not be appended with an index that markes the time step. + not be appended with an index that marks the time step. Can be overwritten by giving a value to time_step; if not, the file names will subsequently be ending with 1, 2, etc. - time_step (int, optional): will be used ass eppendic to define + time_step (int, optional): will be used as appendix to define the file corresponding to this specific time step. gb (Union[pp.Grid, pp.Gridbucket], optional): grid or gridbucket if it is not fixed and should be updated. @@ -322,7 +322,7 @@ def write_vtu( # mesh is updated or new constant data is added. self._exported_constant_data_up_to_date = True - # Store the timestep refering to the origin of the constant data + # Store the timestep referring to the origin of the constant data if self._time_step_constant_data: self._exported_timesteps_constants.append(self._time_step_constant_data) else: @@ -379,7 +379,7 @@ def write_pvd( assert file_extension is not None # Extract the time steps related to constant data and - # complying with file_extension. Implicitly test wheter + # complying with file_extension. Implicitly test whether # file_extension is a subset of _exported_timesteps. # Only if constant data has been exported. include_constant_data = ( @@ -507,7 +507,7 @@ def toVectorFormat(value: np.ndarray, g: pp.Grid) -> np.ndarray: """ Check whether the value array has the right dimension corresponding to the grid size. If possible, translate the value to a vectorial - object, But do nothing if the data naturally can be interpreted as + object, but do nothing if the data naturally can be interpreted as scalar data. """ # Make some checks @@ -645,7 +645,7 @@ def isinstance_Tuple_str_array(pt) -> bool: # Idea: Collect the data only among the grids specified. elif isinstance_Tuple_subdomains_str(pt): - # By construction, the first component contains is a list of grids. + # By construction, the first component is a list of grids. grids: List[pp.Grid] = pt[0] # By construction, the second component contains a key. @@ -672,7 +672,7 @@ def isinstance_Tuple_str_array(pt) -> bool: # Case 2b: Data provided by a tuple (e, key) # Here, e is a list of edges. - # Idea: Collect the data only among the grids specified. + # Idea: Collect the data only among the specified interfaces. elif isinstance_Tuple_interfaces_str(pt): # By construction, the first component contains a list of interfaces. @@ -866,7 +866,7 @@ def _export_data_vtu( and passing further to the final writing routine. For each fixed dimension, all subdomains of that dimension and the data - related to that subdomains will be exported simultaneously. + related to those subdomains will be exported simultaneously. Analogously for interfaces. Parameters: @@ -931,7 +931,7 @@ def _export_data_vtu( # Require data for all or none entities of that dimension. 
# NOTE: As implemented now, for any grid dimension and any prescribed - # key, one has to provide data for all available grids of that + # key, one has to provide data for all or none of the grids of that # dimension. if len(values) not in [0, len(entities)]: raise ValueError( @@ -992,7 +992,7 @@ def _export_gb_pvd(self, file_name: str, time_step: Optional[int]) -> None: # If constant data is exported to separate vtu files, also include # these here. The procedure is similar to the above, but the file names - # incl. the relevant time step has to be adjusted. + # incl. the relevant time step have to be adjusted. if self.export_constants_separately: # Constant subdomain data. for dim in self.dims: @@ -1315,7 +1315,7 @@ def _export_2d(self, gs: Iterable[pp.Grid]) -> Meshio_Geom: # the corresponding cells with ids from cell_id. for block, (cell_type, cell_block) in enumerate(cell_to_nodes.items()): - # Meshio requires the keyword "polgon" for general polygons. + # Meshio requires the keyword "polygon" for general polygons. # Thus, remove the number of nodes associated to polygons. cell_type_meshio_format = "polygon" if "polygon" in cell_type else cell_type meshio_cells[block] = meshio.CellBlock( @@ -1359,7 +1359,7 @@ def _function_to_compile(lines): # Retrieve number of chains and lines per chain from the shape. # Implicitly expect that all chains have the same length num_chains, chain_length = lines.shape - # Since for each chain lines includes two rows, take the half + # Since for each chain lines includes two rows, divide by two num_chains = int(num_chains / 2) # Initialize array of sorted lines to be the final output @@ -1371,7 +1371,7 @@ def _function_to_compile(lines): found = np.zeros(chain_length) found[0] = 1 - # Loop over chains and Consider each chain separately. + # Loop over chains and consider each chain separately. for c in range(num_chains): # Initialize found making any line segment aside of the first a candidate found[1:] = 0 @@ -1380,7 +1380,7 @@ def _function_to_compile(lines): # line segment prev = sorted_lines[2 * c + 1, 0] - # The sorting algorithm: Loop over all position in the chain to be set next. + # The sorting algorithm: Loop over all positions in the chain to be set next. # Find the right candidate to be moved to this position and possibly flipped # if needed. A candidate is identified as fitting if it contains one point # equal to the current starting point. This algorithm uses a double loop, @@ -1438,7 +1438,7 @@ def _export_3d(self, gs: Iterable[pp.Grid]) -> Meshio_Geom: # Three different cell types will be distinguished: Tetrahedra, hexahedra, # and general polyhedra. meshio does not allow for a mix of cell types # in 3d within a single grid (and we export all 3d grids at once). Thus, - # if the 3d grids contains cells of varying types, all cells will be casted + # if the 3d grids contains cells of varying types, all cells will be cast # as polyhedra. # Determine the cell types present among all grids. @@ -1708,7 +1708,7 @@ def _export_polyhedron_3d(self, gs: Iterable[pp.Grid]) -> Meshio_Geom: # Determine the cell-face connectivity (with faces described by their # nodes ordered such that they form a chain and are identified by the # face boundary. The final data format is a List[List[np.ndarray]]. - # The outer list, loops over all cells. Each cell entry contains a + # The outer list loops over all cells. Each cell entry contains a # list over faces, and each face entry is given by the face nodes. 
if cell_type not in cell_to_faces: cell_to_faces[cell_type] = [] @@ -1825,7 +1825,7 @@ def _make_file_name( """ Auxiliary method to setting up file name. - The final name is build as combination of a prescribed prefix, + The final name is built as combination of a prescribed prefix, and possibly the dimension of underlying grid and time (step) the related data is associated to. From b51903308d90a6d21d7c39729d886fb0d31debc4 Mon Sep 17 00:00:00 2001 From: Jakub Both Date: Tue, 14 Jun 2022 10:44:03 +0200 Subject: [PATCH 53/83] Fix: Require list of grids when addressing data for a single dimension. --- tutorials/exporter.ipynb | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tutorials/exporter.ipynb b/tutorials/exporter.ipynb index d80c6bded1..1ccd1c5d2b 100644 --- a/tutorials/exporter.ipynb +++ b/tutorials/exporter.ipynb @@ -229,7 +229,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "We export pressure on all 1D subdomains, displacements on a single 2D subdomain, and the mortars on all interfaces. For this, we use tuples of grid(s) and keys. In order to not overwrite the previous data, we define a new exporter." + "We export pressure on all 1D subdomains, displacements on all 2D subdomains, and the mortars on all interfaces. For this, we use tuples of grid(s) and keys. In order to not overwrite the previous data, we define a new exporter." ] }, { @@ -253,7 +253,7 @@ ], "source": [ "exporter_2 = pp.Exporter(model.gb, \"example-2\", \"exporter-tutorial\")\n", - "exporter_2.write_vtu([(grids_1d, \"p\"), (g_2d, \"u\"), (edges_1d, \"mortar_p\")])" + "exporter_2.write_vtu([(grids_1d, \"p\"), (grids_2d, \"u\"), (edges_1d, \"mortar_p\")])" ] }, { From 20d709196d5ba6a064c54c8e707922643231fac6 Mon Sep 17 00:00:00 2001 From: Jakub Both Date: Tue, 14 Jun 2022 11:46:28 +0200 Subject: [PATCH 54/83] MAINT: Add comments on how to use argument time_dependent, and times. 
--- tutorials/exporter.ipynb | 112 +++++++++++++++++---------------------- 1 file changed, 48 insertions(+), 64 deletions(-) diff --git a/tutorials/exporter.ipynb b/tutorials/exporter.ipynb index 1ccd1c5d2b..30d28d18dc 100644 --- a/tutorials/exporter.ipynb +++ b/tutorials/exporter.ipynb @@ -22,17 +22,9 @@ }, { "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n" - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "import numpy as np\n", "import porepy as pp\n", @@ -87,18 +79,9 @@ }, { "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Keys of the states defined on subdomains: {'u', 'contact_traction', 'p'}\n", - "Keys of the states defined on interfaces: {'mortar_u', 'mortar_p'}\n" - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "# Determine all keys of all states on all subdomains\n", "subdomain_states = []\n", @@ -127,7 +110,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -155,7 +138,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -182,7 +165,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -200,27 +183,9 @@ }, { "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Mixed dimensional grid. \n", - "Maximum dimension present: 2 \n", - "Minimum dimension present: 0 \n", - "Size of highest dimensional grid: Cells: 280. Nodes: 178\n", - "In lower dimensions: \n", - "2 grids of dimension 1, with in total 16 cells and 20 nodes. \n", - "1 grids of dimension 0, with in total 1 cells and 0 nodes. 
\n", - "Total number of interfaces: 4\n", - "2 interfaces between grids of dimension 2 and 1\n", - "2 interfaces between grids of dimension 1 and 0\n", - "\n" - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "g_2d = grids_2d[0]" ] @@ -234,23 +199,9 @@ }, { "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [ - { - "ename": "ValueError", - "evalue": "The provided data type used for (Grid history: Compute geometry\nNumber of cells 280\nNumber of faces 456\nNumber of nodes 178\nDimension 2, 'u') is not supported.", - "output_type": "error", - "traceback": [ - "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[1;31mValueError\u001b[0m Traceback (most recent call last)", - "\u001b[1;32m~\\AppData\\Local\\Temp/ipykernel_14264/4193921876.py\u001b[0m in \u001b[0;36m\u001b[1;34m\u001b[0m\n\u001b[0;32m 1\u001b[0m \u001b[0mexporter_2\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mpp\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mExporter\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mmodel\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mgb\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;34m\"example-2\"\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;34m\"exporter-tutorial\"\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 2\u001b[1;33m \u001b[0mexporter_2\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mwrite_vtu\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mgrids_1d\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;34m\"p\"\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m(\u001b[0m\u001b[0mg_2d\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;34m\"u\"\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m(\u001b[0m\u001b[0medges_1d\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;34m\"mortar_p\"\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m", - "\u001b[1;32mc:\\users\\ivar\\onedrive - university of bergen\\documents\\github\\porepy\\src\\porepy\\viz\\exporter.py\u001b[0m in \u001b[0;36mwrite_vtu\u001b[1;34m(self, data, time_dependent, time_step, gb)\u001b[0m\n\u001b[0;32m 295\u001b[0m \u001b[1;31m# 1. Sort wrt. whether data is associated to subdomains or interfaces.\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 296\u001b[0m \u001b[1;31m# 2. 
Unify data type.\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 297\u001b[1;33m \u001b[0msubdomain_data\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0minterface_data\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_sort_and_unify_data\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mdata\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 298\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 299\u001b[0m \u001b[1;31m# Export constant data to separate or standard files\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", - "\u001b[1;32mc:\\users\\ivar\\onedrive - university of bergen\\documents\\github\\porepy\\src\\porepy\\viz\\exporter.py\u001b[0m in \u001b[0;36m_sort_and_unify_data\u001b[1;34m(self, data)\u001b[0m\n\u001b[0;32m 750\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 751\u001b[0m \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 752\u001b[1;33m raise ValueError(\n\u001b[0m\u001b[0;32m 753\u001b[0m \u001b[1;34mf\"The provided data type used for {pt} is not supported.\"\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 754\u001b[0m )\n", - "\u001b[1;31mValueError\u001b[0m: The provided data type used for (Grid history: Compute geometry\nNumber of cells 280\nNumber of faces 456\nNumber of nodes 178\nDimension 2, 'u') is not supported." - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "exporter_2 = pp.Exporter(model.gb, \"example-2\", \"exporter-tutorial\")\n", "exporter_2.write_vtu([(grids_1d, \"p\"), (grids_2d, \"u\"), (edges_1d, \"mortar_p\")])" @@ -318,6 +269,25 @@ " exporter_5.write_vtu([\"p\", \"u\", \"mortar_p\"], time_step=step)" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Alternatively, one can also let the Exporter internally manage the stepping and the appendix used when storing the data to file. This is triggered by the keyword 'time_dependent'." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "exporter_5 = pp.Exporter(model.gb, \"example-5\", \"exporter-tutorial\")\n", + "for step in range(5):\n", + " # Data may change\n", + " exporter_5.write_vtu([\"p\", \"u\", \"mortar_p\"], time_dependent=True)" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -386,6 +356,13 @@ "exporter_5.write_pvd(times_5)" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "When providing no argument to write_pvd(), the time steps are used as actual times." + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -403,6 +380,13 @@ "exporter_7 = pp.Exporter(g_2d, \"example-7\", \"exporter-tutorial\")\n", "exporter_7.write_vtu([(\"cc\", g_2d.cell_centers)])" ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { From 7cd795d684783f49f4fadb9fc5e74d470f4d29c5 Mon Sep 17 00:00:00 2001 From: Jakub Both Date: Tue, 14 Jun 2022 15:41:39 +0200 Subject: [PATCH 55/83] MAINT: Restructure conversion of input data, and improve comments and tutorial. 
---
 src/porepy/viz/exporter.py | 426 ++++++++++++++++++++++-------------
 tutorials/exporter.ipynb   |   2 +-
 2 files changed, 276 insertions(+), 152 deletions(-)

diff --git a/src/porepy/viz/exporter.py b/src/porepy/viz/exporter.py
index e8c142d678..14aa7462a4 100644
--- a/src/porepy/viz/exporter.py
+++ b/src/porepy/viz/exporter.py
@@ -65,7 +65,9 @@ class Exporter:
     Optional arguments in kwargs:
         fixed_grid: (optional) in a time dependent simulation specify if the
             grid changes in time or not. The default is True.
-        binary: (optional) export in binary format, default is True.
+        binary: (optional) export in binary format, default is True. If False,
+            the output is in ASCII format, which allows better readability and
+            debugging, but requires more memory.
         export_constants_separately: (optional) export constants in separate
             files to potentially reduce memory footprint, default is True.

@@ -90,7 +92,8 @@ class Exporter:
         save.write_pvd(times)

     where times is a list of actual times (not time steps), associated
-    to the previously exported time steps.
+    to the previously exported time steps. If times is not provided,
+    the time steps will be used instead.

     In general, pvd files gather data exported in separate files, including
     data on differently dimensioned grids, constant data, and finally time
     steps.
@@ -98,8 +101,10 @@ class Exporter:
     In the case of different keywords, change the file name with
     "change_name".

-    NOTE: the following names are reserved for data exporting: grid_dim,
-    is_mortar, mortar_side, cell_id, grid_node_number, grid_edge_number
+    NOTE: the following names are reserved for constant data exporting
+    and should not be used otherwise (otherwise data is overwritten):
+    grid_dim, is_mortar, mortar_side, cell_id, grid_node_number,
+    grid_edge_number
     """

     def __init__(
@@ -122,7 +127,11 @@ def __init__(
             'binary' (boolean) controlling whether data is stored in binary format
                 (default True);
             'export_constants_separately' (boolean) controlling whether
-                constant data is exported in separate files (default True).
+                constant data is exported in separate files, which may be of interest
+                when exporting large data sets (in particular of constant data) for
+                many time steps (default True); note, however, that the mesh is
+                exported to each vtu file, which may also require a significant
+                amount of storage.
         """
         # Exporter is operating on grid buckets. Convert to grid bucket if grid is provided.
         if isinstance(gb, pp.Grid):
@@ -213,7 +222,7 @@ def add_constant_data(
             associated data values for all or no grids of each specific
             dimension.
         """
-        # Identify change in constant data. Has the effect that
+        # Interpret change in constant data. Has the effect that
         # the constant data container will be exported at the
         # next application of write_vtu().
         self._exported_constant_data_up_to_date = False
@@ -346,7 +355,7 @@ def write_vtu(

     def write_pvd(
         self,
-        times: np.ndarray,
+        times: Optional[np.ndarray] = None,
         file_extension: Optional[Union[np.ndarray, List[int]]] = None,
     ) -> None:
         """
@@ -354,18 +363,23 @@ def write_pvd(
         The user should open only this file in paraview.

         We assume that the VTU associated files have the same name.
-        We assume that the VTU associated files are in the same folder.
+        We assume that the VTU associated files are in the working directory.

         Parameters:
-            times (np.ndarray): array of times to be exported. These will be the times
-                associated with indivdiual time steps in, say, Paraview. By default,
-                the times will be associated with the order in which the time steps were
-                exported.
This can be overridden by the file_extension argument. + times (optional, np.ndarray): array of actual times to be exported. These will + be the times associated with indivdiual time steps in, say, Paraview. + By default, the times will be associated with the order in which the time + steps were exported. This can be overridden by the file_extension argument. + If no times are provided, the exported time steps are used. file_extension (np.array-like, optional): End of file names used in the export of individual time steps, see self.write_vtu(). If provided, it should have the same length as time. If not provided, the file names will be picked from those used when writing individual time steps. """ + + if times is None: + times = np.array(self._exported_timesteps) + if file_extension is None: file_extension = self._exported_timesteps elif isinstance(file_extension, np.ndarray): @@ -376,6 +390,7 @@ def write_pvd( assert len(file_extension) == times.shape[0] # Make mypy happy + assert times is not None assert file_extension is not None # Extract the time steps related to constant data and @@ -490,19 +505,18 @@ def _sort_and_unify_data( brought into unified format. """ - # Convert data to list, while keeping the data structures provided, - # or provide an empty list if no data provided - if data is None: - data = list() - elif not isinstance(data, list): - data = [data] + # The strategy is to traverse the input data and check each data point + # for its type and apply a corresponding conversion to a unique format. + # For each supported input data format, a dedicated routine is implemented + # to 1. identify whether the type is present; and 2. update the data + # dictionaries (to be returned in this routine) in the correct format: + # The dictionaries use (grid,key) and (edge,key) as key and the data + # array as value. - # Initialize container for data associated to nodes - subdomain_data: SubdomainData = dict() - interface_data: InterfaceData = dict() + # Now a list of these subroutines follows before the definition of the + # actual routine. - # Auxiliary function transforming scalar ranged values - # to vector ranged values when suitable. + # Aux. method: Transform scalar- to vector-ranged values. def toVectorFormat(value: np.ndarray, g: pp.Grid) -> np.ndarray: """ Check whether the value array has the right dimension corresponding @@ -523,8 +537,7 @@ def toVectorFormat(value: np.ndarray, g: pp.Grid) -> np.ndarray: return value - # Auxiliary functions for type checking - + # Aux. method: Check type for Interface def isinstance_interface(e: Interface) -> bool: """ Implementation of isinstance(e, Interface). @@ -536,80 +549,24 @@ def isinstance_interface(e: Interface) -> bool: and isinstance(e[1], pp.Grid) ) - def isinstance_Tuple_subdomains_str(t: Tuple[List[pp.Grid], str]) -> bool: + # Aux. method: Detect and convert data of form "key". + def add_data_from_str(pt, subdomain_data, interface_data): """ - Implementation of isinstance(t, Tuple[List[pp.Grid], str]). - - Detects data input of type ([g1, g2, ...], "name"), - where g1, g2, ... are subdomains. - """ - # Implementation of isinstance(Tuple[List[pp.Grid], str], t) - return list(map(type, t)) == [list, str] and all( - [isinstance(g, pp.Grid) for g in t[0]] - ) - - def isinstance_Tuple_interfaces_str(t: Tuple[List[Interface], str]) -> bool: + Check whether data is provided by a key of a field - could be both node + and edge data. If so, collect all data corresponding to grids and edges + identified by the key. 
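+
+            For instance, with a hypothetical key, passing "pressure" collects
+            the states stored under that key on every subdomain and interface
+            that provides it.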
""" - Implementation of isinstance(t, Tuple[List[Interface], str]). - - Detects data input of type ([e1, e2, ...], "name"), - where e1, e2, ... are interfaces. - """ - # Implementation of isinstance(t, Tuple[List[Tuple[pp.Grid, pp.Grid]], str]) - return list(map(type, t)) == [list, str] and all( - [isinstance_interface(g) for g in t[0]] - ) - - def isinstance_Tuple_subdomain_str_array( - t: Tuple[pp.Grid, str, np.ndarray] - ) -> bool: - """ - Implementation of isinstance(t, Tuple[pp.Grid, str, np.ndarray]). - """ - types = list(map(type, t)) - return isinstance(t[0], pp.Grid) and types[1:] == [str, np.ndarray] - - def isinstance_Tuple_interface_str_array( - t: Tuple[Interface, str, np.ndarray] - ) -> bool: - """ - Implementation of isinstance(t, Tuple[Interface, str, np.ndarray]). - """ - return list(map(type, t)) == [ - tuple, - str, - np.ndarray, - ] and isinstance_interface(t[0]) - - def isinstance_Tuple_str_array(pt) -> bool: - """ - Implementation if isinstance(pt, Tuple[str, np.ndarray]. - Detects data input of type ("name", value). - """ - # Check whether the tuple is of length 2 and has the right types. - return list(map(type, pt)) == [str, np.ndarray] - - # Loop over all data points and convert them collect them in the format - # (grid/edge, key, data) for single grids etc. and store them in two - # dictionaries with keys (grid/egde, key) and value given by data. - # Distinguish here between subdomain and interface data and store - # accordingly. - for pt in data: - # Allow for different cases. Distinguish each case separately. - - # Case 1: Data provided by the key of a field only - could be both node - # and edge data. Idea: Collect all data corresponding to grids and edges - # related to the key. + # Only continue in case data is of type str if isinstance(pt, str): - # The data point is simply a string addressing a state using a key + # Identify the key provided through the data. key = pt - # Fetch grids and interfaces as well as data associated to the key + # Initialize tag storing whether data corrspeonding to the key has been found. has_key = False - # Try to find the key in the data dictionary associated to subdomains + # Check data associated to subdomain field data for g, d in self.gb.nodes(): if pp.STATE in d and key in d[pp.STATE]: # Mark the key as found @@ -621,7 +578,7 @@ def isinstance_Tuple_str_array(pt) -> bool: # Add data point in correct format to the collection subdomain_data[(g, key)] = value - # Try to find the key in the data dictionary associated to interfaces + # Check data associated to interface field data for e, d in self.gb.edges(): if pp.STATE in d and key in d[pp.STATE]: # Mark the key as found @@ -640,15 +597,34 @@ def isinstance_Tuple_str_array(pt) -> bool: f"No data with provided key {key} present in the grid bucket." ) - # Case 2a: Data provided by a tuple (g, key). - # Here, g is a single grid or a list of grids. - # Idea: Collect the data only among the grids specified. - elif isinstance_Tuple_subdomains_str(pt): + # Return updated dictionaries and indicate succesful conversion. + return subdomain_data, interface_data, True - # By construction, the first component is a list of grids. - grids: List[pp.Grid] = pt[0] + else: + # Return original data dictionaries and indicate no modification. + return subdomain_data, interface_data, False + + # Aux. method: Detect and convert data of form ([grids], "key"). 
+        def add_data_from_Tuple_subdomains_str(
+            pt: Tuple[List[pp.Grid], str], subdomain_data, interface_data
+        ):
+            """
+            Check whether data is provided as tuple (g, key),
+            where g is a list of grids, and key is a string.
+            This routine explicitly checks only for subdomain data.
+            """
+
+            # Implementation of isinstance(t, Tuple[List[pp.Grid], str]).
+            isinstance_tuple_subdomains_str = list(map(type, pt)) == [
+                list,
+                str,
+            ] and all([isinstance(g, pp.Grid) for g in pt[0]])
+
+            # If of correct type, convert to unique format and update subdomain data.
+            if isinstance_tuple_subdomains_str:

-                # By construction, the second component contains a key.
+                # By construction, the first and second components are a list of grids and a key.
+                grids: List[pp.Grid] = pt[0]
                 key = pt[1]

                 # Loop over grids and fetch the states corresponding to the key
@@ -670,15 +646,32 @@ def isinstance_Tuple_str_array(pt) -> bool:
                     # Add data point in correct format to collection
                     subdomain_data[(g, key)] = value

-            # Case 2b: Data provided by a tuple (e, key)
-            # Here, e is a list of edges.
-            # Idea: Collect the data only among the specified interfaces.
-            elif isinstance_Tuple_interfaces_str(pt):
+                # Return updated dictionaries and indicate successful conversion.
+                return subdomain_data, interface_data, True

-                # By construction, the first component contains a list of interfaces.
-                edges: List[Interface] = pt[0]
+            else:
+                # Return original data dictionaries and indicate no modification.
+                return subdomain_data, interface_data, False
+
+        # Aux. method: Detect and convert data of form ([interfaces], "key").
+        def add_data_from_Tuple_interfaces_str(pt, subdomain_data, interface_data):
+            """
+            Check whether data is provided as tuple (e, key),
+            where e is a list of interfaces, and key is a string.
+            This routine explicitly checks only for interface data.
+            """
+
+            # Implementation of isinstance(t, Tuple[List[Interface], str]).
+            isinstance_tuple_interfaces_str = list(map(type, pt)) == [
+                list,
+                str,
+            ] and all([isinstance_interface(g) for g in pt[0]])

-                # By construction, the second component contains a key.
+            # If of correct type, convert to unique format and update interface data.
+            if isinstance_tuple_interfaces_str:
+
+                # By construction, the first and second components are a list of interfaces and a key.
+                edges: List[Interface] = pt[0]
                 key = pt[1]

                 # Loop over edges and fetch the states corresponding to the key
@@ -701,33 +694,33 @@ def isinstance_Tuple_str_array(pt) -> bool:
                     # Add data point in correct format to collection
                     interface_data[(e, key)] = value

-            # Case 3: Data provided by a tuple (key, data).
-            # This case is only well-defined, if the grid bucket contains
-            # only a single grid.
-            # Idea: Extract the unique grid and continue as in Case 2.
-            elif isinstance_Tuple_str_array(pt):
+                # Return updated dictionaries and indicate successful conversion.
+                return subdomain_data, interface_data, True

-                # Fetch the correct grid. This option is only supported for grid
-                # buckets containing a single grid.
-                grids = [g for g, _ in self.gb.nodes()]
-                g = grids[0]
-                if not len(grids) == 1:
-                    raise ValueError(
-                        f"""The data type used for {pt} is only
-                        supported if the grid bucket only contains a single grid."""
-                    )
+            else:
+                # Return original data dictionaries and indicate no modification.
+                return subdomain_data, interface_data, False

-                # Fetch remaining ingredients required to define node data element
-                key = pt[0]
-                value = toVectorFormat(pt[1], g)
+        # Aux. method: Detect and convert data of form (g, "key", value).
+        def add_data_from_Tuple_subdomain_str_array(pt, subdomain_data, interface_data):
+            """
+            Check whether data is provided as tuple (g, key, data),
+            where g is a single grid, key is a string, and data is a user-defined data array.
+            This routine explicitly checks only for subdomain data.
+            """

-                # Add data point in correct format to collection
-                subdomain_data[(g, key)] = value
+            # Implementation of isinstance(t, Tuple[pp.Grid, str, np.ndarray]).
+            # NOTE: The type of a grid identifies the specific grid type (simplicial
+            # etc.). Thus, isinstance is used here to detect whether a grid is provided.
+            types = list(map(type, pt))
+            isinstance_Tuple_subdomain_str_array = isinstance(pt[0], pp.Grid) and types[
+                1:
+            ] == [str, np.ndarray]

-            # Case 4a: Data provided as tuple (g, key, data).
-            # Idea: Since this is already the desired format, process naturally.
-            elif isinstance_Tuple_subdomain_str_array(pt):
-                # Data point in correct format
+            # Convert data to unique format and update the subdomain data dictionary.
+            if isinstance_Tuple_subdomain_str_array:
+
+                # Interpret (g, key, value) = (pt[0], pt[1], pt[2]);
                 g = pt[0]
                 key = pt[1]
                 value = toVectorFormat(pt[2], g)
@@ -735,12 +728,35 @@ def isinstance_Tuple_str_array(pt) -> bool:
                 # Add data point in correct format to collection
                 subdomain_data[(g, key)] = value

-            # Case 4b: Data provided as tuple (e, key, data).
-            # Idea: Since this is already the desired format, process naturally.
-            elif isinstance_Tuple_interface_str_array(pt):
-                # Data point in correct format
+                # Return updated dictionaries and indicate successful conversion.
+                return subdomain_data, interface_data, True
+
+            else:
+                # Return original data dictionaries and indicate no modification.
+                return subdomain_data, interface_data, False
+
+        # Aux. method: Detect and convert data of form (e, "key", value).
+        def add_data_from_Tuple_interface_str_array(pt, subdomain_data, interface_data):
+            """
+            Check whether data is provided as tuple (e, key, data),
+            where e is a single interface, key is a string, and data is a user-defined
+            data array. This routine explicitly checks only for interface data.
+            """
+
+            # Implementation of isinstance(t, Tuple[Interface, str, np.ndarray]).
+            isinstance_Tuple_interface_str_array = list(map(type, pt)) == [
+                tuple,
+                str,
+                np.ndarray,
+            ] and isinstance_interface(pt[0])
+
+            # Convert data to unique format and update the interface data dictionary.
+            if isinstance_Tuple_interface_str_array:
+
                 # Interpret (e, key, value) = (pt[0], pt[1], pt[2]);
                 e = pt[0]
                 key = pt[1]
+
+                # Fetch grid data for converting the value to the right format.
                 d = self.gb.edge_props(e)
                 mg = d["mortar_grid"]
                 value = toVectorFormat(pt[2], mg)
@@ -748,9 +764,103 @@ def isinstance_Tuple_str_array(pt) -> bool:
                 # Add data point in correct format to collection
                 interface_data[(e, key)] = value

+                # Return updated dictionaries and indicate successful conversion.
+                return subdomain_data, interface_data, True
+
+            else:
+                # Return original data dictionaries and indicate no modification.
+                return subdomain_data, interface_data, False
+
+        # Aux. method: Detect and convert data of form ("key", value).
+        def add_data_from_Tuple_str_array(pt, subdomain_data, interface_data):
+            """
+            Check whether data is provided by a tuple (key, data),
+            where key is a string, and data is a user-defined data array.
+            This only works when the grid bucket contains a single grid.
+            """
+
+            # Implementation of isinstance(pt, Tuple[str, np.ndarray]).
+            isinstance_Tuple_str_array = list(map(type, pt)) == [str, np.ndarray]
+
+            # Convert data to unique format and update the subdomain data dictionary.
+            if isinstance_Tuple_str_array:
+
+                # Fetch the correct grid. This option is only supported for grid
+                # buckets containing a single grid.
+                grids = [g for g, _ in self.gb.nodes()]
+                if not len(grids) == 1:
+                    raise ValueError(
+                        f"""The data type used for {pt} is only
+                        supported if the grid bucket only contains a single grid."""
+                    )
+
+                # Fetch remaining ingredients required to define node data element
+                g = grids[0]
+                key = pt[0]
+                value = toVectorFormat(pt[1], g)
+
+                # Add data point in correct format to collection
+                subdomain_data[(g, key)] = value
+
+                # Return updated dictionaries and indicate successful conversion.
+                return subdomain_data, interface_data, True
+
+            else:
+                # Return original data dictionaries and indicate no modification.
+                return subdomain_data, interface_data, False
+
+        ################################################
+        # The actual routine _sort_and_unify_data()
+        ################################################
+
+        # Convert data to list, while keeping the data structures provided,
+        # or provide an empty list if no data provided
+        if data is None:
+            data = list()
+        elif not isinstance(data, list):
+            data = [data]
+
+        # Initialize container for data associated to nodes
+        subdomain_data: SubdomainData = dict()
+        interface_data: InterfaceData = dict()
+
+        # Define methods to be used for checking the data type and performing
+        # the conversion. This list implicitly also defines which input data is
+        # allowed.
+        methods = [
+            add_data_from_str,
+            add_data_from_Tuple_subdomains_str,
+            add_data_from_Tuple_interfaces_str,
+            add_data_from_Tuple_subdomain_str_array,
+            add_data_from_Tuple_interface_str_array,
+            add_data_from_Tuple_str_array,
+        ]
+
+        # Loop over all data points and collect them in a unified format.
+        # For this, two separate dictionaries (for subdomain and interface data)
+        # are used. The final format uses (subdomain/interface,key) as key of
+        # the dictionaries, and the value is given by the corresponding data.
+        for pt in data:
+
+            # Initialize tag storing whether the conversion process for pt is successful.
+            success = False
+
+            for method in methods:
+
+                # Check whether the data point is of the right type and convert it to
+                # the unique data type.
+                subdomain_data, interface_data, success = method(
+                    pt, subdomain_data, interface_data
+                )
+
+                # Stop, once a supported data format has been detected.
+                if success:
+                    break
+
+            # Check if the provided data has a non-supported data format.
+            if not success:
                 raise ValueError(
-                    f"The provided data type used for {pt} is not supported."
+                    f"Input data {pt} has wrong format and cannot be exported."
                 )

         return subdomain_data, interface_data
@@ -1021,8 +1131,11 @@ def _export_gb_pvd(self, file_name: str, time_step: Optional[int]) -> None:

     def _update_meshio_geom(self) -> None:
         """
-        Auxiliary method upating internal copies of the grids. Merely asks for
-        exporting grids.
+        Manager for storing the grid information in meshio format.
+
+        The internal variables meshio_geom and m_meshio_geom, storing all
+        essential (mortar) grid information as points, connectivity and cell
+        ids, are created and stored.
         """
         # Subdomains
         for dim in self.dims:
@@ -1094,7 +1207,9 @@ def _export_1d(self, gs: Iterable[pp.Grid]) -> Meshio_Geom:
         num_pts = np.sum([g.num_nodes for g in gs])
         meshio_pts = np.empty((num_pts, 3))  # type: ignore

-        # Initialize offset. Required taking into account multiple 1d grids.
+        # Initialize offset. All data associated to 1d grids is stored in
+        # the same vtu file, essentially using concatenation. To identify
+        # each grid, keep track of number of nodes for each grid.
         nodes_offset = 0

         # Loop over all 1d grids
         for g in gs:

             # Store node coordinates
             sl = slice(nodes_offset, nodes_offset + g.num_nodes)
             meshio_pts[sl, :] = g.nodes.T

-            # Lines are simplices, and have a trivial connectvity.
-            cn_indices = self._simplex_cell_to_nodes(2, g)
+            # Lines are 1-simplices, and have a trivial connectivity.
+            cn_indices = self._simplex_cell_to_nodes(1, g)

             # Add to previous connectivity information
             cell_to_nodes[cell_type] = np.vstack(
@@ -1136,7 +1251,7 @@ def _simplex_cell_to_nodes(
         Determine cell to node connectivity for a general n-simplex mesh.

         Parameters:
-            n (int): order of the simplices in the grid.
+            n (int): dimension of the simplices in the grid.
             g (pp.Grid): grid containing cells and nodes.
             cells (np.ndarray, optional): all n-simplex cells
@@ -1151,16 +1266,19 @@ def _simplex_cell_to_nodes(
             else g.cell_nodes().indptr[cells]
         )

-        # Collect the indptr to all nodes of the cell; each n-simplex cell contains n nodes
-        expanded_cn_indptr = np.vstack([cn_indptr + i for i in range(n)]).reshape(
-            -1, order="F"
-        )
+        # Each n-simplex has n+1 nodes
+        num_nodes = n + 1
+
+        # Collect the indptr to all nodes of the cell.
+        expanded_cn_indptr = np.vstack(
+            [cn_indptr + i for i in range(num_nodes)]
+        ).reshape(-1, order="F")

         # Detect all corresponding nodes by applying the expanded mask to the indices
         expanded_cn_indices = g.cell_nodes().indices[expanded_cn_indptr]

         # Convert to right format.
-        cn_indices = np.reshape(expanded_cn_indices, (-1, n), order="C")
+        cn_indices = np.reshape(expanded_cn_indices, (-1, num_nodes), order="C")

         return cn_indices

@@ -1192,7 +1310,9 @@ def _export_2d(self, gs: Iterable[pp.Grid]) -> Meshio_Geom:
         num_pts = np.sum([g.num_nodes for g in gs])
         meshio_pts = np.empty((num_pts, 3))  # type: ignore

-        # Initialize offsets. Required taking into account multiple 2d grids.
+        # Initialize offsets. All data associated to 2d grids is stored in
+        # the same vtu file, essentially using concatenation. To identify
+        # each grid, keep track of number of nodes and cells for each grid.
         nodes_offset = 0
         cell_offset = 0

@@ -1243,14 +1363,15 @@ def _export_2d(self, gs: Iterable[pp.Grid]) -> Meshio_Geom:
                 # Fetch triangle cells
                 cells = g_cell_map[cell_type]

-                # Determine the trivial connectivity.
-                cn_indices = self._simplex_cell_to_nodes(3, g, cells)
+                # Determine the trivial connectivity - triangles are 2-simplices.
+                cn_indices = self._simplex_cell_to_nodes(2, g, cells)

             # Quad and polygon cells.
             else:
                 # For quads/polygons, g.cell_nodes cannot be blindly used as for
                 # triangles, since the ordering of the nodes may define a cell of
-                # the type (here specific for quads) x--x and not x--x
+                # the type (here specific for quads)
+                # x--x   and not   x--x
                 #  \/               |  |
                 #  /\               |  |
                 # x--x             x--x .
@@ -1491,7 +1612,9 @@ def _export_simplex_3d(self, gs: Iterable[pp.Grid]) -> Meshio_Geom:
         num_pts = np.sum([g.num_nodes for g in gs])
         meshio_pts = np.empty((num_pts, 3))  # type: ignore

-        # Initialize offsets. Required taking into account multiple 3d grids.
+        # Initialize offsets. All data associated to 3d grids is stored in
+        # the same vtu file, essentially using concatenation. To identify
+        # each grid, keep track of number of nodes and cells for each grid.
        nodes_offset = 0
         cell_offset = 0

@@ -1510,7 +1633,8 @@ def _export_simplex_3d(self, gs: Iterable[pp.Grid]) -> Meshio_Geom:

             # Determine local connectivity for all cells. Simplices are invariant
             # under permuatations. Thus, no specific ordering of nodes is required.
-            cn_indices = self._simplex_cell_to_nodes(4, g, cells)
+            # Tetrahedra are essentially 3-simplices.
+            cn_indices = self._simplex_cell_to_nodes(3, g, cells)

             # Store cell-node connectivity, and add offset taking into account
             # previous grids
@@ -1804,7 +1928,7 @@ def _append_folder_name(
         """

         # If no folder_name is prescribed, the files will be stored in the
-        # same folder.
+        # working directory.
         if folder_name is None:
             return name

diff --git a/tutorials/exporter.ipynb b/tutorials/exporter.ipynb
index 30d28d18dc..1cb53d3816 100644
--- a/tutorials/exporter.ipynb
+++ b/tutorials/exporter.ipynb
@@ -105,7 +105,7 @@
    "metadata": {},
    "source": [
     "## Defining the exporter\n",
-    "The definition of an object of type pp.Exporter, merely a grid bucket as well as a target is required for the output."
+    "Two arguments are required to define an object of type pp.Exporter: a grid bucket and the target name of the output. Optionally, one can add a directory name. In fact, instead of a grid bucket, a single grid can also be provided; see Example 7."
    ]
   },
   {

From 78fc04555a2992cc04c24b224691f8727e509483 Mon Sep 17 00:00:00 2001
From: Jakub Both
Date: Tue, 14 Jun 2022 15:52:09 +0200
Subject: [PATCH 56/83] MAINT: Use simpler setup of the geometry in the exporter tutorial.

---
 tutorials/exporter.ipynb | 31 ++-----------------------------
 1 file changed, 2 insertions(+), 29 deletions(-)

diff --git a/tutorials/exporter.ipynb b/tutorials/exporter.ipynb
index 1cb53d3816..6fde366084 100644
--- a/tutorials/exporter.ipynb
+++ b/tutorials/exporter.ipynb
@@ -28,40 +28,13 @@
    "source": [
     "import numpy as np\n",
     "import porepy as pp\n",
+    "from porepy.grids.standard_grids.grid_buckets_2d import two_intersecting\n",
     "\n",
     "class BiotFractured(pp.ContactMechanicsBiot):\n",
     "\n",
     "    def create_grid(self) -> None:\n",
     "        # Define domain\n",
-    "        self.box: Dict = pp.geometry.bounding_box.from_points(\n",
-    "            np.array([[0, 1], [0, 1]])\n",
-    "        )\n",
-    "\n",
-    "        # Define fracture network\n",
-    "        # ... pts\n",
-    "        frac_a = np.array([[1 / 2.0, 0.0], [1 / 2.0, 0.5]]).T\n",
-    "        frac_b = np.array([[0.16, 0.2], [1, 0.5]]).T\n",
-    "\n",
-    "        intersecting_fractures = True\n",
-    "\n",
-    "        if intersecting_fractures:\n",
-    "            frac_pts = np.hstack((frac_a, frac_b))\n",
-    "            frac_connectivity = np.array([[0, 1], [2,3]]).T\n",
-    "        else:\n",
-    "            frac_pts = frac_a\n",
-    "            frac_connectivity = np.array([[0], [1]])\n",
-    "\n",
-    "        # ... network\n",
-    "        network_2d = pp.FractureNetwork2d(frac_pts, frac_connectivity, self.box)\n",
-    "\n",
-    "        # Generate mixed-dimensional mesh\n",
-    "        mesh_size_frac = self.params.get(\"mesh_size_frac\", 1e-1)\n",
-    "        mesh_size_bound = self.params.get(\"mesh_size_bound\", 1e-1)\n",
-    "        mesh_args = {\n",
-    "            \"mesh_size_frac\": mesh_size_frac,\n",
-    "            \"mesh_size_bound\": mesh_size_bound\n",
-    "        }\n",
-    "        self.gb = network_2d.mesh(mesh_args)\n",
+    "        self.gb, self.box = two_intersecting()\n",
     "\n",
     "        pp.contact_conditions.set_projections(self.gb)\n",
     "\n",

From 9a1283f667f0e6c0b3d34bf25922136823e303dc Mon Sep 17 00:00:00 2001
From: Jakub Both
Date: Tue, 14 Jun 2022 22:08:14 +0200
Subject: [PATCH 57/83] MAINT: Add test checking whether the hard-coded node numbering of hex cells is correct.
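The planarity criterion used by the new test can be sketched in plain numpy,
outside numba (a minimal, hedged sketch for reference; the function name and
the toy quad below are illustrative and not part of the patch):

    import numpy as np

    def is_planar_quad(coords: np.ndarray) -> bool:
        # coords: 3 x 4 array; each column is a corner of a candidate hex side,
        # ordered as a circular chain. Compute normalized cross products of
        # consecutive edge pairs; for a planar, consistently ordered quad they
        # all coincide, so the matrix of cross products has rank 1.
        normalized_cross = np.zeros((3, 3))
        for j in range(3):
            cross = np.cross(
                coords[:, j + 1] - coords[:, j],
                coords[:, (j + 2) % 4] - coords[:, j + 1],
            )
            normalized_cross[:, j] = cross / np.linalg.norm(cross)
        return np.linalg.matrix_rank(normalized_cross) == 1

    # Unit square in the xy-plane: planar, hence True.
    quad = np.array([[0, 1, 1, 0], [0, 0, 1, 1], [0, 0, 0, 0]], dtype=float)
    print(is_planar_quad(quad))

The test below applies this criterion to the node sets [0,1,2,3], [4,5,6,7],
and [0,1,5,4] of each cell, i.e., three sides that together pin down the
meshio hexahedron numbering.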
---
 src/porepy/viz/exporter.py | 77 ++++++++++++++++++++++++++++++++++++--
 1 file changed, 74 insertions(+), 3 deletions(-)

diff --git a/src/porepy/viz/exporter.py b/src/porepy/viz/exporter.py
index 14aa7462a4..0c67e598cb 100644
--- a/src/porepy/viz/exporter.py
+++ b/src/porepy/viz/exporter.py
@@ -1732,6 +1732,9 @@ def _export_hexahedron_3d(self, gs: Iterable[pp.Grid]) -> Meshio_Geom:
             # regarding hexahedron cells.
             cn_indices = cn_indices[:, [0, 2, 6, 4, 1, 3, 7, 5]]

+            # Test whether the hard-coded numbering really defines a hexahedron.
+            assert self._test_hex_meshio_format(g.nodes, cn_indices)
+
             # Store cell-node connectivity, and add offset taking into account
             # previous grids
             cell_to_nodes = np.vstack((cell_to_nodes, cn_indices + nodes_offset))
@@ -1750,6 +1753,75 @@ def _export_hexahedron_3d(self, gs: Iterable[pp.Grid]) -> Meshio_Geom:
         # Return final meshio data: points, cell (connectivity), cell ids
         return Meshio_Geom(meshio_pts, meshio_cells, meshio_cell_id)

+    def _test_hex_meshio_format(self, nodes, cn_indices) -> bool:
+
+        import numba
+
+        @numba.jit(nopython=True)
+        def _function_to_compile(nodes, cn_indices) -> bool:
+            """
+            Test whether the node numbering for each cell complies
+            with the hardcoded numbering used by meshio, cf. documentation
+            of meshio.
+
+            For this, fetch three potential sides of the hex and
+            test whether they form a circular chain within a plane.
+            Here, the test is successful if the node sets [0,1,2,3],
+            [4,5,6,7], and [0,1,5,4] define planes.
+            """
+
+            # Assume initially correct format
+            correct_format = True
+
+            # Test each cell separately
+            for i in range(cn_indices.shape[0]):
+
+                # Assume initially that the cell is a hex
+                is_hex = True
+
+                # Visit three node sets and check whether
+                # they define feasible sides of a hex.
+                global_ind_0 = cn_indices[i, 0:4]
+                global_ind_1 = cn_indices[i, 4:8]
+                global_ind_2 = np.array(
+                    [
+                        cn_indices[i, 0],
+                        cn_indices[i, 1],
+                        cn_indices[i, 5],
+                        cn_indices[i, 4],
+                    ]
+                )
+
+                # Check each side separately
+                for global_ind in [global_ind_0, global_ind_1, global_ind_2]:
+
+                    # Fetch coordinates associated to the four nodes
+                    coords = nodes[:, global_ind]
+
+                    # Check orientation by determining normalized cross products
+                    # of all two consecutive connections.
+                    normalized_cross = np.zeros((3, 3))
+                    for j in range(3):
+                        cross = np.cross(
+                            coords[:, j + 1] - coords[:, j],
+                            coords[:, (j + 2) % 4] - coords[:, j + 1],
+                        )
+                        normalized_cross[:, j] = cross / np.linalg.norm(cross)
+
+                    # Consider the points lying in a plane when each connection
+                    # produces the same normalized cross product.
+                    is_plane = np.linalg.matrix_rank(normalized_cross) == 1
+
+                    # Combine with the remaining sides
+                    is_hex = is_hex and is_plane
+
+                # Combine the results for all cells
+                correct_format = correct_format and is_hex
+
+            return correct_format
+
+        return _function_to_compile(nodes, cn_indices)
+
     def _export_polyhedron_3d(self, gs: Iterable[pp.Grid]) -> Meshio_Geom:
         """
         Export the geometrical data (point coordinates) and connectivity
@@ -1887,9 +1959,8 @@ def _write(
         # Utilize meshio_geom for this.
         for field in fields:

-            # TODO RM? as implemented now, field.values should never be None.
-            if field.values is None:
-                continue
+            # Although technically possible, as implemented, field.values should never be None.
+ assert field.values is not None # For each field create a sub-vector for each geometrically uniform group of cells num_block = meshio_geom.cell_ids.size From 64e75fa0aa7af9154eb8c2369461b639c146d76a Mon Sep 17 00:00:00 2001 From: Jakub Both Date: Fri, 17 Jun 2022 01:17:14 +0200 Subject: [PATCH 58/83] FIX: Use consistent typing. Aim at using 'any' instead of 'C' layout. --- src/porepy/viz/exporter.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/porepy/viz/exporter.py b/src/porepy/viz/exporter.py index 0c67e598cb..01e5f78196 100644 --- a/src/porepy/viz/exporter.py +++ b/src/porepy/viz/exporter.py @@ -873,7 +873,8 @@ def _update_constant_mesh_data(self) -> None: attributes _constant_subdomain_data and _constant_interface_data, have the same format as the output of _sort_and_unify_data. """ - # Identify change in constant data. Has the effect that + # Assume a change in constant data (it is not checked whether + # the data really has been modified). Has the effect that # the constant data container will be exported at the # next application of write_vtu(). self._exported_constant_data_up_to_date = False @@ -1790,7 +1791,7 @@ def _function_to_compile(nodes, cn_indices) -> bool: cn_indices[i, 5], cn_indices[i, 4], ] - ) + )[:] # Check each side separately for global_ind in [global_ind_0, global_ind_1, global_ind_2]: From d62b4d195d53d3778abf1f6d0462ccf4f83bb23f Mon Sep 17 00:00:00 2001 From: Jakub Both Date: Fri, 17 Jun 2022 01:24:35 +0200 Subject: [PATCH 59/83] FIX: Use consistent order. --- src/porepy/viz/exporter.py | 24 +++++++++++++++++++++--- 1 file changed, 21 insertions(+), 3 deletions(-) diff --git a/src/porepy/viz/exporter.py b/src/porepy/viz/exporter.py index 01e5f78196..f199dc4924 100644 --- a/src/porepy/viz/exporter.py +++ b/src/porepy/viz/exporter.py @@ -1782,8 +1782,26 @@ def _function_to_compile(nodes, cn_indices) -> bool: # Visit three node sets and check whether # they define feasible sides of a hex. - global_ind_0 = cn_indices[i, 0:4] - global_ind_1 = cn_indices[i, 4:8] + # FIXME use shorter code of the following style - note: the order in the + # array is not the same troubling numba + #global_ind_0 = cn_indices[i, 0:4] + #global_ind_1 = cn_indices[i, 4:8] + global_ind_0 = np.array( + [ + cn_indices[i, 0], + cn_indices[i, 1], + cn_indices[i, 2], + cn_indices[i, 3], + ] + ) + global_ind_1 = np.array( + [ + cn_indices[i, 4], + cn_indices[i, 5], + cn_indices[i, 6], + cn_indices[i, 7], + ] + ) global_ind_2 = np.array( [ cn_indices[i, 0], @@ -1791,7 +1809,7 @@ def _function_to_compile(nodes, cn_indices) -> bool: cn_indices[i, 5], cn_indices[i, 4], ] - )[:] + ) # Check each side separately for global_ind in [global_ind_0, global_ind_1, global_ind_2]: From 1b72d8f1e66816ae38324147ca7d2dfd2664f8af Mon Sep 17 00:00:00 2001 From: Jakub Both Date: Fri, 17 Jun 2022 01:28:13 +0200 Subject: [PATCH 60/83] STY: Black. --- src/porepy/viz/exporter.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/porepy/viz/exporter.py b/src/porepy/viz/exporter.py index f199dc4924..549e799cf9 100644 --- a/src/porepy/viz/exporter.py +++ b/src/porepy/viz/exporter.py @@ -1784,8 +1784,8 @@ def _function_to_compile(nodes, cn_indices) -> bool: # they define feasible sides of a hex. 
                # FIXME use shorter code of the following style - note: the order in the
                # array is not the same troubling numba
-                #global_ind_0 = cn_indices[i, 0:4]
-                #global_ind_1 = cn_indices[i, 4:8]
+                # global_ind_0 = cn_indices[i, 0:4]
+                # global_ind_1 = cn_indices[i, 4:8]

From 823abd2ea71f3b806c694fb2fff361298e87935f Mon Sep 17 00:00:00 2001
From: Jakub Both
Date: Sat, 25 Jun 2022 23:22:45 +0200
Subject: [PATCH 61/83] MAINT: Remove redundant routine for renaming output.

---
 src/porepy/viz/exporter.py | 12 ------------
 1 file changed, 12 deletions(-)

diff --git a/src/porepy/viz/exporter.py b/src/porepy/viz/exporter.py
index 549e799cf9..c9ee2d2b64 100644
--- a/src/porepy/viz/exporter.py
+++ b/src/porepy/viz/exporter.py
@@ -193,18 +193,6 @@ def __init__(
                 "The sorting algorithm requires numba to be installed."
             )

-    # TODO in use by anyone?
-    def change_name(self, file_name: str) -> None:
-        """
-        Change the root name of the files, useful when different keywords are
-        considered but on the same grid.
-
-        Parameters:
-            file_name (str): the new root name of the files.
-
-        """
-        self.file_name = file_name

From 4c2a4a3a4558e1758c71f3275d0c9d36092a1121 Mon Sep 17 00:00:00 2001
From: Jakub Both
Date: Sat, 25 Jun 2022 23:40:44 +0200
Subject: [PATCH 62/83] MAINT: Move routine for sorting points to utils.

---
 src/porepy/utils/sort_points.py | 92 +++++++++++++++++++++++++++++++
 src/porepy/viz/exporter.py      | 98 +---------------------------------
 2 files changed, 93 insertions(+), 97 deletions(-)

diff --git a/src/porepy/utils/sort_points.py b/src/porepy/utils/sort_points.py
index a71c699f5c..08ed918836 100644
--- a/src/porepy/utils/sort_points.py
+++ b/src/porepy/utils/sort_points.py
@@ -101,6 +101,98 @@ def sort_point_pairs(
     return sorted_lines, sort_ind

+def sort_multiple_point_pairs(lines: np.ndarray) -> np.ndarray:
+    """Function to sort multiple pairs of points to form circular chains.
+
+    The routine contains essentially the same functionality as sort_point_pairs,
+    but stripped down to the special case of circular chains. Differently to
+    sort_point_pairs, this variant sorts an arbitrary number of independent
+    point pairs. The chains are restricted by the assumption that each contains
+    equally many line segments. Finally, this routine uses numba.
+
+    Parameters:
+        lines (np.ndarray): Array of size 2 * num_chains x num_lines_per_chain,
+            containing node indices. For each pair of two rows, each column
+            represents a line segment connecting the two nodes in the two entries
+            of this column.
+
+    Returns:
+        np.ndarray: Sorted version of lines, where for each chain, the collection
+            of line segments has been potentially flipped and sorted.
+    """
+
+    import numba
+
+    @numba.jit(nopython=True)
+    def _function_to_compile(lines):
+        """
+        Copy of pp.utils.sort_points.sort_point_pairs. This version is extended
+        to multiple chains. Each chain is implicitly assumed to be circular.
+        """
+
+        # Retrieve number of chains and lines per chain from the shape.
+        # Implicitly expect that all chains have the same length
+        num_chains, chain_length = lines.shape
+        # Since for each chain lines includes two rows, divide by two
+        num_chains = int(num_chains / 2)
+
+        # Initialize array of sorted lines to be the final output
+        sorted_lines = np.zeros((2 * num_chains, chain_length))
+        # Fix the first line segment for each chain and identify
+        # it as in place regarding the sorting.
+ sorted_lines[:, 0] = lines[:, 0] + # Keep track of which lines have been fixed and which are still candidates + found = np.zeros(chain_length) + found[0] = 1 + + # Loop over chains and consider each chain separately. + for c in range(num_chains): + # Initialize found making any line segment aside of the first a candidate + found[1:] = 0 + + # Define the end point of the previous and starting point for the next + # line segment + prev = sorted_lines[2 * c + 1, 0] + + # The sorting algorithm: Loop over all positions in the chain to be set next. + # Find the right candidate to be moved to this position and possibly flipped + # if needed. A candidate is identified as fitting if it contains one point + # equal to the current starting point. This algorithm uses a double loop, + # which is the most naive approach. However, assume chain_length is in + # general small. + for i in range(1, chain_length): # The first line has already been found + for j in range( + 1, chain_length + ): # The first line has already been found + # A candidate line segment with matching start and end point + # in the first component of the point pair. + if np.abs(found[j]) < 1e-6 and lines[2 * c, j] == prev: + # Copy the segment to the right place + sorted_lines[2 * c : 2 * c + 2, i] = lines[2 * c : 2 * c + 2, j] + # Mark as used + found[j] = 1 + # Define the starting point for the next line segment + prev = lines[2 * c + 1, j] + break + # A candidate line segment with matching start and end point + # in the second component of the point pair. + elif np.abs(found[j]) < 1e-6 and lines[2 * c + 1, j] == prev: + # Flip and copy the segment to the right place + sorted_lines[2 * c, i] = lines[2 * c + 1, j] + sorted_lines[2 * c + 1, i] = lines[2 * c, j] + # Mark as used + found[j] = 1 + # Define the starting point for the next line segment + prev = lines[2 * c, j] + break + + # Return the sorted lines defining chains. + return sorted_lines + + # Run numba compiled function + return _function_to_compile(lines) + + def sort_point_plane( pts: np.ndarray, centre: np.ndarray, diff --git a/src/porepy/viz/exporter.py b/src/porepy/viz/exporter.py index c9ee2d2b64..8c4fec2a44 100644 --- a/src/porepy/viz/exporter.py +++ b/src/porepy/viz/exporter.py @@ -1401,7 +1401,7 @@ def _export_2d(self, gs: Iterable[pp.Grid]) -> Meshio_Geom: # Sort faces for each cell such that they form a chain. Use a function # compiled with Numba. This step is the bottleneck of this routine. - cfn = self._sort_point_pairs_numba(cfn).astype(int) + cfn = pp.utils.sort_points.sort_multiple_point_pairs(cfn).astype(int) # For each cell pick the sorted nodes such that they form a chain # and thereby define the connectivity, i.e., skip every second row. @@ -1436,102 +1436,6 @@ def _export_2d(self, gs: Iterable[pp.Grid]) -> Meshio_Geom: # Return final meshio data: points, cell (connectivity), cell ids return Meshio_Geom(meshio_pts, meshio_cells, meshio_cell_id) - def _sort_point_pairs_numba(self, lines: np.ndarray) -> np.ndarray: - """ - This a simplified copy of pp.utils.sort_points.sort_point_pairs. - - Essentially the same functionality as sort_point_pairs, but stripped down - to the special case of circular chains. Differently to sort_point_pairs, this - variant sorts an arbitrary amount of independent point pairs. The chains - are restricted by the assumption that each contains equally many line segments. - Finally, this routine uses numba. - - Parameters: - lines (np.ndarray): Array of size 2 * num_chains x num_lines_per_chain, - containing node indices. 
For each pair of two rows, each column - represents a line segment connectng the two nodes in the two entries - of this column. - - Returns: - np.ndarray: Sorted version of lines, where for each chain, the collection - of line segments has been potentially flipped and sorted. - """ - - import numba - - @numba.jit(nopython=True) - def _function_to_compile(lines): - """ - Copy of pp.utils.sort_points.sort_point_pairs. This version is extended - to multiple chains. Each chain is implicitly assumed to be circular. - """ - - # Retrieve number of chains and lines per chain from the shape. - # Implicitly expect that all chains have the same length - num_chains, chain_length = lines.shape - # Since for each chain lines includes two rows, divide by two - num_chains = int(num_chains / 2) - - # Initialize array of sorted lines to be the final output - sorted_lines = np.zeros((2 * num_chains, chain_length)) - # Fix the first line segment for each chain and identify - # it as in place regarding the sorting. - sorted_lines[:, 0] = lines[:, 0] - # Keep track of which lines have been fixed and which are still candidates - found = np.zeros(chain_length) - found[0] = 1 - - # Loop over chains and consider each chain separately. - for c in range(num_chains): - # Initialize found making any line segment aside of the first a candidate - found[1:] = 0 - - # Define the end point of the previous and starting point for the next - # line segment - prev = sorted_lines[2 * c + 1, 0] - - # The sorting algorithm: Loop over all positions in the chain to be set next. - # Find the right candidate to be moved to this position and possibly flipped - # if needed. A candidate is identified as fitting if it contains one point - # equal to the current starting point. This algorithm uses a double loop, - # which is the most naive approach. However, assume chain_length is in - # general small. - for i in range( - 1, chain_length - ): # The first line has already been found - for j in range( - 1, chain_length - ): # The first line has already been found - # A candidate line segment with matching start and end point - # in the first component of the point pair. - if np.abs(found[j]) < 1e-6 and lines[2 * c, j] == prev: - # Copy the segment to the right place - sorted_lines[2 * c : 2 * c + 2, i] = lines[ - 2 * c : 2 * c + 2, j - ] - # Mark as used - found[j] = 1 - # Define the starting point for the next line segment - prev = lines[2 * c + 1, j] - break - # A candidate line segment with matching start and end point - # in the second component of the point pair. - elif np.abs(found[j]) < 1e-6 and lines[2 * c + 1, j] == prev: - # Flip and copy the segment to the right place - sorted_lines[2 * c, i] = lines[2 * c + 1, j] - sorted_lines[2 * c + 1, i] = lines[2 * c, j] - # Mark as used - found[j] = 1 - # Define the starting point for the next line segment - prev = lines[2 * c, j] - break - - # Return the sorted lines defining chains. - return sorted_lines - - # Run numba compiled function - return _function_to_compile(lines) - def _export_3d(self, gs: Iterable[pp.Grid]) -> Meshio_Geom: """ Export the geometrical data (point coordinates) and connectivity From 0ac365bba26b8fed73b57660d0acf8135dcfe17b Mon Sep 17 00:00:00 2001 From: Jakub Both Date: Sun, 26 Jun 2022 00:00:12 +0200 Subject: [PATCH 63/83] DOC: Be more precise on terminology of constant data. 
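For illustration, the intended workflow is roughly the following (a hedged
usage sketch; the key "porosity" and its values are made up for this example,
and two_intersecting is the standard grid bucket also used in the tutorial):

    import numpy as np
    import porepy as pp
    from porepy.grids.standard_grids.grid_buckets_2d import two_intersecting

    gb, _ = two_intersecting()
    save = pp.Exporter(gb, "solution", folder_name="results")

    # Register constant-in-time cell data once; it is written to the separate
    # constant-data files and re-exported only when the constant data is updated.
    g = gb.grids_of_dimension(2)[0]
    save.add_constant_data([(g, "porosity", 0.2 * np.ones(g.num_cells))])

    # Subsequent exports merely reference the constant data.
    save.write_vtu()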
--- src/porepy/viz/exporter.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/src/porepy/viz/exporter.py b/src/porepy/viz/exporter.py index 8c4fec2a44..4d9c61d136 100644 --- a/src/porepy/viz/exporter.py +++ b/src/porepy/viz/exporter.py @@ -68,7 +68,8 @@ class Exporter: binary: (optional) export in binary format, default is True. If False, the output is in Ascii format, which allows better readibility and debugging, but requires more memory. - export_constants_separately: (optional) export constants in separate files + export_constants_separately: (optional) export constant data (includes mesh + and geometry information as well as user-defined data) in separate files to potentially reduce memory footprint, default is True. How to use: @@ -198,7 +199,15 @@ def add_constant_data( data: Optional[Union[DataInput, List[DataInput]]] = None, ) -> None: """ - Collect constant data (after unifying). And later export to own files. + Collect user-defined constant-in-time data, associated to grids, + and to be exported to separate files instead of the main files.. + + In principle, constant data is not different from standard output + data. It is merely printed to another file and just when any constant + data is updated (via updates of the grid, or the use of this routine). + Hence, the same input formats are allowed as for the usual field data, + which is varying in time. As part of the routine, the data is converted + to the same unified format. Parameters: data (Union[DataInput, List[DataInput]], optional): node and From 226b15ef99b5a4f90f4b68ee0e94035084fbc634 Mon Sep 17 00:00:00 2001 From: Jakub Both Date: Sun, 26 Jun 2022 00:09:20 +0200 Subject: [PATCH 64/83] tmp --- src/porepy/viz/exporter.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/porepy/viz/exporter.py b/src/porepy/viz/exporter.py index 4d9c61d136..3dac2ac63b 100644 --- a/src/porepy/viz/exporter.py +++ b/src/porepy/viz/exporter.py @@ -1410,7 +1410,9 @@ def _export_2d(self, gs: Iterable[pp.Grid]) -> Meshio_Geom: # Sort faces for each cell such that they form a chain. Use a function # compiled with Numba. This step is the bottleneck of this routine. - cfn = pp.utils.sort_points.sort_multiple_point_pairs(cfn).astype(int) + cfn = pp.utils.sort_points.sort_multiple_point_pairs(cfn).astype( + int + ) # For each cell pick the sorted nodes such that they form a chain # and thereby define the connectivity, i.e., skip every second row. From 40ed09b74cb0798ba10d697f6fcedcffa69cc521 Mon Sep 17 00:00:00 2001 From: Jakub Both Date: Sun, 26 Jun 2022 00:11:09 +0200 Subject: [PATCH 65/83] MAINT: Make correct use of typing keyword 'Optional'. --- src/porepy/viz/exporter.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/src/porepy/viz/exporter.py b/src/porepy/viz/exporter.py index 3dac2ac63b..30eda28453 100644 --- a/src/porepy/viz/exporter.py +++ b/src/porepy/viz/exporter.py @@ -27,9 +27,10 @@ Field = namedtuple("Field", ["name", "values"]) # Object for managing meshio-relevant data, as well as a container -# for its storage, taking dimensions as inputs. +# for its storage, taking dimensions as inputs. Since 0d grids are +# stored as 'None', allow for such values. 
Meshio_Geom = namedtuple("Meshio_Geom", ["pts", "connectivity", "cell_ids"]) -MD_Meshio_Geom = Dict[int, Optional[Meshio_Geom]] +MD_Meshio_Geom = Dict[int, Union[None, Meshio_Geom]] # Interface between subdomains Interface = Tuple[pp.Grid, pp.Grid] @@ -239,7 +240,7 @@ def add_constant_data( def write_vtu( self, data: Optional[Union[DataInput, List[DataInput]]] = None, - time_dependent: Optional[bool] = False, + time_dependent: bool = False, time_step: Optional[int] = None, gb: Optional[Union[pp.Grid, pp.GridBucket]] = None, ) -> None: @@ -259,7 +260,7 @@ def write_vtu( NOTE: The user has to make sure that each unique key has associated data values for all or no grids of each specific dimension. - time_dependent (boolean, optional): If False, file names will + time_dependent (boolean): If False (default), file names will not be appended with an index that marks the time step. Can be overwritten by giving a value to time_step; if not, the file names will subsequently be ending with 1, 2, etc. From 689bfb39e5dfcd8571953612962e7bc085cd8fb4 Mon Sep 17 00:00:00 2001 From: Jakub Both Date: Sun, 26 Jun 2022 00:11:35 +0200 Subject: [PATCH 66/83] MAINT: Remove redundant checks for mypy. --- src/porepy/viz/exporter.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/src/porepy/viz/exporter.py b/src/porepy/viz/exporter.py index 30eda28453..64c8f562f7 100644 --- a/src/porepy/viz/exporter.py +++ b/src/porepy/viz/exporter.py @@ -387,10 +387,6 @@ def write_pvd( assert isinstance(file_extension, list) assert len(file_extension) == times.shape[0] - # Make mypy happy - assert times is not None - assert file_extension is not None - # Extract the time steps related to constant data and # complying with file_extension. Implicitly test whether # file_extension is a subset of _exported_timesteps. From 7274e4a5fc4cc1e156eb6786a105923dede89490 Mon Sep 17 00:00:00 2001 From: jwboth <73653591+jwboth@users.noreply.github.com> Date: Sun, 26 Jun 2022 00:34:57 +0200 Subject: [PATCH 67/83] Apply suggestions from code review MAINT: Include suggestion by EK (code simplifications and improved doc). Co-authored-by: Eirik Keilegavlen --- src/porepy/viz/exporter.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/porepy/viz/exporter.py b/src/porepy/viz/exporter.py index 549e799cf9..d051d638fa 100644 --- a/src/porepy/viz/exporter.py +++ b/src/porepy/viz/exporter.py @@ -526,6 +526,7 @@ def toVectorFormat(value: np.ndarray, g: pp.Grid) -> np.ndarray: """ # Make some checks if not value.size % g.num_cells == 0: + # This line will raise an error if node or face data is exported. raise ValueError("The data array is not compatible with the grid.") # Convert to vectorial data if more data provided than grid cells available, @@ -1201,7 +1202,7 @@ def _export_1d(self, gs: Iterable[pp.Grid]) -> Meshio_Geom: # Since each cell is a line, the list of cell ids is trivial total_num_cells = np.sum(np.array([g.num_cells for g in gs])) cell_id: Dict[str, List[int]] = { - cell_type: np.arange(total_num_cells, dtype=int).tolist() + cell_type: [i for i in range(total_num_cells)] } # Data structure for storing node coordinates of all 1d grids. From f49f9d050206745218254f7eb610d7529703acba Mon Sep 17 00:00:00 2001 From: Jakub Both Date: Sun, 26 Jun 2022 00:57:24 +0200 Subject: [PATCH 68/83] MAINT: Replace use of object-arrays with lists. 
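The replacement relies on meshio accepting plain lists of cell blocks wherever
the dtype=object arrays were used before; a minimal sketch (toy mesh and file
name are illustrative; assumes a meshio version exposing CellBlock, as used
throughout this file):

    import meshio
    import numpy as np

    points = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
    # A plain list replaces the former np.empty(num_blocks, dtype=object).
    cells = [meshio.CellBlock("triangle", np.array([[0, 1, 2]]))]
    meshio.write("single_triangle.vtu", meshio.Mesh(points, cells))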
--- src/porepy/viz/exporter.py | 68 ++++++++++++++++++-------------------- 1 file changed, 32 insertions(+), 36 deletions(-) diff --git a/src/porepy/viz/exporter.py b/src/porepy/viz/exporter.py index 64c8f562f7..8fe08bcfd7 100644 --- a/src/porepy/viz/exporter.py +++ b/src/porepy/viz/exporter.py @@ -1226,15 +1226,18 @@ def _export_1d(self, gs: Iterable[pp.Grid]) -> Meshio_Geom: nodes_offset += g.num_nodes # Construct the meshio data structure - num_blocks = len(cell_to_nodes) - meshio_cells = np.empty(num_blocks, dtype=object) - meshio_cell_id = np.empty(num_blocks, dtype=object) + meshio_cells = list() + meshio_cell_id = list() # For each cell_type store the connectivity pattern cell_to_nodes for # the corresponding cells with ids from cell_id. - for block, (cell_type, cell_block) in enumerate(cell_to_nodes.items()): - meshio_cells[block] = meshio.CellBlock(cell_type, cell_block.astype(int)) - meshio_cell_id[block] = np.array(cell_id[cell_type]) + for (cell_type, cell_block) in cell_to_nodes.items(): + meshio_cells.append( + meshio.CellBlock(cell_type, cell_block.astype(int)) + ) + meshio_cell_id.append( + np.array(cell_id[cell_type]) + ) # Return final meshio data: points, cell (connectivity), cell ids return Meshio_Geom(meshio_pts, meshio_cells, meshio_cell_id) @@ -1425,21 +1428,22 @@ def _export_2d(self, gs: Iterable[pp.Grid]) -> Meshio_Geom: cell_offset += g.num_cells # Construct the meshio data structure - num_blocks = len(cell_to_nodes) - meshio_cells = np.empty(num_blocks, dtype=object) - meshio_cell_id = np.empty(num_blocks, dtype=object) + meshio_cells = list() + meshio_cell_id = list() # For each cell_type store the connectivity pattern cell_to_nodes for # the corresponding cells with ids from cell_id. - for block, (cell_type, cell_block) in enumerate(cell_to_nodes.items()): + for (cell_type, cell_block) in cell_to_nodes.items(): # Meshio requires the keyword "polygon" for general polygons. # Thus, remove the number of nodes associated to polygons. cell_type_meshio_format = "polygon" if "polygon" in cell_type else cell_type - meshio_cells[block] = meshio.CellBlock( - cell_type_meshio_format, cell_block.astype(int) + meshio_cells.append( + meshio.CellBlock(cell_type_meshio_format, cell_block.astype(int)) + ) + meshio_cell_id.append( + np.array(cell_id[cell_type]) ) - meshio_cell_id[block] = np.array(cell_id[cell_type]) # Return final meshio data: points, cell (connectivity), cell ids return Meshio_Geom(meshio_pts, meshio_cells, meshio_cell_id) @@ -1546,11 +1550,9 @@ def _export_simplex_3d(self, gs: Iterable[pp.Grid]) -> Meshio_Geom: cell_offset += g.num_cells # Initialize the meshio data structure for the connectivity and cell ids. - # There is only one cell type. - meshio_cells = np.empty(1, dtype=object) - meshio_cell_id = np.empty(1, dtype=object) - meshio_cells[0] = meshio.CellBlock("tetra", cell_to_nodes.astype(int)) - meshio_cell_id[0] = np.array(cell_id) + # There is only one cell type, and meshio expects iterable data structs. + meshio_cells = [meshio.CellBlock("tetra", cell_to_nodes.astype(int))] + meshio_cell_id = [np.array(cell_id)] # Return final meshio data: points, cell (connectivity), cell ids return Meshio_Geom(meshio_pts, meshio_cells, meshio_cell_id) @@ -1645,11 +1647,9 @@ def _export_hexahedron_3d(self, gs: Iterable[pp.Grid]) -> Meshio_Geom: cell_offset += g.num_cells # Initialize the meshio data structure for the connectivity and cell ids. - # There is only one cell type. 
- meshio_cells = np.empty(1, dtype=object) - meshio_cell_id = np.empty(1, dtype=object) - meshio_cells[0] = meshio.CellBlock("hexahedron", cell_to_nodes.astype(int)) - meshio_cell_id[0] = np.array(cell_id) + # There is only one cell type, and meshio expects iterable data structs. + meshio_cells = [meshio.CellBlock("hexahedron", cell_to_nodes.astype(int))] + meshio_cell_id = [np.array(cell_id)] # Return final meshio data: points, cell (connectivity), cell ids return Meshio_Geom(meshio_pts, meshio_cells, meshio_cell_id) @@ -1839,18 +1839,15 @@ def _export_polyhedron_3d(self, gs: Iterable[pp.Grid]) -> Meshio_Geom: nodes_offset += g.num_nodes cell_offset += g.num_cells - # Determine the total number of blocks / cell types. - num_blocks = len(cell_to_faces) - # Initialize the meshio data structure for the connectivity and cell ids. - meshio_cells = np.empty(num_blocks, dtype=object) - meshio_cell_id = np.empty(num_blocks, dtype=object) + meshio_cells = list() + meshio_cell_id = list() # Store the cells in meshio format - for block, (cell_type, cell_block) in enumerate(cell_to_faces.items()): + for (cell_type, cell_block) in cell_to_faces.items(): # Adapt the block number taking into account of previous cell types. - meshio_cells[block] = meshio.CellBlock(cell_type, cell_block) - meshio_cell_id[block] = np.array(cell_id[cell_type]) + meshio_cells.append(meshio.CellBlock(cell_type, cell_block)) + meshio_cell_id.append(np.array(cell_id[cell_type])) # Return final meshio data: points, cell (connectivity), cell ids return Meshio_Geom(meshio_pts, meshio_cells, meshio_cell_id) @@ -1882,15 +1879,14 @@ def _write( assert field.values is not None # For each field create a sub-vector for each geometrically uniform group of cells - num_block = meshio_geom.cell_ids.size - cell_data[field.name] = np.empty(num_block, dtype=object) + cell_data[field.name] = list() # Fill up the data - for block, ids in enumerate(meshio_geom.cell_ids): + for ids in meshio_geom.cell_ids: if field.values.ndim == 1: - cell_data[field.name][block] = field.values[ids] + cell_data[field.name].append(field.values[ids]) elif field.values.ndim == 2: - cell_data[field.name][block] = field.values[:, ids].T + cell_data[field.name].append(field.values[:, ids].T) else: raise ValueError From 5f269bbba270040aa0074193f35c747626423344 Mon Sep 17 00:00:00 2001 From: Jakub Both Date: Mon, 27 Jun 2022 12:09:49 +0200 Subject: [PATCH 69/83] STY: Black, mypy, typing. --- src/porepy/utils/sort_points.py | 2 +- src/porepy/viz/exporter.py | 24 +++++++++--------------- 2 files changed, 10 insertions(+), 16 deletions(-) diff --git a/src/porepy/utils/sort_points.py b/src/porepy/utils/sort_points.py index 08ed918836..04aeba9f53 100644 --- a/src/porepy/utils/sort_points.py +++ b/src/porepy/utils/sort_points.py @@ -124,7 +124,7 @@ def sort_multiple_point_pairs(lines: np.ndarray) -> np.ndarray: import numba @numba.jit(nopython=True) - def _function_to_compile(lines): + def _function_to_compile(lines: np.ndarray): """ Copy of pp.utils.sort_points.sort_point_pairs. This version is extended to multiple chains. Each chain is implicitly assumed to be circular. diff --git a/src/porepy/viz/exporter.py b/src/porepy/viz/exporter.py index f43e340b46..922cef5192 100644 --- a/src/porepy/viz/exporter.py +++ b/src/porepy/viz/exporter.py @@ -1195,9 +1195,7 @@ def _export_1d(self, gs: Iterable[pp.Grid]) -> Meshio_Geom: # Dictionary collecting all cell ids for each cell type. 
# Since each cell is a line, the list of cell ids is trivial total_num_cells = np.sum(np.array([g.num_cells for g in gs])) - cell_id: Dict[str, List[int]] = { - cell_type: [i for i in range(total_num_cells)] - } + cell_id: Dict[str, List[int]] = {cell_type: [i for i in range(total_num_cells)]} # Data structure for storing node coordinates of all 1d grids. num_pts = np.sum([g.num_nodes for g in gs]) @@ -1233,12 +1231,8 @@ def _export_1d(self, gs: Iterable[pp.Grid]) -> Meshio_Geom: # For each cell_type store the connectivity pattern cell_to_nodes for # the corresponding cells with ids from cell_id. for (cell_type, cell_block) in cell_to_nodes.items(): - meshio_cells.append( - meshio.CellBlock(cell_type, cell_block.astype(int)) - ) - meshio_cell_id.append( - np.array(cell_id[cell_type]) - ) + meshio_cells.append(meshio.CellBlock(cell_type, cell_block.astype(int))) + meshio_cell_id.append(np.array(cell_id[cell_type])) # Return final meshio data: points, cell (connectivity), cell ids return Meshio_Geom(meshio_pts, meshio_cells, meshio_cell_id) @@ -1442,9 +1436,7 @@ def _export_2d(self, gs: Iterable[pp.Grid]) -> Meshio_Geom: meshio_cells.append( meshio.CellBlock(cell_type_meshio_format, cell_block.astype(int)) ) - meshio_cell_id.append( - np.array(cell_id[cell_type]) - ) + meshio_cell_id.append(np.array(cell_id[cell_type])) # Return final meshio data: points, cell (connectivity), cell ids return Meshio_Geom(meshio_pts, meshio_cells, meshio_cell_id) @@ -1655,12 +1647,14 @@ def _export_hexahedron_3d(self, gs: Iterable[pp.Grid]) -> Meshio_Geom: # Return final meshio data: points, cell (connectivity), cell ids return Meshio_Geom(meshio_pts, meshio_cells, meshio_cell_id) - def _test_hex_meshio_format(self, nodes, cn_indices) -> bool: + def _test_hex_meshio_format( + self, nodes: np.ndarray, cn_indices: np.ndarray + ) -> bool: import numba @numba.jit(nopython=True) - def _function_to_compile(nodes, cn_indices) -> bool: + def _function_to_compile(nodes: np.ndarray, cn_indices: np.ndarray) -> bool: """ Test whether the node numbering for each cell complies with the hardcoded numbering used by meshio, cf. documentation @@ -1870,7 +1864,7 @@ def _write( meshio format (for a single dimension). """ # Initialize empty cell data dictinary - cell_data = {} + cell_data: Dict[str, List[np.ndarray]] = {} # Split the data for each group of geometrically uniform cells # Utilize meshio_geom for this. From b71c265995b0e3a629aa331526738bac5b63300f Mon Sep 17 00:00:00 2001 From: Jakub Both Date: Mon, 27 Jun 2022 12:11:18 +0200 Subject: [PATCH 70/83] MAINT: Specify output type. --- src/porepy/utils/sort_points.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/porepy/utils/sort_points.py b/src/porepy/utils/sort_points.py index 04aeba9f53..fe37ee30b1 100644 --- a/src/porepy/utils/sort_points.py +++ b/src/porepy/utils/sort_points.py @@ -124,7 +124,7 @@ def sort_multiple_point_pairs(lines: np.ndarray) -> np.ndarray: import numba @numba.jit(nopython=True) - def _function_to_compile(lines: np.ndarray): + def _function_to_compile(lines: np.ndarray) -> np.ndarray: """ Copy of pp.utils.sort_points.sort_point_pairs. This version is extended to multiple chains. Each chain is implicitly assumed to be circular. 
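Note: after the move in PATCH 62 and the typing cleanups above, the helper can
be exercised directly; a hedged example with a single circular chain of three
segments (values illustrative):

    import numpy as np
    import porepy as pp

    # Rows 0-1 hold one chain; each column is a segment given by its two endpoints.
    lines = np.array([[0, 2, 1], [1, 0, 2]])
    sorted_lines = pp.utils.sort_points.sort_multiple_point_pairs(lines)
    # Result: [[0., 1., 2.], [1., 2., 0.]], i.e. the chain 0 -> 1 -> 2 -> 0.
    # Callers cast back to integers, as done in _export_2d via astype(int).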
From f694760313cb5e78d516b8c4e6b19a71b8bcd5f0 Mon Sep 17 00:00:00 2001 From: Jakub Both Date: Thu, 30 Jun 2022 22:40:57 +0200 Subject: [PATCH 71/83] MAINT: Update exporter and test_vtk wrt md_grid refactoring --- src/porepy/viz/exporter.py | 946 ++++++++++++++++++++----------------- tests/unit/test_vtk.py | 96 ++-- 2 files changed, 552 insertions(+), 490 deletions(-) diff --git a/src/porepy/viz/exporter.py b/src/porepy/viz/exporter.py index c61fba23df..64028b6964 100644 --- a/src/porepy/viz/exporter.py +++ b/src/porepy/viz/exporter.py @@ -10,7 +10,7 @@ import os import sys from collections import namedtuple -from typing import Dict, Iterable, List, Optional, Set, Tuple, Union +from typing import Iterable, Optional, Union import meshio import numpy as np @@ -24,73 +24,35 @@ # for its storage, taking dimensions as inputs. Since 0d grids are # stored as 'None', allow for such values. Meshio_Geom = namedtuple("Meshio_Geom", ["pts", "connectivity", "cell_ids"]) -MD_Meshio_Geom = Dict[int, Union[None, Meshio_Geom]] - -# Interface between subdomains -Interface = Tuple[pp.Grid, pp.Grid] +MD_Meshio_Geom = dict[int, Union[None, Meshio_Geom]] # All allowed data structures to define data for exporting DataInput = Union[ # Keys for states str, # Subdomain specific data types - Tuple[List[pp.Grid], str], - Tuple[pp.Grid, str, np.ndarray], - Tuple[str, np.ndarray], + tuple[list[pp.Grid], str], + tuple[pp.Grid, str, np.ndarray], + tuple[str, np.ndarray], # Interface specific data types - Tuple[List[Interface], str], - Tuple[Interface, str, np.ndarray], + tuple[list[pp.MortarGrid], str], + tuple[pp.MortarGrid, str, np.ndarray], ] # Data structure in which data is stored after preprocessing -SubdomainData = Dict[Tuple[pp.Grid, str], np.ndarray] -InterfaceData = Dict[Tuple[Interface, str], np.ndarray] +SubdomainData = dict[tuple[pp.Grid, str], np.ndarray] +InterfaceData = dict[tuple[pp.MortarGrid, str], np.ndarray] class Exporter: """ Class for exporting data to vtu files. - Parameters: - gb: grid bucket (if grid is provided, it will be converted to a grid bucket) - file_name: the root of file name without any extension. - folder_name: (optional) the name of the folder to save the file. - If the folder does not exist it will be created. - - Optional arguments in kwargs: - fixed_grid: (optional) in a time dependent simulation specify if the - grid changes in time or not. The default is True. - binary: (optional) export in binary format, default is True. If False, - the output is in Ascii format, which allows better readibility and - debugging, but requires more memory. - export_constants_separately: (optional) export constant data (includes mesh - and geometry information as well as user-defined data) in separate files - to potentially reduce memory footprint, default is True. - - How to use: - The Exporter allows for various way to express which state variables, on which grids, and which extra data should be exported. A thorough demonstration is available as a dedicated tutorial. Check out tutorials/exporter.ipynb. - Here, merely a brief demonstration of the use of Exporter is presented. - - If you need to export the state with key "pressure" on a single grid: - save = Exporter(g, "solution", folder_name="results") - save.write_vtu(["pressure"]) - - In a time loop, if you need to export states with keys "pressure" and - "displacement" stored in a grid bucket. 
-        save = Exporter(gb, "solution", folder_name="results")
-        while time:
-            save.write_vtu(["pressure", "displacement"], time_step=i)
-        save.write_pvd(times)
-
-        where times is a list of actual times (not time steps), associated
-        to the previously exported time steps. If times is not provided
-        the time steps will be used instead.
-
     In general, pvd files gather data exported in separate files, including
     data on differently dimensioned grids, constant data, and finally time steps.
@@ -100,52 +62,78 @@ class Exporter:
     NOTE: the following names are reserved for constant data exporting
     and should not be used otherwise (otherwise data is overwritten):
     grid_dim, is_mortar, mortar_side, cell_id, grid_node_number,
-    grid_edge_number
+    grid_edge_number.
+
+    Examples:
+        # Here, merely a brief demonstration of the use of Exporter is presented.
+
+        # If you need to export the state with key "pressure" on a single grid:
+        save = Exporter(g, "solution", folder_name="results")
+        save.write_vtu(["pressure"])
+
+        # In a time loop, if you need to export states with keys "pressure" and
+        # "displacement" stored in a mixed-dimensional grid.
+        save = Exporter(mdg, "solution", folder_name="results")
+        while time:
+            save.write_vtu(["pressure", "displacement"], time_step=i)
+        save.write_pvd(times)
+
+        # where times is a list of actual times (not time steps), associated
+        # to the previously exported time steps. If times is not provided
+        # the time steps will be used instead.
     """

     def __init__(
         self,
-        grid: Union[pp.Grid, pp.GridBucket],
+        grid: Union[pp.Grid, pp.MixedDimensionalGrid],
         file_name: str,
         folder_name: Optional[str] = None,
         **kwargs,
     ) -> None:
         """Initialization of Exporter.

-        Parameters:
-            gb (Union[pp.Grid, pp.Gridbucket]): grid or gridbucket containing all
-                mesh information to be exported.
+        Args:
+            grid (Union[pp.Grid, pp.MixedDimensionalGrid]): subdomain or mixed-dimensional grid
+                containing all mesh information (and data in the latter case) to be exported.
             file_name (str): basis for file names used for storing the output
             folder_name (str, optional): name of the folder in which files are stored
-            kwargs (optional): Optional keywords;
-                'fixed_grid' (boolean) to control whether the grid(bucket) may be redfined
-                (default True);
-                'binary' (boolean) controlling whether data is stored in binary format
-                (default True);
-                'export_constants_separately' (boolean) controlling whether
-                constant data is exported in separate files, which may be of interest
-                when exporting large data sets (in particular of constant data) for
-                many time steps (default True); note, however, that the mesh is
-                exported to each vtu file, which may also require significant amount
-                of storage.
+            kwargs: Optional keyword arguments:
+                fixed_grid (boolean): to control whether the grid may be redefined later
+                    (default True)
+                binary (boolean): controlling whether data is stored in binary format
+                    (default True)
+                export_constants_separately (boolean): controlling whether
+                    constant data is exported in separate files, which may be of interest
+                    when exporting large data sets (in particular of constant data) for
+                    many time steps (default True); note, however, that the mesh is
+                    exported to each vtu file, which may also require significant amount
+                    of storage.
+
+        Raises:
+            TypeError if grid has other type than pp.Grid or pp.MixedDimensionalGrid
+            TypeError if kwargs contains unexpected keyword arguments
+            NotImplementedError if numba is not installed
         """
-        # Exporter is operating on grid buckets. Convert to grid bucket if grid is provided.
+        # Exporter is operating on mixed-dimensional grids. Convert to mixed-dimensional
Convert to grid bucket if grid is provided. - if isinstance(gb, pp.Grid): - self.gb = pp.GridBucket() - self.gb.add_nodes(gb) - elif isinstance(gb, pp.GridBucket): - self.gb = gb + # Exporter is operating on mixed-dimensional grids. Convert to mixed-dimensional + # grids if subdomain grid is provided. + if isinstance(grid, pp.Grid): + self._mdg = pp.MixedDimensionalGrid() + self._mdg.add_subdomains(grid) + elif isinstance(grid, pp.MixedDimensionalGrid): + self._mdg = grid else: - raise TypeError("Exporter only supports single grids and grid buckets.") + raise TypeError( + "Exporter only supports subdomain and mixed-dimensional grids." + ) # Store target location for storing vtu and pvd files. - self.file_name = file_name - self.folder_name = folder_name + self._file_name = file_name + self._folder_name = folder_name # Check for optional keywords - self.fixed_grid: bool = kwargs.pop("fixed_grid", True) - self.binary: bool = kwargs.pop("binary", True) - self.export_constants_separately: bool = kwargs.pop( + self._fixed_grid: bool = kwargs.pop("fixed_grid", True) + self._binary: bool = kwargs.pop("binary", True) + self._export_constants_separately: bool = kwargs.pop( "export_constants_separately", True ) if kwargs: @@ -154,12 +142,12 @@ def __init__( # Generate infrastructure for storing fixed-dimensional grids in # meshio format. Include all but the 0-d grids - self.dims = np.setdiff1d(self.gb.all_dims(), [0]) + self._dims = np.unique([sd.dim for sd in self._mdg.subdomains() if sd.dim > 0]) self.meshio_geom: MD_Meshio_Geom = dict() # Generate infrastructure for storing fixed-dimensional mortar grids # in meshio format. - self.m_dims = np.unique([d["mortar_grid"].dim for _, d in self.gb.edges()]) + self._m_dims = np.unique([intf.dim for intf in self._mdg.interfaces()]) self.m_meshio_geom: MD_Meshio_Geom = dict() # Generate geometrical information in meshio format @@ -171,17 +159,17 @@ def __init__( self._time_step_counter: int = 0 # Storage for file name extensions for time steps - self._exported_timesteps: List[int] = [] + self._exported_timesteps: list[int] = [] # Reference to the last time step used for exporting constant data. self._time_step_constants: int = 0 # Storage for file name extensions for time steps, regarding constant data. - self._exported_timesteps_constants: List[int] = list() + self._exported_timesteps_constants: list[int] = list() # Identifier for whether constant data is up-to-date. self._exported_constant_data_up_to_date: bool = False # Parameter to be used in several occasions for adding time stamps. - self.padding = 6 + self._padding = 6 # Require numba if "numba" not in sys.modules: @@ -191,11 +179,11 @@ def __init__( def add_constant_data( self, - data: Optional[Union[DataInput, List[DataInput]]] = None, + data: Optional[Union[DataInput, list[DataInput]]] = None, ) -> None: """ Collect user-defined constant-in-time data, associated to grids, - and to be exported to separate files instead of the main files.. + and to be exported to separate files instead of the main files. In principle, constant data is not different from standard output data. It is merely printed to another file and just when any constant @@ -204,10 +192,10 @@ def add_constant_data( which is varying in time. As part of the routine, the data is converted to the same unified format. - Parameters: - data (Union[DataInput, List[DataInput]], optional): node and - edge data, prescribed through strings, or tuples of - grids/edges, keys and values. 
If not provided only + Args: + data (Union[DataInput, list[DataInput]], optional): subdomain and + interface data, prescribed through strings, or tuples of + subdomains/interfaces, keys and values. If not provided only geometical infos are exported. NOTE: The user has to make sure that each unique key has @@ -233,10 +221,10 @@ def add_constant_data( def write_vtu( self, - data: Optional[Union[DataInput, List[DataInput]]] = None, + data: Optional[Union[DataInput, list[DataInput]]] = None, time_dependent: bool = False, time_step: Optional[int] = None, - gb: Optional[Union[pp.Grid, pp.GridBucket]] = None, + grid: Optional[Union[pp.Grid, pp.MixedDimensionalGrid]] = None, ) -> None: """ Interface function to export the grid and additional data with meshio. @@ -245,10 +233,10 @@ def write_vtu( triangle/quad, while in 3d as polyhedra/tetrahedra/hexahedra. In all the dimensions the geometry of the mesh needs to be computed. - Parameters: - data (Union[DataInput, List[DataInput]], optional): node and - edge data, prescribed through strings, or tuples of - grids/edges, keys and values. If not provided only + Args: + data (Union[DataInput, list[DataInput]], optional): subdomain and + interface data, prescribed through strings, or tuples of + subdomains/interfaces, keys and values. If not provided only geometrical infos are exported. NOTE: The user has to make sure that each unique key has @@ -260,22 +248,26 @@ def write_vtu( the file names will subsequently be ending with 1, 2, etc. time_step (int, optional): will be used as appendix to define the file corresponding to this specific time step. - gb (Union[pp.Grid, pp.Gridbucket], optional): grid or gridbucket - if it is not fixed and should be updated. + grid (Union[pp.Grid, pp.MixedDimensionalGrid], optional): subdomain or + mixed-dimensional grid if it is not fixed and should be updated. + + Raises: + ValueError if a grid is provided as argument although the exporter + has been instructed that the grid is fixed """ - # Update the grid (bucket) but only if allowed, i.e., the initial grid - # (bucket) has not been characterized as fixed. - if self.fixed_grid and gb is not None: + # Update the grid but only if allowed, i.e., the initial grid + # has not been characterized as fixed. + if self._fixed_grid and grid is not None: raise ValueError("Inconsistency in exporter setting") - elif not self.fixed_grid and gb is not None: - # Require a grid bucket. Thus, convert grid -> grid bucket. - if isinstance(gb, pp.Grid): - # Create a new grid bucket solely with a single grid as nodes. - self.gb = pp.GridBucket() - self.gb.add_nodes(gb) + elif not self._fixed_grid and grid is not None: + # Require a mixed-dimensional grid. Thus convert if single subdomain grid provided. + if isinstance(grid, pp.Grid): + # Create a new mixed-dimensional solely with grid as single subdomain. 
+ self._mdg = pp.MixedDimensionalGrid()
+ self._mdg.add_subdomains(grid)
else:
- self.gb = gb
+ self._mdg = grid

# Update geometrical info in meshio format for the updated grid
self._update_meshio_geom()
@@ -297,7 +289,7 @@ def write_vtu(
subdomain_data, interface_data = self._sort_and_unify_data(data)

# Export constant data to separate or standard files
- if self.export_constants_separately:
+ if self._export_constants_separately:
# Export constant data to vtu when outdated
if not self._exported_constant_data_up_to_date:
# Export constant subdomain data to vtu
@@ -328,10 +320,10 @@ def write_vtu(
else:
# Append constant subdomain and interface data to the
# standard containers for subdomain and interface data.
- for key_s, value in self._constant_subdomain_data.items():
- subdomain_data[key_s] = value.copy()
- for key_i, value in self._constant_interface_data.items():
- interface_data[key_i] = value.copy()
+ for key_sd, value in self._constant_subdomain_data.items():
+ subdomain_data[key_sd] = value.copy()
+ for key_intf, value in self._constant_interface_data.items():
+ interface_data[key_intf] = value.copy()

# Export subdomain and interface data to vtu format if existing
if subdomain_data:
@@ -339,33 +331,33 @@ def write_vtu(
if interface_data:
self._export_data_vtu(interface_data, time_step, interface_data=True)

- # Export grid bucket to pvd format
- file_name = self._make_file_name(self.file_name, time_step, extension=".pvd")
- file_name = self._append_folder_name(self.folder_name, file_name)
- self._export_gb_pvd(file_name, time_step)
+ # Export mixed-dimensional grid to pvd format
+ file_name = self._make_file_name(self._file_name, time_step, extension=".pvd")
+ file_name = self._append_folder_name(self._folder_name, file_name)
+ self._export_mdg_pvd(file_name, time_step)

def write_pvd(
self,
times: Optional[np.ndarray] = None,
- file_extension: Optional[Union[np.ndarray, List[int]]] = None,
+ file_extension: Optional[Union[np.ndarray, list[int]]] = None,
) -> None:
"""
Interface function to export in PVD file the time loop information.
The user should open only this file in paraview.

- We assume that the VTU associated files have the same name.
- We assume that the VTU associated files are in the working directory.
-
- Parameters:
- times (optional, np.ndarray): array of actual times to be exported. These will
- be the times associated with indivdiual time steps in, say, Paraview.
- By default, the times will be associated with the order in which the time
- steps were exported. This can be overridden by the file_extension argument.
- If no times are provided, the exported time steps are used.
- file_extension (np.array-like, optional): End of file names used in the export
- of individual time steps, see self.write_vtu(). If provided, it should have
- the same length as time. If not provided, the file names will be picked
- from those used when writing individual time steps.
+ We assume that the associated VTU files have the same name and that
+ they are located in the working directory.
+
+ Args:
+ times (np.ndarray, optional): array of actual times to be exported. These will
+ be the times associated with individual time steps in, say, Paraview.
+ By default, the times will be associated with the order in which the time
+ steps were exported. This can be overridden by the file_extension argument.
+ If no times are provided, the exported time steps are used.
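+ For instance, a minimal sketch (the time values here are
+ purely illustrative):
+ times = np.array([0.0, 0.1, 0.2])
+ save.write_pvd(times)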
+ file_extension (np.array-like, optional): End of file names used in the export + of individual time steps, see self.write_vtu(). If provided, it should have + the same length as time. If not provided, the file names will be picked + from those used when writing individual time steps. """ if times is None: @@ -385,7 +377,7 @@ def write_pvd( # file_extension is a subset of _exported_timesteps. # Only if constant data has been exported. include_constant_data = ( - self.export_constants_separately + self._export_constants_separately and len(self._exported_timesteps_constants) > 0 ) if include_constant_data: @@ -394,11 +386,11 @@ def write_pvd( self._exported_timesteps_constants[i] for i in indices ] - # Perform the same procedure as in _export_gb_pvd + # Perform the same procedure as in _export_mdg_pvd # but looping over all designated time steps. o_file = open( - self._append_folder_name(self.folder_name, self.file_name) + ".pvd", "w" + self._append_folder_name(self._folder_name, self._file_name) + ".pvd", "w" ) b = "LittleEndian" if sys.byteorder == "little" else "BigEndian" c = ' compressor="vtkZLibDataCompressor"' @@ -414,23 +406,23 @@ def write_pvd( # Gather all data, and assign the actual time. for time, fn in zip(times, file_extension): # Go through all possible data types, analogously to - # _export_gb_pvd. + # _export_mdg_pvd. # Subdomain data - for dim in self.dims: + for dim in self._dims: if self.meshio_geom[dim] is not None: o_file.write( - fm % (time, self._make_file_name(self.file_name, fn, dim)) + fm % (time, self._make_file_name(self._file_name, fn, dim)) ) # Interface data. - for dim in self.m_dims: + for dim in self._m_dims: if self.m_meshio_geom[dim] is not None: o_file.write( fm % ( time, - self._make_file_name(self.file_name + "_mortar", fn, dim), + self._make_file_name(self._file_name + "_mortar", fn, dim), ) ) @@ -438,27 +430,27 @@ def write_pvd( if include_constant_data: for time, fn_constants in zip(times, file_extension_constants): # Constant subdomain data. - for dim in self.dims: + for dim in self._dims: if self.meshio_geom[dim] is not None: o_file.write( fm % ( time, self._make_file_name( - self.file_name + "_constant", fn_constants, dim + self._file_name + "_constant", fn_constants, dim ), ) ) # Constant interface data. - for dim in self.m_dims: + for dim in self._m_dims: if self.m_meshio_geom[dim] is not None: o_file.write( fm % ( time, self._make_file_name( - self.file_name + "_constant_mortar", + self._file_name + "_constant_mortar", fn_constants, dim, ), @@ -472,24 +464,27 @@ def write_pvd( def _sort_and_unify_data( self, - data=None, # ignore type which is essentially Union[DataInput, List[DataInput]] - ) -> Tuple[SubdomainData, InterfaceData]: + data=None, # ignore type which is essentially Union[DataInput, list[DataInput]] + ) -> tuple[SubdomainData, InterfaceData]: """ Preprocess data. The routine has two goals: 1. Splitting data into subdomain and interface data. 2. Unify the data format. Store data in dictionaries, with keys given by - grids/edges and names, and values given by the data arrays. + subdomains/interfaces and names, and values given by the data arrays. - Parameters: - data (Union[DataInput, List[DataInput]], optional): data + Args: + data (Union[DataInput, list[DataInput]], optional): data provided by the user in the form of strings and/or tuples - of grids/edges. + of subdomains/interfaces. 
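+
+ For illustration, all of the following entries are valid at once
+ (here sd is some subdomain and flux_array a cell-wise numpy array;
+ both names are placeholders):
+ data = ["pressure", ([sd], "displacement"), (sd, "flux", flux_array)]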
Returns: - Tuple[SubdomainData, InterfaceData]: Subdomain and edge data decomposed and + tuple[SubdomainData, InterfaceData]: Subdomain and interface data decomposed and brought into unified format. + + Raises: + ValueError if the data type provided is not supported """ # The strategy is to traverse the input data and check each data point @@ -497,52 +492,69 @@ def _sort_and_unify_data( # For each supported input data format, a dedicated routine is implemented # to 1. identify whether the type is present; and 2. update the data # dictionaries (to be returned in this routine) in the correct format: - # The dictionaries use (grid,key) and (edge,key) as key and the data + # The dictionaries use (subdomain,key) and (interface,key) as key and the data # array as value. # Now a list of these subroutines follows before the definition of the # actual routine. # Aux. method: Transform scalar- to vector-ranged values. - def toVectorFormat(value: np.ndarray, g: pp.Grid) -> np.ndarray: + def _toVectorFormat( + value: np.ndarray, grid: Union[pp.Grid, pp.MortarGrid] + ) -> np.ndarray: """ Check whether the value array has the right dimension corresponding to the grid size. If possible, translate the value to a vectorial object, but do nothing if the data naturally can be interpreted as scalar data. + + Args: + value (np.ndarray): input array to be converted + grid (pp.Grid or pp.MortarGrid): subdomain or interface to which value + is associated to + + Raises: + ValueError if the value array is not compatible with the grid """ # Make some checks - if not value.size % g.num_cells == 0: + if not value.size % grid.num_cells == 0: # This line will raise an error if node or face data is exported. raise ValueError("The data array is not compatible with the grid.") # Convert to vectorial data if more data provided than grid cells available, # and the value array is not already in vectorial format - if not value.size == g.num_cells and not ( - len(value.shape) > 1 and value.shape[1] == g.num_cells + if not value.size == grid.num_cells and not ( + len(value.shape) > 1 and value.shape[1] == grid.num_cells ): - value = np.reshape(value, (-1, g.num_cells), "F") + value = np.reshape(value, (-1, grid.num_cells), "F") return value # Aux. method: Check type for Interface - def isinstance_interface(e: Interface) -> bool: + def isinstance_interface(intf: pp.MortarGrid) -> bool: """ - Implementation of isinstance(e, Interface). + Implementation of isinstance(intf, pp.MortarGrid). """ - return ( - isinstance(e, tuple) - and len(e) == 2 - and isinstance(e[0], pp.Grid) - and isinstance(e[1], pp.Grid) - ) - - # Aux. method: Detect and convert data of form "key". - def add_data_from_str(pt, subdomain_data, interface_data): + return isinstance(intf, pp.MortarGrid) + # return ( + # isinstance(e, tuple) + # and len(e) == 2 + # and isinstance(e[0], pp.Grid) + # and isinstance(e[1], pp.Grid) + # ) + + # TODO rename pt to data_pt? + # TODO typing + def add_data_from_str( + pt, subdomain_data: dict, interface_data: dict + ) -> tuple[dict, dict, bool]: """ - Check whether data is provided by a key of a field - could be both node - and edge data. If so, collect all data corresponding to grids and edges - identified by the key. + Check whether data is provided by a key of a field - could be both subdomain + and interface data. If so, collect all data corresponding to subdomains and + interfaces identified by the key. 
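+
+ For instance (an illustrative sketch), passing the plain string
+ "pressure" to write_vtu collects d[pp.STATE]["pressure"] from every
+ subdomain and interface on which such a state is stored:
+ save.write_vtu(["pressure"])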
+
+ Raises:
+ ValueError if no data available in the mixed-dimensional grid for given key
"""

# Only continue in case data is of type str
@@ -554,35 +566,60 @@ def add_data_from_str(pt, subdomain_data, interface_data):
# Initialize tag storing whether data corresponding to the key has been found.
has_key = False

- # Check data associated to subdomain field data
- for g, d in self.gb.nodes():
- if pp.STATE in d and key in d[pp.STATE]:
- # Mark the key as found
- has_key = True
-
+ def _add_data(
+ key: str,
+ grid: Union[pp.Grid, pp.MortarGrid],
+ grid_data: dict,
+ export_data: dict,
+ ) -> bool:
+ if pp.STATE in grid_data and key in grid_data[pp.STATE]:
# Fetch data and convert to vectorial format if suggested by the size
- value: np.ndarray = toVectorFormat(d[pp.STATE][key], g)
+ value: np.ndarray = _toVectorFormat(grid_data[pp.STATE][key], grid)

# Add data point in correct format to the collection
- subdomain_data[(g, key)] = value
+ export_data[(grid, key)] = value

- # Check data associated to interface field data
- for e, d in self.gb.edges():
- if pp.STATE in d and key in d[pp.STATE]:
- # Mark the key as found
- has_key = True
+ # Mark as success
+ return True
+ else:
+ return False

- # Fetch data and convert to vectorial format if suggested by the size
- mg = d["mortar_grid"]
- value = toVectorFormat(d[pp.STATE][key], mg)
+ # Check data associated to subdomain field data
+ for sd, sd_data in self._mdg.subdomains(return_data=True):
+ if _add_data(key, sd, sd_data, subdomain_data):
+ has_key = True
+
+ # TODO can we unify? e.g., for grid, grid_data in self._mdg.subdomains_and_interfaces(return_data=True)

- # Add data point in correct format to the collection
- interface_data[(e, key)] = value
+ # Check data associated to interface field data
+ for intf, intf_data in self._mdg.interfaces(return_data=True):
+ if _add_data(key, intf, intf_data, interface_data):
+ has_key = True

# Make sure the key exists
if not has_key:
raise ValueError(
f"No data with provided key {key} present in the grid."
)

# Return updated dictionaries and indicate successful conversion.
@@ -592,47 +629,49 @@ def add_data_from_str(pt, subdomain_data, interface_data):
# Return original data dictionaries and indicate no modification.
return subdomain_data, interface_data, False

- # Aux. method: Detect and convert data of form ([grids], "key").
- def add_data_from_Tuple_subdomains_str(
- pt: Tuple[List[pp.Grid], str], subdomain_data, interface_data
- ):
+ def add_data_from_tuple_subdomains_str(
+ pt: tuple[list[pp.Grid], str], subdomain_data: dict, interface_data: dict
+ ) -> tuple[dict, dict, bool]:
"""
- Check whether data is provided as tuple (g, key),
- where g is a list of grids, and key is a string.
+ Check whether data is provided as tuple (subdomains, key), + where subdomains is a list of subdomains, and key is a string. This routine explicitly checks only for subdomain data. + + Raises: + ValueError if there exists no state in the subdomain data with given key """ - # Implementation of isinstance(t, Tuple[List[pp.Grid], str]). + # Implementation of isinstance(pt, tuple[list[pp.Grid], str]). isinstance_tuple_subdomains_str = list(map(type, pt)) == [ list, str, - ] and all([isinstance(g, pp.Grid) for g in pt[0]]) + ] and all([isinstance(sd, pp.Grid) for sd in pt[0]]) # If of correct type, convert to unique format and update subdomain data. if isinstance_tuple_subdomains_str: # By construction, the 1. and 2. components are a list of grids and a key. - grids: List[pp.Grid] = pt[0] + subdomains: list[pp.Grid] = pt[0] key = pt[1] # Loop over grids and fetch the states corresponding to the key - for g in grids: + for sd in subdomains: # Fetch the data dictionary containing the data value - d = self.gb.node_props(g) + sd_data = self._mdg.subdomain_data(sd) # Make sure the data exists. - if not (pp.STATE in d and key in d[pp.STATE]): + if not (pp.STATE in sd_data and key in sd_data[pp.STATE]): raise ValueError( f"""No state with prescribed key {key} available on selected subdomains.""" ) # Fetch data and convert to vectorial format if suitable - value = toVectorFormat(d[pp.STATE][key], g) + value = _toVectorFormat(sd_data[pp.STATE][key], sd) # Add data point in correct format to collection - subdomain_data[(g, key)] = value + subdomain_data[(sd, key)] = value # Return updated dictionaries and indicate succesful conversion. return subdomain_data, interface_data, True @@ -642,45 +681,51 @@ def add_data_from_Tuple_subdomains_str( return subdomain_data, interface_data, False # Aux. method: Detect and convert data of form ([interfaces], "key"). - def add_data_from_Tuple_interfaces_str(pt, subdomain_data, interface_data): + def add_data_from_tuple_interfaces_str( + pt, subdomain_data, interface_data + ) -> tuple[dict, dict, bool]: """ - Check whether data is provided as tuple (e, key), - where e is a list of interfaces, and key is a string. + Check whether data is provided as tuple (interfaces, key), + where interfaces is a list of interfaces, and key is a string. This routine explicitly checks only for interface data. + + This routine is a translation of add_data_from_tuple_subdomains_str to interfaces + + Raises: + ValueError if there exists no state in the interface data with given key """ - # Implementation of isinstance(t, Tuple[List[Interface], str]). + # Implementation of isinstance(t, tuple[list[pp.MortarGrid], str]). isinstance_tuple_interfaces_str = list(map(type, pt)) == [ list, str, - ] and all([isinstance_interface(g) for g in pt[0]]) + ] and all([isinstance_interface(intf) for intf in pt[0]]) # If of correct type, convert to unique format and update subdomain data. if isinstance_tuple_interfaces_str: # By construction, the 1. and 2. components are a list of interfaces and a key. - edges: List[Interface] = pt[0] + interfaces: list[pp.MortarGrid] = pt[0] key = pt[1] - # Loop over edges and fetch the states corresponding to the key - for e in edges: + # Loop over interfaces and fetch the states corresponding to the key + for intf in interfaces: # Fetch the data dictionary containing the data value - d = self.gb.edge_props(e) + intf_data = self._mdg.interface_data(intf) # Make sure the data exists. 
- if not (pp.STATE in d and key in d[pp.STATE]): + if not (pp.STATE in intf_data and key in intf_data[pp.STATE]): raise ValueError( f"""No state with prescribed key {key} available on selected interfaces.""" ) # Fetch data and convert to vectorial format if suitable - mg = d["mortar_grid"] - value = toVectorFormat(d[pp.STATE][key], mg) + value = _toVectorFormat(intf_data[pp.STATE][key], intf) # Add data point in correct format to collection - interface_data[(e, key)] = value + interface_data[(intf, key)] = value # Return updated dictionaries and indicate succesful conversion. return subdomain_data, interface_data, True @@ -689,32 +734,36 @@ def add_data_from_Tuple_interfaces_str(pt, subdomain_data, interface_data): # Return original data dictionaries and indicate no modification. return subdomain_data, interface_data, False - # Aux. method: Detect and convert data of form (g, "key", value). - def add_data_from_Tuple_subdomain_str_array(pt, subdomain_data, interface_data): + def add_data_from_tuple_subdomain_str_array( + pt, subdomain_data, interface_data + ) -> tuple[dict, dict, bool]: """ - Check whether data is provided as tuple (g, key, data), - where g is a single grid, key is a string, and data is a user-defined data array. - This routine explicitly checks only for subdomain data. + Check whether data is provided as tuple (sd, key, data), + where sd is a single subdomain, key is a string, and data is a user-defined data + array. This routine explicitly checks only for subdomain data. """ - # Implementation of isinstance(t, Tuple[pp.Grid, str, np.ndarray]). + # Implementation of isinstance(t, tuple[pp.Grid, str, np.ndarray]). # NOTE: The type of a grid is identifying the specific grid type (simplicial # etc.) Thus, isinstance is used here to detect whether a grid is provided. types = list(map(type, pt)) - isinstance_Tuple_subdomain_str_array = isinstance(pt[0], pp.Grid) and types[ - 1: - ] == [str, np.ndarray] + isinstance_tuple_subdomain_str_array = ( + isinstance(pt[0], pp.Grid) + and types[1:] == [str, np.ndarray] + # TODO + # list(map(type, pt))[1:] == [str, np.ndarray] + ) # Convert data to unique format and update the subdomain data dictionary. - if isinstance_Tuple_subdomain_str_array: + if isinstance_tuple_subdomain_str_array: - # Interpret (g, key, value) = (pt[0], pt[1], pt[2]); - g = pt[0] + # Interpret (sd, key, value) = (pt[0], pt[1], pt[2]); + sd = pt[0] key = pt[1] - value = toVectorFormat(pt[2], g) + value = _toVectorFormat(pt[2], sd) # Add data point in correct format to collection - subdomain_data[(g, key)] = value + subdomain_data[(sd, key)] = value # Return updated dictionaries and indicate succesful conversion. return subdomain_data, interface_data, True @@ -723,34 +772,33 @@ def add_data_from_Tuple_subdomain_str_array(pt, subdomain_data, interface_data): # Return original data dictionaries and indicate no modification. return subdomain_data, interface_data, False - # Aux. method: Detect and convert data of form (e, "key", value). - def add_data_from_Tuple_interface_str_array(pt, subdomain_data, interface_data): + def add_data_from_tuple_interface_str_array( + pt, subdomain_data: dict, interface_data: dict + ) -> tuple[dict, dict, bool]: """ Check whether data is provided as tuple (g, key, data), where e is a single interface, key is a string, and data is a user-defined data array. This routine explicitly checks only for interface data. + + Translation of add_data_from_tuple_subdomain_str_array to interfaces. 
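+
+ For instance (an illustrative sketch; intf is some pp.MortarGrid and
+ lam a cell-wise array on it, both placeholder names):
+ save.write_vtu([(intf, "mortar_flux", lam)])
+ stores lam under interface_data[(intf, "mortar_flux")].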
""" - # Implementation of isinstance(t, Tuple[Interface, str, np.ndarray]). - isinstance_Tuple_interface_str_array = list(map(type, pt)) == [ + # Implementation of isinstance(t, tuple[pp.MortarGrid, str, np.ndarray]). + isinstance_tuple_interface_str_array = list(map(type, pt)) == [ tuple, str, np.ndarray, ] and isinstance_interface(pt[0]) # Convert data to unique format and update the interface data dictionary. - if isinstance_Tuple_interface_str_array: - # Interpret (e, key, value) = (pt[0], pt[1], pt[2]); - e = pt[0] + if isinstance_tuple_interface_str_array: + # Interpret (intf, key, value) = (pt[0], pt[1], pt[2]); + intf = pt[0] key = pt[1] - - # Fetch grid data for converting the value to right format; - d = self.gb.edge_props(e) - mg = d["mortar_grid"] - value = toVectorFormat(pt[2], mg) + value = _toVectorFormat(pt[2], intf) # Add data point in correct format to collection - interface_data[(e, key)] = value + interface_data[(intf, key)] = value # Return updated dictionaries and indicate succesful conversion. return subdomain_data, interface_data, True @@ -759,36 +807,43 @@ def add_data_from_Tuple_interface_str_array(pt, subdomain_data, interface_data): # Return original data dictionaries and indicate no modification. return subdomain_data, interface_data, False - # Aux. method: Detect and convert data of form ("key", value). - def add_data_from_Tuple_str_array(pt, subdomain_data, interface_data): + # TODO can we unify to add_data_from_tuple_grid_str_array ? + + def add_data_from_tuple_str_array( + pt, subdomain_data, interface_data + ) -> tuple[dict, dict, bool]: """ Check whether data is provided by a tuple (key, data), where key is a string, and data is a user-defined data array. - This only works when the grid bucket contains a single grid. + This only works when the mixed-dimensional grid contains a single subdomain. + + Raises: + ValueError if the mixed-dimensional grid contains more than one subdomain """ - # Implementation if isinstance(pt, Tuple[str, np.ndarray]. - isinstance_Tuple_str_array = list(map(type, pt)) == [str, np.ndarray] + # Implementation if isinstance(pt, tuple[str, np.ndarray]. + isinstance_tuple_str_array = list(map(type, pt)) == [str, np.ndarray] # Convert data to unique format and update the interface data dictionary. - if isinstance_Tuple_str_array: + if isinstance_tuple_str_array: - # Fetch the correct grid. This option is only supported for grid - # buckets containing a single grid. - grids = [g for g, _ in self.gb.nodes()] - if not len(grids) == 1: + # Fetch the correct grid. This option is only supported for mixed-dimensional + # grids containing a single subdomain. + subdomains = self._mdg.subdomains() + if not len(subdomains) == 1: raise ValueError( f"""The data type used for {pt} is only - supported if the grid bucket only contains a single grid.""" + supported if the mixed-dimensional grid only contains a single + subdomain.""" ) - # Fetch remaining ingredients required to define node data element - g = grids[0] + # Fetch remaining ingredients required to define subdomain data element + sd = subdomains[0] key = pt[0] - value = toVectorFormat(pt[1], g) + value = _toVectorFormat(pt[1], sd) # Add data point in correct format to collection - subdomain_data[(g, key)] = value + subdomain_data[(sd, key)] = value # Return updated dictionaries and indicate succesful conversion. 
return subdomain_data, interface_data, True @@ -808,7 +863,7 @@ def add_data_from_Tuple_str_array(pt, subdomain_data, interface_data): elif not isinstance(data, list): data = [data] - # Initialize container for data associated to nodes + # Initialize container for data associated to subdomains and interfaces subdomain_data: SubdomainData = dict() interface_data: InterfaceData = dict() @@ -817,11 +872,11 @@ def add_data_from_Tuple_str_array(pt, subdomain_data, interface_data): # allowed. methods = [ add_data_from_str, - add_data_from_Tuple_subdomains_str, - add_data_from_Tuple_interfaces_str, - add_data_from_Tuple_subdomain_str_array, - add_data_from_Tuple_interface_str_array, - add_data_from_Tuple_str_array, + add_data_from_tuple_subdomains_str, + add_data_from_tuple_interfaces_str, + add_data_from_tuple_subdomain_str_array, + add_data_from_tuple_interface_str_array, + add_data_from_tuple_str_array, ] # Loop over all data points and collect them in a unified format. @@ -874,18 +929,18 @@ def _update_constant_mesh_data(self) -> None: self._constant_subdomain_data = dict() # Add mesh related, constant subdomain data by direct assignment - for g, d in self.gb.nodes(): - ones = np.ones(g.num_cells, dtype=int) - self._constant_subdomain_data[(g, "cell_id")] = np.arange( - g.num_cells, dtype=int + for sd, sd_data in self._mdg.subdomains(return_data=True): + ones = np.ones(sd.num_cells, dtype=int) + self._constant_subdomain_data[(sd, "cell_id")] = np.arange( + sd.num_cells, dtype=int ) - self._constant_subdomain_data[(g, "grid_dim")] = g.dim * ones - if "node_number" in d: - self._constant_subdomain_data[(g, "grid_node_number")] = ( - d["node_number"] * ones + self._constant_subdomain_data[(sd, "grid_dim")] = sd.dim * ones + if "node_number" in sd_data: + self._constant_subdomain_data[(sd, "grid_node_number")] = ( + sd_data["node_number"] * ones ) - self._constant_subdomain_data[(g, "is_mortar")] = 0 * ones - self._constant_subdomain_data[(g, "mortar_side")] = ( + self._constant_subdomain_data[(sd, "is_mortar")] = 0 * ones + self._constant_subdomain_data[(sd, "mortar_side")] = ( pp.grids.mortar_grid.MortarSides.NONE_SIDE.value * ones ) @@ -896,63 +951,62 @@ def _update_constant_mesh_data(self) -> None: self._constant_interface_data = dict() # Add mesh related, constant interface data by direct assignment. - for e, d in self.gb.edges(): + for intf, intf_data in self._mdg.interfaces(return_data=True): - # Fetch mortar grid - mg = d["mortar_grid"] - - # Construct empty arrays for all extra edge data - self._constant_interface_data[(e, "grid_dim")] = np.empty(0, dtype=int) - self._constant_interface_data[(e, "cell_id")] = np.empty(0, dtype=int) - self._constant_interface_data[(e, "grid_edge_number")] = np.empty( + # Construct empty arrays for all extra interface data + self._constant_interface_data[(intf, "grid_dim")] = np.empty(0, dtype=int) + self._constant_interface_data[(intf, "cell_id")] = np.empty(0, dtype=int) + self._constant_interface_data[(intf, "grid_edge_number")] = np.empty( + 0, dtype=int + ) + self._constant_interface_data[(intf, "is_mortar")] = np.empty(0, dtype=int) + self._constant_interface_data[(intf, "mortar_side")] = np.empty( 0, dtype=int ) - self._constant_interface_data[(e, "is_mortar")] = np.empty(0, dtype=int) - self._constant_interface_data[(e, "mortar_side")] = np.empty(0, dtype=int) # Initialize offset - mg_num_cells: int = 0 + side_grid_num_cells: int = 0 - # Assign extra edge data by collecting values on both sides. 
- for side, g in mg.side_grids.items(): - ones = np.ones(g.num_cells, dtype=int) + # Assign extra interface data by collecting values on both sides. + for side, grid in intf.side_grids.items(): + ones = np.ones(grid.num_cells, dtype=int) # Grid dimension of the mortar grid - self._constant_interface_data[(e, "grid_dim")] = np.hstack( - (self._constant_interface_data[(e, "grid_dim")], g.dim * ones) + self._constant_interface_data[(intf, "grid_dim")] = np.hstack( + (self._constant_interface_data[(intf, "grid_dim")], grid.dim * ones) ) # Cell ids of the mortar grid - self._constant_interface_data[(e, "cell_id")] = np.hstack( + self._constant_interface_data[(intf, "cell_id")] = np.hstack( ( - self._constant_interface_data[(e, "cell_id")], - np.arange(g.num_cells, dtype=int) + mg_num_cells, + self._constant_interface_data[(intf, "cell_id")], + np.arange(grid.num_cells, dtype=int) + side_grid_num_cells, ) ) - # Grid edge number of each edge - self._constant_interface_data[(e, "grid_edge_number")] = np.hstack( + # Grid edge number of each interface + self._constant_interface_data[(intf, "grid_edge_number")] = np.hstack( ( - self._constant_interface_data[(e, "grid_edge_number")], - d["edge_number"] * ones, + self._constant_interface_data[(intf, "grid_edge_number")], + intf_data["edge_number"] * ones, ) ) - # Whether the edge is mortar - self._constant_interface_data[(e, "is_mortar")] = np.hstack( - (self._constant_interface_data[(e, "is_mortar")], ones) + # Whether the interface is mortar + self._constant_interface_data[(intf, "is_mortar")] = np.hstack( + (self._constant_interface_data[(intf, "is_mortar")], ones) ) # Side of the mortar - self._constant_interface_data[(e, "mortar_side")] = np.hstack( + self._constant_interface_data[(intf, "mortar_side")] = np.hstack( ( - self._constant_interface_data[(e, "mortar_side")], + self._constant_interface_data[(intf, "mortar_side")], side.value * ones, ) ) # Update offset - mg_num_cells += g.num_cells + side_grid_num_cells += grid.num_cells def _export_data_vtu( self, @@ -968,14 +1022,19 @@ def _export_data_vtu( related to those subdomains will be exported simultaneously. Analogously for interfaces. - Parameters: + Args: data (Union[SubdomainData, InterfaceData]): Subdomain or interface data. time_step (int): time_step to be used to append the file name - kwargs (optional): Optional keywords; + kwargs: Optional keyword arguments: 'interface_data' (boolean) indicates whether data is associated to - an interface, default is False; + an interface, default is False; 'constant_data' (boolean) indicates whether data is treated as - constant in time, default is False. + constant in time, default is False. + + Raises: + TypeError if keyword arguments contain unsupported keyword + ValueError if data provided for some but not all subdomains or interfaces + of particular dimension """ # Check for optional keywords self.interface_data: bool = kwargs.pop("interface_data", False) @@ -988,16 +1047,16 @@ def _export_data_vtu( # of the available grids, and for interfaces fetch the dimensions of the available # mortar grids. 
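# For example (an illustrative case), a 3d domain with a single 2d
# fracture typically gives self._dims == [2, 3] and self._m_dims == [2],
# so subdomain and interface data of each dimension end up in their
# own vtu files.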
is_subdomain_data: bool = not self.interface_data - dims = self.dims if is_subdomain_data else self.m_dims + dims = self._dims if is_subdomain_data else self._m_dims # Define file name base - file_name_base: str = self.file_name + file_name_base: str = self._file_name # Extend in case of constant data file_name_base += "_constant" if self.constant_data else "" # Extend in case of interface data file_name_base += "_mortar" if self.interface_data else "" # Append folder name to file name base - file_name_base = self._append_folder_name(self.folder_name, file_name_base) + file_name_base = self._append_folder_name(self._folder_name, file_name_base) # Collect unique keys, and for unique sorting, sort by alphabet keys = list(set([key for _, key in data])) @@ -1009,18 +1068,13 @@ def _export_data_vtu( file_name: str = self._make_file_name(file_name_base, time_step, dim) # Get all geometrical entities of dimension dim: - # subdomains with correct grid dimension for subdomain data, and - # interfaces with correct mortar grid dimension for interface data. - # TODO update and simplify when new GridTree structure is in place. if is_subdomain_data: - entities = self.gb.grids_of_dimension(dim).tolist() + entities = self._mdg.subdomains(dim=dim) else: - entities = [ - e for e, d in self.gb.edges() if d["mortar_grid"].dim == dim - ] + entities = self._mdg.interfaces(dim=dim) # Construct the list of fields represented on this dimension. - fields: List[Field] = [] + fields: list[Field] = [] for key in keys: # Collect the values associated to all entities values = [] @@ -1029,9 +1083,6 @@ def _export_data_vtu( values.append(data[(e, key)]) # Require data for all or none entities of that dimension. - # NOTE: As implemented now, for any grid dimension and any prescribed - # key, one has to provide data for all or none of the grids of that - # dimension. if len(values) not in [0, len(entities)]: raise ValueError( f"""Insufficient amount of data provided for @@ -1052,12 +1103,12 @@ def _export_data_vtu( if meshio_geom is not None: self._write(fields, file_name, meshio_geom) - def _export_gb_pvd(self, file_name: str, time_step: Optional[int]) -> None: + def _export_mdg_pvd(self, file_name: str, time_step: Optional[int]) -> None: """ Routine to export to pvd format and collect all data scattered over several files for distinct grid dimensions. - Parameters: + Args: file_name (str): storage path for pvd file time_step (int): used as appendix for the file name """ @@ -1077,39 +1128,42 @@ def _export_gb_pvd(self, file_name: str, time_step: Optional[int]) -> None: fm = '\t\n' # Subdomain data. - for dim in self.dims: + for dim in self._dims: if self.meshio_geom[dim] is not None: - o_file.write(fm % self._make_file_name(self.file_name, time_step, dim)) + o_file.write(fm % self._make_file_name(self._file_name, time_step, dim)) # Interface data. - for dim in self.m_dims: + for dim in self._m_dims: if self.m_meshio_geom[dim] is not None: o_file.write( fm - % self._make_file_name(self.file_name + "_mortar", time_step, dim) + % self._make_file_name(self._file_name + "_mortar", time_step, dim) ) # If constant data is exported to separate vtu files, also include # these here. The procedure is similar to the above, but the file names # incl. the relevant time step have to be adjusted. - if self.export_constants_separately: + if self._export_constants_separately: + # Constant subdomain data. 
- for dim in self.dims: + for dim in self._dims: if self.meshio_geom[dim] is not None: o_file.write( fm % self._make_file_name( - self.file_name + "_constant", self._time_step_constants, dim + self._file_name + "_constant", + self._time_step_constants, + dim, ) ) # Constant interface data. - for dim in self.m_dims: + for dim in self._m_dims: if self.m_meshio_geom[dim] is not None: o_file.write( fm % self._make_file_name( - self.file_name + "_constant_mortar", + self._file_name + "_constant_mortar", self._time_step_constants, dim, ) @@ -1127,33 +1181,40 @@ def _update_meshio_geom(self) -> None: ids, are created and stored. """ # Subdomains - for dim in self.dims: - # Get grids with dimension dim - g = self.gb.get_grids(lambda g: g.dim == dim) + for dim in self._dims: + # Get subdomains with dimension dim + subdomains = self._mdg.subdomains(dim=dim) # Export and store - self.meshio_geom[dim] = self._export_grid(g, dim) + self.meshio_geom[dim] = self._export_grid(subdomains, dim) # Interfaces - for dim in self.m_dims: - # Extract the mortar grids for dimension dim - mgs = self.gb.get_mortar_grids(lambda g: g.dim == dim) - # It contains the mortar grids "unrolled" by sides - mg = np.array([g for m in mgs for _, g in m.side_grids.items()]) + for dim in self._m_dims: + # Extract the mortar grids for dimension dim, unrolled by sides + interface_side_grids = [ + grid + for intf in self._mdg.interfaces(dim=dim) + for _, grid in intf.side_grids.items() + ] # Export and store - self.m_meshio_geom[dim] = self._export_grid(mg, dim) + self.m_meshio_geom[dim] = self._export_grid(interface_side_grids, dim) - def _export_grid(self, gs: Iterable[pp.Grid], dim: int) -> Union[None, Meshio_Geom]: + def _export_grid( + self, grids: Iterable[pp.Grid], dim: int + ) -> Union[None, Meshio_Geom]: """ Wrapper function to export grids of dimension dim. Calls the appropriate dimension specific export function. - Parameters: - gs (Iterable[pp.Grid]): Subdomains of same dimension. + Args: + grids (Iterable[pp.Grid]): Subdomains of same dimension. dim (int): Dimension of the subdomains. Returns: Meshio_Geom: Points, cells (storing the connectivity), and cell ids in correct meshio format. + + Raises: + ValueError if dim not 0, 1, 2, 3 """ if dim == 0: return None @@ -1166,13 +1227,13 @@ def _export_grid(self, gs: Iterable[pp.Grid], dim: int) -> Union[None, Meshio_Ge else: raise ValueError(f"Unknown dimension {dim}") - def _export_1d(self, gs: Iterable[pp.Grid]) -> Meshio_Geom: + def _export_grid_1d(self, grids: Iterable[pp.Grid]) -> Meshio_Geom: """ Export the geometrical data (point coordinates) and connectivity information from the 1d PorePy grids to meshio. - Parameters: - gs (Iterable[pp.Grid]): 1d grids. + Args: + grids (Iterable[pp.Grid]): 1d grids. Returns: Meshio_Geom: Points, 1d cells (storing the connectivity), @@ -1183,15 +1244,15 @@ def _export_1d(self, gs: Iterable[pp.Grid]) -> Meshio_Geom: cell_type = "line" # Dictionary storing cell->nodes connectivity information - cell_to_nodes: Dict[str, np.ndarray] = {cell_type: np.empty((0, 2), dtype=int)} + cell_to_nodes: dict[str, np.ndarray] = {cell_type: np.empty((0, 2), dtype=int)} # Dictionary collecting all cell ids for each cell type. 
# Since each cell is a line, the list of cell ids is trivial - total_num_cells = np.sum(np.array([g.num_cells for g in gs])) - cell_id: Dict[str, List[int]] = {cell_type: [i for i in range(total_num_cells)]} + total_num_cells = np.sum(np.array([grid.num_cells for grid in grids])) + cell_id: dict[str, list[int]] = {cell_type: [i for i in range(total_num_cells)]} # Data structure for storing node coordinates of all 1d grids. - num_pts = np.sum([g.num_nodes for g in gs]) + num_pts = np.sum([grid.num_nodes for grid in grids]).astype(int) meshio_pts = np.empty((num_pts, 3)) # type: ignore # Initialize offset. All data associated to 1d grids is stored in @@ -1200,14 +1261,14 @@ def _export_1d(self, gs: Iterable[pp.Grid]) -> Meshio_Geom: nodes_offset = 0 # Loop over all 1d grids - for g in gs: + for grid in grids: # Store node coordinates - sl = slice(nodes_offset, nodes_offset + g.num_nodes) - meshio_pts[sl, :] = g.nodes.T + sl = slice(nodes_offset, nodes_offset + grid.num_nodes) + meshio_pts[sl, :] = grid.nodes.T # Lines are 1-simplices, and have a trivial connectvity. - cn_indices = self._simplex_cell_to_nodes(1, g) + cn_indices = self._simplex_cell_to_nodes(1, grid) # Add to previous connectivity information cell_to_nodes[cell_type] = np.vstack( @@ -1215,7 +1276,7 @@ def _export_1d(self, gs: Iterable[pp.Grid]) -> Meshio_Geom: ) # Update offsets - nodes_offset += g.num_nodes + nodes_offset += grid.num_nodes # Construct the meshio data structure meshio_cells = list() @@ -1231,14 +1292,14 @@ def _export_1d(self, gs: Iterable[pp.Grid]) -> Meshio_Geom: return Meshio_Geom(meshio_pts, meshio_cells, meshio_cell_id) def _simplex_cell_to_nodes( - self, n: int, g: pp.Grid, cells: Optional[np.ndarray] = None + self, n: int, grid: pp.Grid, cells: Optional[np.ndarray] = None ) -> np.ndarray: """ Determine cell to node connectivity for a general n-simplex mesh. - Parameters: + Args: n (int): dimension of the simplices in the grid. - g (pp.Grid): grid containing cells and nodes. + grid (pp.Grid): grid containing cells and nodes. cells (np.ndarray, optional): all n-simplex cells Returns: @@ -1247,9 +1308,9 @@ def _simplex_cell_to_nodes( """ # Determine cell-node ptr cn_indptr = ( - g.cell_nodes().indptr[:-1] + grid.cell_nodes().indptr[:-1] if cells is None - else g.cell_nodes().indptr[cells] + else grid.cell_nodes().indptr[cells] ) # Each n-simplex has n+1 nodes @@ -1261,20 +1322,20 @@ def _simplex_cell_to_nodes( ).reshape(-1, order="F") # Detect all corresponding nodes by applying the expanded mask to the indices - expanded_cn_indices = g.cell_nodes().indices[expanded_cn_indptr] + expanded_cn_indices = grid.cell_nodes().indices[expanded_cn_indptr] # Convert to right format. cn_indices = np.reshape(expanded_cn_indices, (-1, num_nodes), order="C") return cn_indices - def _export_2d(self, gs: Iterable[pp.Grid]) -> Meshio_Geom: + def _export_grid_2d(self, grids: Iterable[pp.Grid]) -> Meshio_Geom: """ Export the geometrical data (point coordinates) and connectivity information from the 2d PorePy grids to meshio. - Parameters: - gs (Iterable[pp.Grid]): 2d grids. + Args: + grids (Iterable[pp.Grid]): 2d grids. Returns: Meshio_Geom: Points, 2d cells (storing the connectivity), and @@ -1288,12 +1349,12 @@ def _export_2d(self, gs: Iterable[pp.Grid]) -> Meshio_Geom: # Dictionary storing cell->nodes connectivity information for all # cell types. For this, the nodes have to be sorted such that # they form a circular chain, describing the boundary of the cell. 
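# For example (an illustrative case), a 2d grid mixing triangles and
# quads yields two entries, cell_to_nodes["triangle"] of shape
# (num_triangles, 3) and cell_to_nodes["quad"] of shape (num_quads, 4).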
- cell_to_nodes: Dict[str, np.ndarray] = {} + cell_to_nodes: dict[str, np.ndarray] = {} # Dictionary collecting all cell ids for each cell type. - cell_id: Dict[str, List[int]] = {} + cell_id: dict[str, list[int]] = {} # Data structure for storing node coordinates of all 2d grids. - num_pts = np.sum([g.num_nodes for g in gs]) + num_pts = np.sum([grid.num_nodes for grid in grids]) meshio_pts = np.empty((num_pts, 3)) # type: ignore # Initialize offsets. All data associated to 2d grids is stored in @@ -1303,14 +1364,14 @@ def _export_2d(self, gs: Iterable[pp.Grid]) -> Meshio_Geom: cell_offset = 0 # Loop over all 2d grids - for g in gs: + for grid in grids: # Store node coordinates - sl = slice(nodes_offset, nodes_offset + g.num_nodes) - meshio_pts[sl, :] = g.nodes.T + sl = slice(nodes_offset, nodes_offset + grid.num_nodes) + meshio_pts[sl, :] = grid.nodes.T # Determine cell types based on number of faces=nodes per cell. - num_faces_per_cell = g.cell_faces.getnnz(axis=0) + num_faces_per_cell = grid.cell_faces.getnnz(axis=0) # Loop over all available cell types and group cells of one type. g_cell_map = dict() @@ -1350,19 +1411,19 @@ def _export_2d(self, gs: Iterable[pp.Grid]) -> Meshio_Geom: # Fetch triangle cells cells = g_cell_map[cell_type] # Determine the trivial connectivity - triangles are 2-simplices. - cn_indices = self._simplex_cell_to_nodes(2, g, cells) + cn_indices = self._simplex_cell_to_nodes(2, grid, cells) # Quad and polygon cells. else: - # For quads/polygons, g.cell_nodes cannot be blindly used as for + # For quads/polygons, grid.cell_nodes cannot be blindly used as for # triangles, since the ordering of the nodes may define a cell of # the type (here specific for quads) # x--x and not x--x # \/ | | # /\ | | # x--x x--x . - # Therefore, use both g.cell_faces and g.face_nodes to make use of face - # information and sort those to retrieve the correct connectivity. + # Therefore, use both grid.cell_faces and grid.face_nodes to make use of + # face information and sort those to retrieve the correct connectivity. # Strategy: Collect all cell nodes including their connectivity in a # matrix of double num cell size. The goal will be to gather starting @@ -1375,19 +1436,19 @@ def _export_2d(self, gs: Iterable[pp.Grid]) -> Meshio_Geom: # Determine all faces of all cells. Use an analogous approach as used to # determine all cell nodes for triangle cells. And use that a polygon with # n nodes has also n faces. - cf_indptr = g.cell_faces.indptr[cells] + cf_indptr = grid.cell_faces.indptr[cells] expanded_cf_indptr = np.vstack( [cf_indptr + i for i in range(n)] ).reshape(-1, order="F") - cf_indices = g.cell_faces.indices[expanded_cf_indptr] + cf_indices = grid.cell_faces.indices[expanded_cf_indptr] # Determine the associated (two) nodes of all faces for each cell. - fn_indptr = g.face_nodes.indptr[cf_indices] + fn_indptr = grid.face_nodes.indptr[cf_indices] # Extract nodes for first and second node of each face; reshape such # that all first nodes of all faces for each cell are stored in one row, # i.e., end up with an array of size num_cells (for this cell type) x n. cfn_indices = [ - g.face_nodes.indices[fn_indptr + i].reshape(-1, n) + grid.face_nodes.indices[fn_indptr + i].reshape(-1, n) for i in range(2) ] # Group first and second nodes, with alternating order of rows. 
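# For example (an illustrative case), a quad cell with the face-node
# pairs (0,1), (1,3), (2,0), (3,2) should be resolved into the circular
# node chain [0, 1, 3, 2].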
@@ -1412,8 +1473,8 @@ def _export_2d(self, gs: Iterable[pp.Grid]) -> Meshio_Geom: ) # Update offsets - nodes_offset += g.num_nodes - cell_offset += g.num_cells + nodes_offset += grid.num_nodes + cell_offset += grid.num_cells # Construct the meshio data structure meshio_cells = list() @@ -1434,13 +1495,13 @@ def _export_2d(self, gs: Iterable[pp.Grid]) -> Meshio_Geom: # Return final meshio data: points, cell (connectivity), cell ids return Meshio_Geom(meshio_pts, meshio_cells, meshio_cell_id) - def _export_3d(self, gs: Iterable[pp.Grid]) -> Meshio_Geom: + def _export_grid_3d(self, grids: Iterable[pp.Grid]) -> Meshio_Geom: """ Export the geometrical data (point coordinates) and connectivity information from 3d PorePy grids to meshio. - Parameters: - gs (Iterable[pp.Grid]): 3d grids. + Args: + grids (Iterable[pp.Grid]): 3d grids. Returns: Meshio_Geom: Points, 3d cells (storing the connectivity), and @@ -1454,18 +1515,18 @@ def _export_3d(self, gs: Iterable[pp.Grid]) -> Meshio_Geom: # as polyhedra. # Determine the cell types present among all grids. - cell_types: Set[str] = set() - for g in gs: + cell_types: set[str] = set() + for grid in grids: # The number of faces per cell wil be later used to determining # the cell types - num_faces_per_cell = np.unique(g.cell_faces.getnnz(axis=0)) + num_faces_per_cell = np.unique(grid.cell_faces.getnnz(axis=0)) if num_faces_per_cell.shape[0] == 1: n = num_faces_per_cell[0] if n == 4: cell_types.add("tetra") - elif n == 6 and isinstance(g, pp.CartGrid): + elif n == 6 and isinstance(grid, pp.CartGrid): cell_types.add("hexahedron") else: cell_types.add("polyhedron") @@ -1474,19 +1535,19 @@ def _export_3d(self, gs: Iterable[pp.Grid]) -> Meshio_Geom: # Use dedicated export for each grid type if len(cell_types) == 1 and "tetra" in cell_types: - return self._export_simplex_3d(gs) + return self._export_simplex_3d(grids) elif len(cell_types) == 1 and "hexahedron" in cell_types: - return self._export_hexahedron_3d(gs) + return self._export_hexahedron_3d(grids) else: - return self._export_polyhedron_3d(gs) + return self._export_polyhedron_3d(grids) - def _export_simplex_3d(self, gs: Iterable[pp.Grid]) -> Meshio_Geom: + def _export_simplex_3d(self, grids: Iterable[pp.Grid]) -> Meshio_Geom: """ Export the geometrical data (point coordinates) and connectivity information from 3d PorePy simplex grids to meshio. - Parameters: - gs (Iterable[pp.Grid]): 3d simplex grids. + Args: + grids (Iterable[pp.Grid]): 3d simplex grids. Returns: Meshio_Geom: Points, 3d cells (storing the connectivity), and @@ -1497,10 +1558,10 @@ def _export_simplex_3d(self, gs: Iterable[pp.Grid]) -> Meshio_Geom: cell_to_nodes = np.empty((0, 4), dtype=int) # Dictionary collecting all cell ids for each cell type. - cell_id: List[int] = [] + cell_id: list[int] = [] # Data structure for storing node coordinates of all 3d grids. - num_pts = np.sum([g.num_nodes for g in gs]) + num_pts = np.sum([grid.num_nodes for grid in grids]) meshio_pts = np.empty((num_pts, 3)) # type: ignore # Initialize offsets. All data associated to 3d grids is stored in @@ -1510,14 +1571,14 @@ def _export_simplex_3d(self, gs: Iterable[pp.Grid]) -> Meshio_Geom: cell_offset = 0 # Treat each 3d grid separately. - for g in gs: + for grid in grids: # Store node coordinates - sl = slice(nodes_offset, nodes_offset + g.num_nodes) - meshio_pts[sl, :] = g.nodes.T + sl = slice(nodes_offset, nodes_offset + grid.num_nodes) + meshio_pts[sl, :] = grid.nodes.T # Identify all cells as tetrahedra. 
- cells = np.arange(g.num_cells) + cells = np.arange(grid.num_cells) # Add offset taking into account previous grids cell_id += (cells + cell_offset).tolist() @@ -1525,15 +1586,15 @@ def _export_simplex_3d(self, gs: Iterable[pp.Grid]) -> Meshio_Geom: # Determine local connectivity for all cells. Simplices are invariant # under permuatations. Thus, no specific ordering of nodes is required. # Tetrahedra are essentially 3-simplices. - cn_indices = self._simplex_cell_to_nodes(3, g, cells) + cn_indices = self._simplex_cell_to_nodes(3, grid, cells) # Store cell-node connectivity, and add offset taking into account # previous grids cell_to_nodes = np.vstack((cell_to_nodes, cn_indices + nodes_offset)) # Update offset - nodes_offset += g.num_nodes - cell_offset += g.num_cells + nodes_offset += grid.num_nodes + cell_offset += grid.num_cells # Initialize the meshio data structure for the connectivity and cell ids. # There is only one cell type, and meshio expects iterable data structs. @@ -1543,7 +1604,7 @@ def _export_simplex_3d(self, gs: Iterable[pp.Grid]) -> Meshio_Geom: # Return final meshio data: points, cell (connectivity), cell ids return Meshio_Geom(meshio_pts, meshio_cells, meshio_cell_id) - def _export_hexahedron_3d(self, gs: Iterable[pp.Grid]) -> Meshio_Geom: + def _export_hexahedron_3d(self, grids: Iterable[pp.Grid]) -> Meshio_Geom: """ Export the geometrical data (point coordinates) and connectivity information from 3d PorePy hexahedron grids to meshio. @@ -1552,8 +1613,8 @@ def _export_hexahedron_3d(self, gs: Iterable[pp.Grid]) -> Meshio_Geom: However, meshio does solely handle unstructured grids and cannot for instance write to *.vtr format. - Parameters: - gs (Iterable[pp.Grid]): 3d hexahedron grids. + Args: + grids (Iterable[pp.Grid]): 3d hexahedron grids. Returns: Meshio_Geom: Points, 3d cells (storing the connectivity), and @@ -1564,10 +1625,10 @@ def _export_hexahedron_3d(self, gs: Iterable[pp.Grid]) -> Meshio_Geom: cell_to_nodes = np.empty((0, 8), dtype=int) # Dictionary collecting all cell ids for each cell type. - cell_id: List[int] = [] + cell_id: list[int] = [] # Data structure for storing node coordinates of all 3d grids. - num_pts = np.sum([g.num_nodes for g in gs]) + num_pts = np.sum([grid.num_nodes for grid in grids]) meshio_pts = np.empty((num_pts, 3)) # type: ignore # Initialize offsets. Required taking into account multiple 3d grids. @@ -1575,14 +1636,14 @@ def _export_hexahedron_3d(self, gs: Iterable[pp.Grid]) -> Meshio_Geom: cell_offset = 0 # Treat each 3d grid separately. - for g in gs: + for grid in grids: # Store node coordinates - sl = slice(nodes_offset, nodes_offset + g.num_nodes) - meshio_pts[sl, :] = g.nodes.T + sl = slice(nodes_offset, nodes_offset + grid.num_nodes) + meshio_pts[sl, :] = grid.nodes.T # Identify all cells as tetrahedra. 
- cells = np.arange(g.num_cells) + cells = np.arange(grid.num_cells) # Add offset taking into account previous grids cell_id += (cells + cell_offset).tolist() @@ -1600,9 +1661,9 @@ def _export_hexahedron_3d(self, gs: Iterable[pp.Grid]) -> Meshio_Geom: # Determine cell-node ptr cn_indptr = ( - g.cell_nodes().indptr[:-1] + grid.cell_nodes().indptr[:-1] if cells is None - else g.cell_nodes().indptr[cells] + else grid.cell_nodes().indptr[cells] ) # Collect the indptr to all nodes of the cell; @@ -1612,7 +1673,7 @@ def _export_hexahedron_3d(self, gs: Iterable[pp.Grid]) -> Meshio_Geom: ) # Detect all corresponding nodes by applying the expanded mask to indices - expanded_cn_indices = g.cell_nodes().indices[expanded_cn_indptr] + expanded_cn_indices = grid.cell_nodes().indices[expanded_cn_indptr] # Convert to right format. cn_indices = np.reshape(expanded_cn_indices, (-1, 8), order="C") @@ -1622,15 +1683,15 @@ def _export_hexahedron_3d(self, gs: Iterable[pp.Grid]) -> Meshio_Geom: cn_indices = cn_indices[:, [0, 2, 6, 4, 1, 3, 7, 5]] # Test whether the hard-coded numbering really defines a hexahedron. - assert self._test_hex_meshio_format(g.nodes, cn_indices) + assert self._test_hex_meshio_format(grid.nodes, cn_indices) # Store cell-node connectivity, and add offset taking into account # previous grids cell_to_nodes = np.vstack((cell_to_nodes, cn_indices + nodes_offset)) # Update offset - nodes_offset += g.num_nodes - cell_offset += g.num_cells + nodes_offset += grid.num_nodes + cell_offset += grid.num_cells # Initialize the meshio data structure for the connectivity and cell ids. # There is only one cell type, and meshio expects iterable data structs. @@ -1729,13 +1790,13 @@ def _function_to_compile(nodes: np.ndarray, cn_indices: np.ndarray) -> bool: return _function_to_compile(nodes, cn_indices) - def _export_polyhedron_3d(self, gs: Iterable[pp.Grid]) -> Meshio_Geom: + def _export_polyhedron_3d(self, grids: Iterable[pp.Grid]) -> Meshio_Geom: """ Export the geometrical data (point coordinates) and connectivity information from 3d PorePy polyhedron grids to meshio. - Parameters: - gs (Iterable[pp.Grid]): 3d polyhedron grids. + Args: + grids (Iterable[pp.Grid]): 3d polyhedron grids. Returns: Meshio_Geom: Points, 3d cells (storing the connectivity), and @@ -1744,13 +1805,13 @@ def _export_polyhedron_3d(self, gs: Iterable[pp.Grid]) -> Meshio_Geom: # For the general geometric type "polyhedron", cell->faces->nodes will # be used to store connectivity information, which can become significantly # larger than cell->nodes information used for special type grids. - cell_to_faces: Dict[str, List[List[int]]] = {} + cell_to_faces: dict[str, list[list[int]]] = {} # Dictionary collecting all cell ids for each cell type. - cell_id: Dict[str, List[int]] = {} + cell_id: dict[str, list[int]] = {} # Data structure for storing node coordinates of all 3d grids. - num_pts = np.sum([g.num_nodes for g in gs]) + num_pts = np.sum([grid.num_nodes for grid in grids]) meshio_pts = np.empty((num_pts, 3)) # type: ignore # Initialize offsets. Required taking into account multiple 3d grids. @@ -1758,15 +1819,15 @@ def _export_polyhedron_3d(self, gs: Iterable[pp.Grid]) -> Meshio_Geom: cell_offset = 0 # Treat each 3d grid separately. 
- for g in gs:
+ for grid in grids:

 # Store node coordinates
- sl = slice(nodes_offset, nodes_offset + g.num_nodes)
- meshio_pts[sl, :] = g.nodes.T
+ sl = slice(nodes_offset, nodes_offset + grid.num_nodes)
+ meshio_pts[sl, :] = grid.nodes.T

 # The number of faces per cell will later be used to determine
 # the cell types
- num_faces_per_cell = g.cell_faces.getnnz(axis=0)
+ num_faces_per_cell = grid.cell_faces.getnnz(axis=0)

 # Categorize all polyhedron cells by their number of faces.
 # Each category will be treated separately allowing for using
@@ -1796,21 +1857,21 @@ def _export_polyhedron_3d(self, gs: Iterable[pp.Grid]) -> Meshio_Geom:
 # The general strategy is to define the connectivity as cell-face-nodes
 # information, where the faces are defined by nodes. Hence, this
 # information is significantly larger than the info provided for
- # tetra cells. Here, we make use of the fact that g.face_nodes
+ # tetra cells. Here, we make use of the fact that grid.face_nodes
 # provides nodes ordered wrt. the right-hand rule.

 # Fetch cells with n faces
 cells = g_cell_map[cell_type]

 # Store short cuts to cell-face and face-node information
- cf_indptr = g.cell_faces.indptr
- cf_indices = g.cell_faces.indices
- fn_indptr = g.face_nodes.indptr
- fn_indices = g.face_nodes.indices
+ cf_indptr = grid.cell_faces.indptr
+ cf_indices = grid.cell_faces.indices
+ fn_indptr = grid.face_nodes.indptr
+ fn_indices = grid.face_nodes.indices

 # Determine the cell-face connectivity (with faces described by their
 # nodes ordered such that they form a chain and are identified by the
- # face boundary. The final data format is a List[List[np.ndarray]].
+ # face boundary). The final data format is a list[list[np.ndarray]].
 # The outer list loops over all cells. Each cell entry contains a
 # list over faces, and each face entry is given by the face nodes.
 if cell_type not in cell_to_faces:
@@ -1824,8 +1885,8 @@ def _export_polyhedron_3d(self, gs: Iterable[pp.Grid]) -> Meshio_Geom:
 ]

 # Update offset
- nodes_offset += g.num_nodes
- cell_offset += g.num_cells
+ nodes_offset += grid.num_nodes
+ cell_offset += grid.num_cells

 # Initialize the meshio data structure for the connectivity and cell ids.
 meshio_cells = list()
@@ -1849,15 +1910,18 @@ def _write(
 """
 Interface to meshio for exporting cell data.

- Parameters:
+ Args:
 fields (iterable, Field): fields which shall be exported
 file_name (str): name of final file of export
 meshio_geom (Meshio_Geom): Namedtuple of points, connectivity
 information, and cell ids in meshio format (for a single dimension).
+
+ Raises:
+ ValueError if some data has wrong dimension
 """
 # Initialize empty cell data dictionary
- cell_data: Dict[str, List[np.ndarray]] = {}
+ cell_data: dict[str, list[np.ndarray]] = {}

 # Split the data for each group of geometrically uniform cells
 # Utilize meshio_geom for this.
@@ -1876,7 +1940,7 @@ def _write(
 elif field.values.ndim == 2:
 cell_data[field.name].append(field.values[:, ids].T)
 else:
- raise ValueError
+ raise ValueError("Data values have wrong dimension")

 # Create the meshio object
 meshio_grid_to_export = meshio.Mesh(
 )

 # Write mesh information and data to VTK format.
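 # Illustrative aside, not part of the patch: meshio selects the writer from
 # the file suffix, so the "*.vtu" names assembled by _make_file_name below
 # map to the VTU format; binary=False requests the ASCII variant, which the
 # unit tests rely on when comparing the written files as plain strings.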
- meshio.write(file_name, meshio_grid_to_export, binary=self.binary)
+ meshio.write(file_name, meshio_grid_to_export, binary=self._binary)

 def _append_folder_name(
 self, folder_name: Optional[str] = None, name: str = ""
@@ -1893,7 +1957,7 @@ def _append_folder_name(
 Auxiliary method creating the folder structure if it does not exist yet
 and assembling the path for exporting.

- Parameters:
+ Args:
 folder_name (str, optional): name of the folder
 name (str): prefix of the name of the files to be written

@@ -1927,7 +1991,7 @@ def _make_file_name(
 and possibly the dimension of underlying grid and time (step)
 the related data is associated to.

- Parameters:
+ Args:
 file_name (str): prefix of the name of file to be exported
 time_step (Union[float, int], optional): time or time step (index)
 dim (int, optional): dimension of the exported grid

@@ -1940,7 +2004,7 @@ def _make_file_name(

 # Define non-empty time step extension including zero padding.
 time_extension = (
- "" if time_step is None else "_" + str(time_step).zfill(self.padding)
+ "" if time_step is None else "_" + str(time_step).zfill(self._padding)
 )

 # Define non-empty dim extension
diff --git a/tests/unit/test_vtk.py b/tests/unit/test_vtk.py
index 6b704aa136..34b8288b67 100644
--- a/tests/unit/test_vtk.py
+++ b/tests/unit/test_vtk.py
@@ -32,7 +32,7 @@ def test_single_subdomain_1d(self):
 dummy_vector = np.ones((3, sd.num_cells)) * sd.dim

 save = pp.Exporter(
- g,
+ sd,
 self.file_name,
 self.folder,
 binary=False,
@@ -52,7 +52,7 @@ def test_single_subdomain_2d_simplex_grid(self):
 dummy_vector = np.ones((3, sd.num_cells)) * sd.dim

 save = pp.Exporter(
- g,
+ sd,
 self.file_name,
 self.folder,
 binary=False,
@@ -72,7 +72,7 @@ def test_single_subdomain_2d_cart_grid(self):
 dummy_vector = np.ones((3, sd.num_cells)) * sd.dim

 save = pp.Exporter(
- g,
+ sd,
 self.file_name,
 self.folder,
 binary=False,
@@ -84,11 +84,11 @@ def test_single_subdomain_2d_cart_grid(self):
 content = self.sliceout(content_file.read())
 self.assertTrue(content == self._single_subdomain_2d_cart_grid_vtu())

- def test_single_grid_2d_polytop(self):
- g = pp.StructuredTriangleGrid([2] * 2, [1] * 2)
- g.compute_geometry()
- pp.coarsening.generate_coarse_grid(g, [0, 1, 3, 3, 1, 1, 2, 2])
- g.compute_geometry()
+ def test_single_subdomain_2d_polytop(self):
+ sd = pp.StructuredTriangleGrid([2] * 2, [1] * 2)
+ sd.compute_geometry()
+ pp.coarsening.generate_coarse_grid(sd, [0, 1, 3, 3, 1, 1, 2, 2])
+ sd.compute_geometry()
# TODO try out both Structured TriangleGrid and CartGrid
#=======
# def test_single_subdomain_2d_polytop_grid(self):
# sd = pp.CartGrid([3, 2], [1] * 2)
# sd.compute_geometry()
# pp.coarsening.generate_coarse_grid(sd, [0, 0, 1, 0, 1, 1])
# sd.compute_geometry()
#>>>>>>> upstream/grid_tree

 dummy_scalar = np.ones(sd.num_cells) * sd.dim
 dummy_vector = np.ones((3, sd.num_cells)) * sd.dim

 save = pp.Exporter(
- g,
+ sd,
 self.file_name,
 self.folder,
 binary=False,
@@ -122,7 +122,7 @@ def test_single_subdomain_3d_simplex_grid(self):
 dummy_vector = np.ones((3, sd.num_cells)) * sd.dim

 save = pp.Exporter(
- g,
+ sd,
 self.file_name,
 self.folder,
 binary=False,
@@ -142,7 +142,7 @@ def test_single_subdomain_3d_cart_grid(self):
 dummy_vector = np.ones((3, sd.num_cells)) * sd.dim

 save = pp.Exporter(
- g,
+ sd,
 self.file_name,
 self.folder,
 binary=False,
@@ -166,7 +166,7 @@ def test_single_subdomain_3d_polytop_grid(self):
 dummy_vector = np.ones((3, sd.num_cells)) * sd.dim

 save = pp.Exporter(
- g,
+ sd,
 self.file_name,
 self.folder,
 binary=False,
@@ -176,11 +176,11 @@ def test_single_subdomain_3d_polytop_grid(self):

 with open(self.folder + self.file_name + "_3.vtu", "r") as content_file:
 content = self.sliceout(content_file.read())
- self.assertTrue(content ==
self._single_grid_3d_polytop_grid_vtu()) + self.assertTrue(content == self._single_subdomain_3d_polytop_grid_vtu()) # NOTE: Suggest removing this test. - def test_gb_1(self): - gb, _ = pp.grid_buckets_2d.single_horizontal([4, 4], simplex=False) + def test_mdg_1(self): + mdg, _ = pp.grid_buckets_2d.single_horizontal([4, 4], simplex=False) for sd, sd_data in mdg.subdomains(return_data=True): pp.set_state( @@ -192,7 +192,7 @@ def test_gb_1(self): ) save = pp.Exporter( - gb, + mdg, self.file_name, self.folder, binary=False, @@ -237,7 +237,7 @@ def test_mdg_2(self): ) save = pp.Exporter( - gb, + mdg, self.file_name, self.folder, binary=False, @@ -257,38 +257,36 @@ def test_mdg_2(self): content = self.sliceout(content_file.read()) self.assertTrue(content == self._mdg_2_mortar_grid_1_vtu()) - def test_gb_3(self): - gb, _ = pp.grid_buckets_2d.two_intersecting( + def test_mdg_3(self): + mdg, _ = pp.grid_buckets_2d.two_intersecting( [4, 4], y_endpoints=[0.25, 0.75], simplex=False ) - gb.add_node_props(["dummy_scalar", "dummy_vector"]) - for g, d in gb: + for sd, sd_data in mdg.subdomains(return_data=True): pp.set_state( - d, + sd_data, { - "dummy_scalar": np.ones(g.num_cells) * g.dim, - "dummy_vector": np.ones((3, g.num_cells)) * g.dim, + "dummy_scalar": np.ones(sd.num_cells) * sd.dim, + "dummy_vector": np.ones((3, sd.num_cells)) * sd.dim, }, ) - for e, d in gb.edges(): - g = d["mortar_grid"] + for intf, intf_data in mdg.interfaces(return_data=True): pp.set_state( - d, + intf_data, { - "dummy_scalar": np.zeros(g.num_cells), - "unique_dummy_scalar": np.zeros(g.num_cells), + "dummy_scalar": np.zeros(intf.num_cells), + "unique_dummy_scalar": np.zeros(intf.num_cells), }, ) - subdomains_1d = gb.get_grids(lambda g: g.dim==1).tolist() - subdomains_2d = gb.get_grids(lambda g: g.dim==2).tolist() - g_2d = subdomains_2d[0] - interfaces = [e for e, d in gb.edges() if d["mortar_grid"].dim == 1] + subdomains_1d = mdg.subdomains(dim=1) + subdomains_2d = mdg.subdomains(dim=2) + sd_2d = subdomains_2d[0] + #interfaces_1d = mdg.interfaces(dim=1) # FIXME not used below save = pp.Exporter( - gb, + mdg, self.file_name, self.folder, binary=False, @@ -298,20 +296,20 @@ def test_gb_3(self): (subdomains_1d, "dummy_scalar"), "dummy_vector", "unique_dummy_scalar", - (g_2d, "cc", g_2d.cell_centers) + (sd_2d, "cc", sd_2d.cell_centers) ]) with open(self.folder + self.file_name + "_1.vtu", "r") as content_file: content = self.sliceout(content_file.read()) - self.assertTrue(content == self._gb_3_grid_1_vtu()) + self.assertTrue(content == self._mdg_3_grid_1_vtu()) with open(self.folder + self.file_name + "_2.vtu", "r") as content_file: content = self.sliceout(content_file.read()) - self.assertTrue(content == self._gb_3_grid_2_vtu()) + self.assertTrue(content == self._mdg_3_grid_2_vtu()) with open(self.folder + self.file_name + "_mortar_1.vtu", "r") as content_file: content = self.sliceout(content_file.read()) - self.assertTrue(content == self._gb_3_mortar_grid_1_vtu()) + self.assertTrue(content == self._mdg_3_mortar_grid_1_vtu()) def test_constant_data(self): g = pp.StructuredTriangleGrid([3] * 2, [1] * 2) @@ -3796,7 +3794,7 @@ def _single_subdomain_3d_simplex_grid_vtu(self): """ - def _single_grid_3d_cart_grid_vtu(self): + def _single_subdomain_3d_cart_grid_vtu(self): return """ @@ -5370,7 +5368,7 @@ def _single_grid_3d_cart_grid_vtu(self): """ - def _single_grid_3d_polytop_grid_vtu(self): + def _single_subdomain_3d_polytop_grid_vtu(self): return """ @@ -6093,7 +6091,7 @@ def _single_grid_3d_polytop_grid_vtu(self): """ - def 
_gb_1_grid_1_vtu(self): + def _mdg_1_grid_1_vtu(self): return """ @@ -6210,7 +6208,7 @@ def _gb_1_grid_1_vtu(self): """ - def _gb_1_grid_2_vtu(self): + def _mdg_1_grid_2_vtu(self): return """ @@ -6590,7 +6588,7 @@ def _gb_1_grid_2_vtu(self): """ - def _gb_1_mortar_grid_vtu(self): + def _mdg_1_mortar_grid_vtu(self): return """ @@ -6736,7 +6734,7 @@ def _gb_1_mortar_grid_vtu(self): """ - def _gb_2_grid_1_vtu(self): + def _mdg_2_grid_1_vtu(self): return """ @@ -6894,7 +6892,7 @@ def _gb_2_grid_1_vtu(self): """ - def _gb_2_grid_2_vtu(self): + def _mdg_2_grid_2_vtu(self): return """ @@ -7280,7 +7278,7 @@ def _gb_2_grid_2_vtu(self): """ - def _gb_2_mortar_grid_1_vtu(self): + def _mdg_2_mortar_grid_1_vtu(self): return """ @@ -7522,7 +7520,7 @@ def _gb_2_mortar_grid_1_vtu(self): """ - def _gb_3_grid_1_vtu(self): + def _mdg_3_grid_1_vtu(self): return """ @@ -7680,7 +7678,7 @@ def _gb_3_grid_1_vtu(self): """ - def _gb_3_grid_2_vtu(self): + def _mdg_3_grid_2_vtu(self): return """ @@ -8098,7 +8096,7 @@ def _gb_3_grid_2_vtu(self): """ - def _gb_3_mortar_grid_1_vtu(self): + def _mdg_3_mortar_grid_1_vtu(self): return """ From 573c36efc35012a618a9b6aef88cf8365348a950 Mon Sep 17 00:00:00 2001 From: Jakub Both Date: Fri, 1 Jul 2022 12:36:58 +0200 Subject: [PATCH 72/83] MAINT: Remove redundant code and comments. --- src/porepy/viz/exporter.py | 53 ++++++-------------------------------- 1 file changed, 8 insertions(+), 45 deletions(-) diff --git a/src/porepy/viz/exporter.py b/src/porepy/viz/exporter.py index 64028b6964..fba4f1e268 100644 --- a/src/porepy/viz/exporter.py +++ b/src/porepy/viz/exporter.py @@ -530,19 +530,6 @@ def _toVectorFormat( return value - # Aux. method: Check type for Interface - def isinstance_interface(intf: pp.MortarGrid) -> bool: - """ - Implementation of isinstance(intf, pp.MortarGrid). - """ - return isinstance(intf, pp.MortarGrid) - # return ( - # isinstance(e, tuple) - # and len(e) == 2 - # and isinstance(e[0], pp.Grid) - # and isinstance(e[1], pp.Grid) - # ) - # TODO rename pt to data_pt? # TODO typing def add_data_from_str( @@ -574,7 +561,9 @@ def _add_data( ) -> bool: if pp.STATE in grid_data and key in grid_data[pp.STATE]: # Fetch data and convert to vectorial format if suggested by the size - value: np.ndarray = _toVectorFormat(grid_data[pp.STATE][key], grid) + value: np.ndarray = _toVectorFormat( + grid_data[pp.STATE][key], grid + ) # Add data point in correct format to the collection export_data[(grid, key)] = value @@ -588,33 +577,11 @@ def _add_data( for sd, sd_data in self._mdg.subdomains(return_data=True): if _add_data(key, sd, sd_data, subdomain_data): has_key = True - # TODO rm - # if pp.STATE in sd_data and key in sd_data[pp.STATE]: - # # Mark the key as found - # has_key = True - # - # # Fetch data and convert to vectorial format if suggested by the size - # value: np.ndarray = _toVectorFormat(sd_data[pp.STATE][key], sd) - # - # # Add data point in correct format to the collection - # subdomain_data[(sd, key)] = value - - # TODO can we unify? 
for grid, grid_data in self._mdf.subdomains_and_interfaces(return_data=True)

 # Check data associated to interface field data
 for intf, intf_data in self._mdg.interfaces(return_data=True):
 if _add_data(key, intf, intf_data, interface_data):
 has_key = True
- # TODO rm
- # if pp.STATE in intf_data and key in intf_data[pp.STATE]:
- # # Mark the key as found
- # has_key = True
- #
- # # Fetch data and convert to vectorial format if suggested by the size
- # value = _toVectorFormat(intf_data[pp.STATE][key], intf)
- #
- # # Add data point in correct format to the collection
- # interface_data[(intf_data, key)] = value

 # Make sure the key exists
 if not has_key:
@@ -699,7 +666,7 @@ def add_data_from_tuple_interfaces_str(
 isinstance_tuple_interfaces_str = list(map(type, pt)) == [
 list,
 str,
- ] and all([isinstance_interface(intf) for intf in pt[0]])
+ ] and all([isinstance(intf, pp.MortarGrid) for intf in pt[0]])

 # If of correct type, convert to unique format and update subdomain data.
 if isinstance_tuple_interfaces_str:
@@ -746,13 +713,9 @@ def add_data_from_tuple_subdomain_str_array(
 # Implementation of isinstance(t, tuple[pp.Grid, str, np.ndarray]).
 # NOTE: The type of a grid is identifying the specific grid type (simplicial
 # etc.) Thus, isinstance is used here to detect whether a grid is provided.
- types = list(map(type, pt))
- isinstance_tuple_subdomain_str_array = (
- isinstance(pt[0], pp.Grid)
- and types[1:] == [str, np.ndarray]
- # TODO
- # list(map(type, pt))[1:] == [str, np.ndarray]
- )
+ isinstance_tuple_subdomain_str_array = isinstance(pt[0], pp.Grid) and list(
+ map(type, pt)
+ )[1:] == [str, np.ndarray]

 # Convert data to unique format and update the subdomain data dictionary.
 if isinstance_tuple_subdomain_str_array:
@@ -788,7 +751,7 @@ def add_data_from_tuple_interface_str_array(
 tuple,
 str,
 np.ndarray,
- ] and isinstance_interface(pt[0])
+ ] and isinstance(pt[0], pp.MortarGrid)

 # Convert data to unique format and update the interface data dictionary.
 if isinstance_tuple_interface_str_array:

From c125e16e96b1ed5720b2c4eb67b3cd4eb486800b Mon Sep 17 00:00:00 2001
From: Jakub Both
Date: Fri, 1 Jul 2022 12:42:14 +0200
Subject: [PATCH 73/83] STY: black, isort, flake8, mypy.

---
 src/porepy/viz/exporter.py | 84 +++++++++++++++++++-------------------
 1 file changed, 43 insertions(+), 41 deletions(-)

diff --git a/src/porepy/viz/exporter.py b/src/porepy/viz/exporter.py
index fba4f1e268..3e2e66b9ab 100644
--- a/src/porepy/viz/exporter.py
+++ b/src/porepy/viz/exporter.py
@@ -10,7 +10,7 @@
 import os
 import sys
 from collections import namedtuple
-from typing import Iterable, Optional, Union
+from typing import Any, Iterable, Optional, Union

 import meshio
 import numpy as np
@@ -533,7 +533,7 @@ def _toVectorFormat(
 # TODO rename pt to data_pt?
 # TODO typing
 def add_data_from_str(
- pt, subdomain_data: dict, interface_data: dict
+ data_pt, subdomain_data: dict, interface_data: dict
 ) -> tuple[dict, dict, bool]:
 """
 Check whether data is provided by a key of a field - could be both subdomain
@@ -545,10 +545,10 @@ def add_data_from_str(
 """

 # Only continue in case data is of type str
- if isinstance(pt, str):
+ if isinstance(data_pt, str):

 # Identify the key provided through the data.
- key = pt
+ key = data_pt
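 # Sketch of the unified target format (illustrative): a successful lookup
 # ends up as subdomain_data[(sd, key)] resp. interface_data[(intf, key)],
 # holding an array of shape (num_cells,) or, for vectorial quantities
 # normalized by _toVectorFormat, (3, num_cells).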
 # Initialize tag storing whether data corresponding to the key has been found.
 has_key = False
@@ -597,7 +597,9 @@ def _add_data(
 return subdomain_data, interface_data, False

 def add_data_from_tuple_subdomains_str(
- pt: tuple[list[pp.Grid], str], subdomain_data: dict, interface_data: dict
+ data_pt: tuple[list[pp.Grid], str],
+ subdomain_data: dict,
+ interface_data: dict,
 ) -> tuple[dict, dict, bool]:
 """
 Check whether data is provided as tuple (subdomains, key),
@@ -608,18 +610,18 @@ def add_data_from_tuple_subdomains_str(
 ValueError if there exists no state in the subdomain data with the given key
 """

- # Implementation of isinstance(pt, tuple[list[pp.Grid], str]).
- isinstance_tuple_subdomains_str = list(map(type, pt)) == [
+ # Implementation of isinstance(data_pt, tuple[list[pp.Grid], str]).
+ isinstance_tuple_subdomains_str = list(map(type, data_pt)) == [
 list,
 str,
- ] and all([isinstance(sd, pp.Grid) for sd in pt[0]])
+ ] and all([isinstance(sd, pp.Grid) for sd in data_pt[0]])

 # If of correct type, convert to unique format and update subdomain data.
 if isinstance_tuple_subdomains_str:

 # By construction, the first and second components are a list of grids and a key.
- subdomains: list[pp.Grid] = pt[0]
- key = pt[1]
+ subdomains: list[pp.Grid] = data_pt[0]
+ key = data_pt[1]

 # Loop over grids and fetch the states corresponding to the key
 for sd in subdomains:
@@ -649,7 +651,7 @@ def add_data_from_tuple_subdomains_str(

 # Aux. method: Detect and convert data of form ([interfaces], "key").
 def add_data_from_tuple_interfaces_str(
- pt, subdomain_data, interface_data
+ data_pt, subdomain_data, interface_data
 ) -> tuple[dict, dict, bool]:
 """
 Check whether data is provided as tuple (interfaces, key),
@@ -663,17 +665,17 @@ def add_data_from_tuple_interfaces_str(
 """

 # Implementation of isinstance(t, tuple[list[pp.MortarGrid], str]).
- isinstance_tuple_interfaces_str = list(map(type, pt)) == [
+ isinstance_tuple_interfaces_str = list(map(type, data_pt)) == [
 list,
 str,
- ] and all([isinstance(intf, pp.MortarGrid) for intf in pt[0]])
+ ] and all([isinstance(intf, pp.MortarGrid) for intf in data_pt[0]])

 # If of correct type, convert to unique format and update subdomain data.
 if isinstance_tuple_interfaces_str:

 # By construction, the first and second components are a list of interfaces and a key.
- interfaces: list[pp.MortarGrid] = pt[0]
- key = pt[1]
+ interfaces: list[pp.MortarGrid] = data_pt[0]
+ key = data_pt[1]

 # Loop over interfaces and fetch the states corresponding to the key
 for intf in interfaces:
@@ -702,7 +704,7 @@ def add_data_from_tuple_interfaces_str(
 return subdomain_data, interface_data, False

 def add_data_from_tuple_subdomain_str_array(
- pt, subdomain_data, interface_data
+ data_pt, subdomain_data, interface_data
 ) -> tuple[dict, dict, bool]:
 """
 Check whether data is provided as tuple (sd, key, data),
@@ -713,17 +715,17 @@ def add_data_from_tuple_subdomain_str_array(
 # Implementation of isinstance(t, tuple[pp.Grid, str, np.ndarray]).
 # NOTE: The type of a grid is identifying the specific grid type (simplicial
 # etc.) Thus, isinstance is used here to detect whether a grid is provided.
- isinstance_tuple_subdomain_str_array = isinstance(pt[0], pp.Grid) and list(
- map(type, pt)
- )[1:] == [str, np.ndarray]
+ isinstance_tuple_subdomain_str_array = isinstance(
+ data_pt[0], pp.Grid
+ ) and list(map(type, data_pt))[1:] == [str, np.ndarray]

 # Convert data to unique format and update the subdomain data dictionary.
 if isinstance_tuple_subdomain_str_array:

- # Interpret (sd, key, value) = (pt[0], pt[1], pt[2]);
- sd = pt[0]
- key = pt[1]
- value = _toVectorFormat(pt[2], sd)
+ # Interpret (sd, key, value) = (data_pt[0], data_pt[1], data_pt[2]);
+ sd = data_pt[0]
+ key = data_pt[1]
+ value = _toVectorFormat(data_pt[2], sd)

 # Add data point in correct format to collection
 subdomain_data[(sd, key)] = value
@@ -736,7 +738,7 @@ def add_data_from_tuple_subdomain_str_array(
 return subdomain_data, interface_data, False

 def add_data_from_tuple_interface_str_array(
- pt, subdomain_data: dict, interface_data: dict
+ data_pt, subdomain_data: dict, interface_data: dict
 ) -> tuple[dict, dict, bool]:
 """
 Check whether data is provided as tuple (g, key, data),
@@ -747,18 +749,18 @@ def add_data_from_tuple_interface_str_array(
 """

 # Implementation of isinstance(t, tuple[pp.MortarGrid, str, np.ndarray]).
- isinstance_tuple_interface_str_array = list(map(type, pt)) == [
+ isinstance_tuple_interface_str_array = list(map(type, data_pt)) == [
 tuple,
 str,
 np.ndarray,
- ] and isinstance(pt[0], pp.MortarGrid)
+ ] and isinstance(data_pt[0], pp.MortarGrid)

 # Convert data to unique format and update the interface data dictionary.
 if isinstance_tuple_interface_str_array:
- # Interpret (intf, key, value) = (pt[0], pt[1], pt[2]);
- intf = pt[0]
- key = pt[1]
- value = _toVectorFormat(pt[2], intf)
+ # Interpret (intf, key, value) = (data_pt[0], data_pt[1], data_pt[2]);
+ intf = data_pt[0]
+ key = data_pt[1]
+ value = _toVectorFormat(data_pt[2], intf)

 # Add data point in correct format to collection
 interface_data[(intf, key)] = value
@@ -773,7 +775,7 @@ def add_data_from_tuple_interface_str_array(

 # TODO can we unify to add_data_from_tuple_grid_str_array ?
 def add_data_from_tuple_str_array(
- pt, subdomain_data, interface_data
+ data_pt, subdomain_data, interface_data
 ) -> tuple[dict, dict, bool]:
 """
 Check whether data is provided by a tuple (key, data),
@@ -784,8 +786,8 @@ def add_data_from_tuple_str_array(
 ValueError if the mixed-dimensional grid contains more than one subdomain
 """

- # Implementation if isinstance(pt, tuple[str, np.ndarray].
- isinstance_tuple_str_array = list(map(type, pt)) == [str, np.ndarray]
+ # Implementation of isinstance(data_pt, tuple[str, np.ndarray]).
+ isinstance_tuple_str_array = list(map(type, data_pt)) == [str, np.ndarray]

 # Convert data to unique format and update the interface data dictionary.
 if isinstance_tuple_str_array:
@@ -795,15 +797,15 @@ def add_data_from_tuple_str_array(
 subdomains = self._mdg.subdomains()
 if not len(subdomains) == 1:
 raise ValueError(
- f"""The data type used for {pt} is only
+ f"""The data type used for {data_pt} is only
 supported if the mixed-dimensional grid only contains a single
 subdomain."""
 )

 # Fetch remaining ingredients required to define subdomain data element
 sd = subdomains[0]
- key = pt[0]
- value = _toVectorFormat(pt[1], sd)
+ key = data_pt[0]
+ value = _toVectorFormat(data_pt[1], sd)

 # Add data point in correct format to collection
 subdomain_data[(sd, key)] = value
@@ -846,9 +848,9 @@ def add_data_from_tuple_str_array(
 # For this, two separate dictionaries (for subdomain and interface data)
 # are used. The final format uses (subdomain/interface, key) as key of
 # the dictionaries, and the value is given by the corresponding data.
- for pt in data:
+ for data_pt in data:
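 # Note on the dispatch pattern implemented by the helpers above
 # (illustrative summary): each add_data_from_* probes the shape of data_pt,
 # e.g. via list(map(type, data_pt)) == [list, str], claims the data point on
 # a match, and otherwise returns the dictionaries unchanged so the next
 # candidate method in the loop below can be tried.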
- # Initialize tag storing whether the conversion process for pt is successful.
+ # Initialize tag storing whether the conversion process for data_pt is successful.
 success = False

 for method in methods:

 # Check whether data point of right type and convert to
 # the unique data type.
 subdomain_data, interface_data, success = method(
- pt, subdomain_data, interface_data
+ data_pt, subdomain_data, interface_data
 )

 # Stop once a supported data format has been detected.
@@ -866,7 +868,7 @@ def add_data_from_tuple_str_array(
 # Check whether the provided data has an unsupported data format.
 if not success:
 raise ValueError(
- f"Input data {pt} has wrong format and cannot be exported."
+ f"Input data {data_pt} has wrong format and cannot be exported."
 )

 return subdomain_data, interface_data
@@ -1032,7 +1034,7 @@ def _export_data_vtu(

 # Get all geometrical entities of dimension dim:
 if is_subdomain_data:
- entities = self._mdg.subdomains(dim=dim)
+ entities: list[Any] = self._mdg.subdomains(dim=dim)
 else:
 entities = self._mdg.interfaces(dim=dim)

From 871cf25731c3f90cce8877b7d690e2509de10590 Mon Sep 17 00:00:00 2001
From: Jakub Both
Date: Fri, 1 Jul 2022 13:39:21 +0200
Subject: [PATCH 74/83] STY: Mypy. Remove subscribing from dict.

---
 src/porepy/viz/exporter.py | 88 +++++++++++++++++++++++++-------------
 1 file changed, 58 insertions(+), 30 deletions(-)

diff --git a/src/porepy/viz/exporter.py b/src/porepy/viz/exporter.py
index 3e2e66b9ab..18f3d02b4f 100644
--- a/src/porepy/viz/exporter.py
+++ b/src/porepy/viz/exporter.py
@@ -24,25 +24,18 @@
 # for its storage, taking dimensions as inputs. Since 0d grids are
 # stored as 'None', allow for such values.
 Meshio_Geom = namedtuple("Meshio_Geom", ["pts", "connectivity", "cell_ids"])
-MD_Meshio_Geom = dict[int, Union[None, Meshio_Geom]]

 # All allowed data structures to define data for exporting
 DataInput = Union[
 # Keys for states
 str,
 # Subdomain specific data types
- tuple[list[pp.Grid], str],
+ tuple[list, str],
 tuple[pp.Grid, str, np.ndarray],
- tuple[str, np.ndarray],
- # Interface specific data types
- tuple[list[pp.MortarGrid], str],
 tuple[pp.MortarGrid, str, np.ndarray],
+ tuple[str, np.ndarray],
 ]

-# Data structure in which data is stored after preprocessing
-SubdomainData = dict[tuple[pp.Grid, str], np.ndarray]
-InterfaceData = dict[tuple[pp.MortarGrid, str], np.ndarray]
-

 class Exporter:
 """
@@ -143,12 +136,12 @@ def __init__(
 # Generate infrastructure for storing fixed-dimensional grids in
 # meshio format. Include all but the 0-d grids
 self._dims = np.unique([sd.dim for sd in self._mdg.subdomains() if sd.dim > 0])
- self.meshio_geom: MD_Meshio_Geom = dict()
+ self.meshio_geom = dict()

 # Generate infrastructure for storing fixed-dimensional mortar grids
 # in meshio format.
 self._m_dims = np.unique([intf.dim for intf in self._mdg.interfaces()])
- self.m_meshio_geom: MD_Meshio_Geom = dict()
+ self.m_meshio_geom = dict()

 # Generate geometrical information in meshio format
 self._update_meshio_geom()
@@ -465,7 +458,7 @@ def write_pvd(
 def _sort_and_unify_data(
 self,
 data=None, # ignore type which is essentially Union[DataInput, list[DataInput]]
- ) -> tuple[SubdomainData, InterfaceData]:
+ ) -> tuple[dict, dict]:
 """
 Preprocess data.
@@ -480,8 +473,8 @@ def _sort_and_unify_data(
 of subdomains/interfaces.

 Returns:
- tuple[SubdomainData, InterfaceData]: Subdomain and interface data decomposed and
- brought into unified format.
+ dict: Subdomain data in unified format.
+ dict: Interface data in unified format.

 Raises:
 ValueError if the data type provided is not supported
@@ -530,8 +523,6 @@ def _toVectorFormat(

 return value

- # TODO rename pt to data_pt?
- # TODO typing
 def add_data_from_str(
 data_pt, subdomain_data: dict, interface_data: dict
 ) -> tuple[dict, dict, bool]:
@@ -739,6 +730,42 @@ def add_data_from_tuple_subdomain_str_array(

 def add_data_from_tuple_interface_str_array(
 data_pt, subdomain_data: dict, interface_data: dict
+ ) -> tuple[dict, dict, bool]:
+ """
+ Check whether data is provided as tuple (intf, key, data),
+ where intf is a single interface, key is a string, and data is a user-defined
+ data array. This routine explicitly checks only for interface data.
+
+ Translation of add_data_from_tuple_subdomain_str_array to interfaces.
+ """
+
+ # Implementation of isinstance(t, tuple[pp.MortarGrid, str, np.ndarray]).
+ isinstance_tuple_interface_str_array = isinstance(
+ data_pt[0], pp.MortarGrid
+ ) and list(map(type, data_pt))[1:] == [
+ str,
+ np.ndarray,
+ ]
+
+ # Convert data to unique format and update the interface data dictionary.
+ if isinstance_tuple_interface_str_array:
+ # Interpret (intf, key, value) = (data_pt[0], data_pt[1], data_pt[2]);
+ intf = data_pt[0]
+ key = data_pt[1]
+ value = _toVectorFormat(data_pt[2], intf)
+
+ # Add data point in correct format to collection
+ interface_data[(intf, key)] = value
+
+ # Return updated dictionaries and indicate successful conversion.
+ return subdomain_data, interface_data, True
+
+ else:
+ # Return original data dictionaries and indicate no modification.
+ return subdomain_data, interface_data, False
+
+ def add_data_from_tuple_grid_str_array(
+ data_pt, subdomain_data: dict, interface_data: dict
 ) -> tuple[dict, dict, bool]:
 """
 Check whether data is provided as tuple (g, key, data),
@@ -749,11 +776,12 @@ def add_data_from_tuple_grid_str_array(
 """

 # Implementation of isinstance(t, tuple[pp.MortarGrid, str, np.ndarray]).
- isinstance_tuple_interface_str_array = list(map(type, data_pt)) == [
- tuple,
+ isinstance_tuple_interface_str_array = isinstance(
+ data_pt[0], pp.MortarGrid
+ ) and list(map(type, data_pt))[1:] == [
 str,
 np.ndarray,
- ] and isinstance(data_pt[0], pp.MortarGrid)
+ ]

 # Convert data to unique format and update the interface data dictionary.
 if isinstance_tuple_interface_str_array:
@@ -829,8 +857,8 @@ def add_data_from_tuple_str_array(
 data = [data]

 # Initialize container for data associated to subdomains and interfaces
- subdomain_data: SubdomainData = dict()
- interface_data: InterfaceData = dict()
+ subdomain_data = dict()
+ interface_data = dict()

 # Define methods to be used for checking the data type and performing
 # the conversion. This list implicitly also defines which input data is
@@ -975,7 +1003,7 @@ def add_data_from_tuple_str_array(

 def _export_data_vtu(
 self,
- data: Union[SubdomainData, InterfaceData],
+ data: dict,
 time_step: Optional[int],
 **kwargs,
 ) -> None:
@@ -988,7 +1016,7 @@ def _export_data_vtu(
 Analogously for interfaces.

 Args:
- data (Union[SubdomainData, InterfaceData]): Subdomain or interface data.
+ data (dict): Subdomain or interface data.
 time_step (int): time_step to be used to append the file name
 kwargs: Optional keyword arguments:
 'interface_data' (boolean) indicates whether data is associated to
@@ -1209,12 +1237,12 @@ def _export_grid_1d(self, grids: Iterable[pp.Grid]) -> Meshio_Geom:
 cell_type = "line"

 # Dictionary storing cell->nodes connectivity information
- cell_to_nodes: dict[str, np.ndarray] = {cell_type: np.empty((0, 2), dtype=int)}
+ cell_to_nodes: dict = {cell_type: np.empty((0, 2), dtype=int)}

 # Dictionary collecting all cell ids for each cell type.
 # Since each cell is a line, the list of cell ids is trivial
 total_num_cells = np.sum(np.array([grid.num_cells for grid in grids]))
- cell_id: dict[str, list[int]] = {cell_type: [i for i in range(total_num_cells)]}
+ cell_id: dict = {cell_type: [i for i in range(total_num_cells)]}

 # Data structure for storing node coordinates of all 1d grids.
 num_pts = np.sum([grid.num_nodes for grid in grids]).astype(int)
@@ -1314,9 +1342,9 @@ def _export_grid_2d(self, grids: Iterable[pp.Grid]) -> Meshio_Geom:
 # Dictionary storing cell->nodes connectivity information for all
 # cell types. For this, the nodes have to be sorted such that
 # they form a circular chain, describing the boundary of the cell.
- cell_to_nodes: dict[str, np.ndarray] = {}
+ cell_to_nodes: dict = {}
 # Dictionary collecting all cell ids for each cell type.
- cell_id: dict[str, list[int]] = {}
+ cell_id: dict = {}

 # Data structure for storing node coordinates of all 2d grids.
 num_pts = np.sum([grid.num_nodes for grid in grids])
@@ -1770,10 +1798,10 @@ def _export_polyhedron_3d(self, grids: Iterable[pp.Grid]) -> Meshio_Geom:
 # For the general geometric type "polyhedron", cell->faces->nodes will
 # be used to store connectivity information, which can become significantly
 # larger than cell->nodes information used for special type grids.
- cell_to_faces: dict[str, list[list[int]]] = {}
+ cell_to_faces: dict = {}

 # Dictionary collecting all cell ids for each cell type.
- cell_id: dict[str, list[int]] = {}
+ cell_id: dict = {}

 # Data structure for storing node coordinates of all 3d grids.
 num_pts = np.sum([grid.num_nodes for grid in grids])
@@ -1886,7 +1914,7 @@ def _write(
 ValueError if some data has wrong dimension
 """
 # Initialize empty cell data dictionary
- cell_data: dict[str, list[np.ndarray]] = {}
+ cell_data: dict = {}

 # Split the data for each group of geometrically uniform cells
 # Utilize meshio_geom for this.

From 5f4497bd90ad9ae21ca7dacca0476406bae9ef57 Mon Sep 17 00:00:00 2001
From: Jakub Both
Date: Fri, 1 Jul 2022 14:00:45 +0200
Subject: [PATCH 75/83] Revert "STY: Mypy. Remove subscribing from dict."

This reverts commit 871cf25731c3f90cce8877b7d690e2509de10590.

---
 src/porepy/viz/exporter.py | 88 +++++++++++++------------------------
 1 file changed, 30 insertions(+), 58 deletions(-)

diff --git a/src/porepy/viz/exporter.py b/src/porepy/viz/exporter.py
index 18f3d02b4f..3e2e66b9ab 100644
--- a/src/porepy/viz/exporter.py
+++ b/src/porepy/viz/exporter.py
@@ -24,18 +24,25 @@
 # for its storage, taking dimensions as inputs. Since 0d grids are
 # stored as 'None', allow for such values.
 Meshio_Geom = namedtuple("Meshio_Geom", ["pts", "connectivity", "cell_ids"])
+MD_Meshio_Geom = dict[int, Union[None, Meshio_Geom]]

 # All allowed data structures to define data for exporting
 DataInput = Union[
 # Keys for states
 str,
 # Subdomain specific data types
- tuple[list, str],
+ tuple[list[pp.Grid], str],
 tuple[pp.Grid, str, np.ndarray],
- tuple[pp.MortarGrid, str, np.ndarray],
 tuple[str, np.ndarray],
+ # Interface specific data types
+ tuple[list[pp.MortarGrid], str],
+ tuple[pp.MortarGrid, str, np.ndarray],
 ]

+# Data structure in which data is stored after preprocessing
+SubdomainData = dict[tuple[pp.Grid, str], np.ndarray]
+InterfaceData = dict[tuple[pp.MortarGrid, str], np.ndarray]
+

 class Exporter:
 """
@@ -136,12 +143,12 @@ def __init__(
 # Generate infrastructure for storing fixed-dimensional grids in
 # meshio format.
Include all but the 0-d grids self._dims = np.unique([sd.dim for sd in self._mdg.subdomains() if sd.dim > 0]) - self.meshio_geom = dict() + self.meshio_geom: MD_Meshio_Geom = dict() # Generate infrastructure for storing fixed-dimensional mortar grids # in meshio format. self._m_dims = np.unique([intf.dim for intf in self._mdg.interfaces()]) - self.m_meshio_geom = dict() + self.m_meshio_geom: MD_Meshio_Geom = dict() # Generate geometrical information in meshio format self._update_meshio_geom() @@ -458,7 +465,7 @@ def write_pvd( def _sort_and_unify_data( self, data=None, # ignore type which is essentially Union[DataInput, list[DataInput]] - ) -> tuple[dict, dict]: + ) -> tuple[SubdomainData, InterfaceData]: """ Preprocess data. @@ -473,8 +480,8 @@ def _sort_and_unify_data( of subdomains/interfaces. Returns: - dict: Subdomain data in unified format. - dict: Interface data in unified format. + tuple[SubdomainData, InterfaceData]: Subdomain and interface data decomposed and + brought into unified format. Raises: ValueError if the data type provided is not supported @@ -523,6 +530,8 @@ def _toVectorFormat( return value + # TODO rename pt to data_pt? + # TODO typing def add_data_from_str( data_pt, subdomain_data: dict, interface_data: dict ) -> tuple[dict, dict, bool]: @@ -730,42 +739,6 @@ def add_data_from_tuple_subdomain_str_array( def add_data_from_tuple_interface_str_array( data_pt, subdomain_data: dict, interface_data: dict - ) -> tuple[dict, dict, bool]: - """ - Check whether data is provided as tuple (intf, key, data), - where intf is a single interface, key is a string, and data is a user-defined - data array. This routine explicitly checks only for interface data. - - Translation of add_data_from_tuple_subdomain_str_array to interfaces. - """ - - # Implementation of isinstance(t, tuple[pp.MortarGrid, str, np.ndarray]). - isinstance_tuple_interface_str_array = isinstance( - data_pt[0], pp.MortarGrid - ) and list(map(type, data_pt))[1:] == [ - str, - np.ndarray, - ] - - # Convert data to unique format and update the interface data dictionary. - if isinstance_tuple_interface_str_array: - # Interpret (intf, key, value) = (data_pt[0], data_pt[1], data_pt[2]); - intf = data_pt[0] - key = data_pt[1] - value = _toVectorFormat(data_pt[2], intf) - - # Add data point in correct format to collection - interface_data[(intf, key)] = value - - # Return updated dictionaries and indicate succesful conversion. - return subdomain_data, interface_data, True - - else: - # Return original data dictionaries and indicate no modification. - return subdomain_data, interface_data, False - - def add_data_from_tuple_grid_str_array( - data_pt, subdomain_data: dict, interface_data: dict ) -> tuple[dict, dict, bool]: """ Check whether data is provided as tuple (g, key, data), @@ -776,12 +749,11 @@ def add_data_from_tuple_grid_str_array( """ # Implementation of isinstance(t, tuple[pp.MortarGrid, str, np.ndarray]). - isinstance_tuple_interface_str_array = isinstance( - data_pt[0], pp.MortarGrid - ) and list(map(type, data_pt))[1:] == [ + isinstance_tuple_interface_str_array = list(map(type, data_pt)) == [ + tuple, str, np.ndarray, - ] + ] and isinstance(data_pt[0], pp.MortarGrid) # Convert data to unique format and update the interface data dictionary. 
 if isinstance_tuple_interface_str_array:
@@ -857,8 +829,8 @@ def add_data_from_tuple_str_array(
 data = [data]

 # Initialize container for data associated to subdomains and interfaces
- subdomain_data = dict()
- interface_data = dict()
+ subdomain_data: SubdomainData = dict()
+ interface_data: InterfaceData = dict()

 # Define methods to be used for checking the data type and performing
 # the conversion. This list implicitly also defines which input data is
@@ -975,7 +1003,7 @@ def add_data_from_tuple_str_array(

 def _export_data_vtu(
 self,
- data: dict,
+ data: Union[SubdomainData, InterfaceData],
 time_step: Optional[int],
 **kwargs,
 ) -> None:
@@ -988,7 +1016,7 @@ def _export_data_vtu(
 Analogously for interfaces.

 Args:
- data (dict): Subdomain or interface data.
+ data (Union[SubdomainData, InterfaceData]): Subdomain or interface data.
 time_step (int): time_step to be used to append the file name
 kwargs: Optional keyword arguments:
 'interface_data' (boolean) indicates whether data is associated to
@@ -1237,12 +1209,12 @@ def _export_grid_1d(self, grids: Iterable[pp.Grid]) -> Meshio_Geom:
 cell_type = "line"

 # Dictionary storing cell->nodes connectivity information
- cell_to_nodes: dict = {cell_type: np.empty((0, 2), dtype=int)}
+ cell_to_nodes: dict[str, np.ndarray] = {cell_type: np.empty((0, 2), dtype=int)}

 # Dictionary collecting all cell ids for each cell type.
 # Since each cell is a line, the list of cell ids is trivial
 total_num_cells = np.sum(np.array([grid.num_cells for grid in grids]))
- cell_id: dict = {cell_type: [i for i in range(total_num_cells)]}
+ cell_id: dict[str, list[int]] = {cell_type: [i for i in range(total_num_cells)]}

 # Data structure for storing node coordinates of all 1d grids.
 num_pts = np.sum([grid.num_nodes for grid in grids]).astype(int)
@@ -1342,9 +1314,9 @@ def _export_grid_2d(self, grids: Iterable[pp.Grid]) -> Meshio_Geom:
 # Dictionary storing cell->nodes connectivity information for all
 # cell types. For this, the nodes have to be sorted such that
 # they form a circular chain, describing the boundary of the cell.
- cell_to_nodes: dict = {}
+ cell_to_nodes: dict[str, np.ndarray] = {}
 # Dictionary collecting all cell ids for each cell type.
- cell_id: dict = {}
+ cell_id: dict[str, list[int]] = {}

 # Data structure for storing node coordinates of all 2d grids.
 num_pts = np.sum([grid.num_nodes for grid in grids])
@@ -1798,10 +1770,10 @@ def _export_polyhedron_3d(self, grids: Iterable[pp.Grid]) -> Meshio_Geom:
 # For the general geometric type "polyhedron", cell->faces->nodes will
 # be used to store connectivity information, which can become significantly
 # larger than cell->nodes information used for special type grids.
- cell_to_faces: dict = {}
+ cell_to_faces: dict[str, list[list[int]]] = {}

 # Dictionary collecting all cell ids for each cell type.
- cell_id: dict = {}
+ cell_id: dict[str, list[int]] = {}

 # Data structure for storing node coordinates of all 3d grids.
 num_pts = np.sum([grid.num_nodes for grid in grids])
@@ -1914,7 +1886,7 @@ def _write(
 ValueError if some data has wrong dimension
 """
 # Initialize empty cell data dictionary
- cell_data: dict = {}
+ cell_data: dict[str, list[np.ndarray]] = {}

 # Split the data for each group of geometrically uniform cells
 # Utilize meshio_geom for this.
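A minimal usage sketch of the data formats unified above (illustrative only;
the grid factory and the state key are assumptions, mirroring the unit tests):

    import numpy as np
    import porepy as pp

    mdg, _ = pp.md_grids_2d.single_horizontal([4, 4], simplex=False)
    for sd, sd_data in mdg.subdomains(return_data=True):
        pp.set_state(sd_data, {"pressure": np.zeros(sd.num_cells)})

    sd_2d = mdg.subdomains(dim=2)[0]
    save = pp.Exporter(mdg, "example", "out", binary=False)
    save.write_vtu(
        [
            "pressure",                           # a state key, fetched on every grid
            (mdg.subdomains(dim=1), "pressure"),  # same key, restricted to 1d subdomains
            (sd_2d, "cc", sd_2d.cell_centers),    # an explicit array on one subdomain
        ]
    )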
From 8299385e807700102df7e09ceb397f32f70df974 Mon Sep 17 00:00:00 2001 From: Jakub Both Date: Fri, 1 Jul 2022 14:02:49 +0200 Subject: [PATCH 76/83] MAINT: Add annotations --- src/porepy/viz/exporter.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/porepy/viz/exporter.py b/src/porepy/viz/exporter.py index 3e2e66b9ab..60125ded30 100644 --- a/src/porepy/viz/exporter.py +++ b/src/porepy/viz/exporter.py @@ -7,6 +7,8 @@ time steps, a single pvd file takes care of the ordering of all printed vtu files. """ +from __future__ import annotations + import os import sys from collections import namedtuple From c42b5be6603fef7549d869aac0307c87ee4e02b2 Mon Sep 17 00:00:00 2001 From: Jakub Both Date: Fri, 1 Jul 2022 14:04:56 +0200 Subject: [PATCH 77/83] BUG: Use updated name for standard grids. --- tests/unit/test_vtk.py | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/tests/unit/test_vtk.py b/tests/unit/test_vtk.py index e561c7b890..a22663789c 100644 --- a/tests/unit/test_vtk.py +++ b/tests/unit/test_vtk.py @@ -89,14 +89,6 @@ def test_single_subdomain_2d_polytop(self): sd.compute_geometry() pp.coarsening.generate_coarse_grid(sd, [0, 1, 3, 3, 1, 1, 2, 2]) sd.compute_geometry() -# TODO try out both Structured TriangleGrid and CartGrid -#======= -# def test_single_subdomain_2d_polytop_grid(self): -# sd = pp.CartGrid([3, 2], [1] * 2) -# sd.compute_geometry() -# pp.coarsening.generate_coarse_grid(sd, [0, 0, 1, 0, 1, 1]) -# sd.compute_geometry() -#>>>>>>> upstream/grid_tree dummy_scalar = np.ones(sd.num_cells) * sd.dim dummy_vector = np.ones((3, sd.num_cells)) * sd.dim @@ -258,7 +250,7 @@ def test_mdg_2(self): self.assertTrue(content == self._mdg_2_mortar_grid_1_vtu()) def test_mdg_3(self): - mdg, _ = pp.grid_buckets_2d.two_intersecting( + mdg, _ = pp.mdi_grids_2d.two_intersecting( [4, 4], y_endpoints=[0.25, 0.75], simplex=False ) From d2b43e5e3d960a94d74c23c34c7ff33d53f7c473 Mon Sep 17 00:00:00 2001 From: Jakub Both Date: Fri, 1 Jul 2022 14:06:49 +0200 Subject: [PATCH 78/83] MAINT: Remove redundant comments. 
--- tests/unit/test_vtk.py | 49 +++++++++++++++++++++--------------------- 1 file changed, 25 insertions(+), 24 deletions(-) diff --git a/tests/unit/test_vtk.py b/tests/unit/test_vtk.py index a22663789c..aae99f9f7d 100644 --- a/tests/unit/test_vtk.py +++ b/tests/unit/test_vtk.py @@ -13,6 +13,7 @@ import porepy as pp + class MeshioExporterTest(unittest.TestCase): def __init__(self, methodName="runTest"): unittest.TestCase.__init__(self, methodName) @@ -36,7 +37,7 @@ def test_single_subdomain_1d(self): self.file_name, self.folder, binary=False, - export_constants_separately = False + export_constants_separately=False, ) save.write_vtu([("dummy_scalar", dummy_scalar), ("dummy_vector", dummy_vector)]) @@ -56,7 +57,7 @@ def test_single_subdomain_2d_simplex_grid(self): self.file_name, self.folder, binary=False, - export_constants_separately = False + export_constants_separately=False, ) save.write_vtu([("dummy_scalar", dummy_scalar), ("dummy_vector", dummy_vector)]) @@ -76,7 +77,7 @@ def test_single_subdomain_2d_cart_grid(self): self.file_name, self.folder, binary=False, - export_constants_separately = False + export_constants_separately=False, ) save.write_vtu([("dummy_scalar", dummy_scalar), ("dummy_vector", dummy_vector)]) @@ -98,7 +99,7 @@ def test_single_subdomain_2d_polytop(self): self.file_name, self.folder, binary=False, - export_constants_separately = False + export_constants_separately=False, ) save.write_vtu([("dummy_scalar", dummy_scalar), ("dummy_vector", dummy_vector)]) @@ -118,7 +119,7 @@ def test_single_subdomain_3d_simplex_grid(self): self.file_name, self.folder, binary=False, - export_constants_separately = False + export_constants_separately=False, ) save.write_vtu([("dummy_scalar", dummy_scalar), ("dummy_vector", dummy_vector)]) @@ -138,7 +139,7 @@ def test_single_subdomain_3d_cart_grid(self): self.file_name, self.folder, binary=False, - export_constants_separately = False + export_constants_separately=False, ) save.write_vtu([("dummy_scalar", dummy_scalar), ("dummy_vector", dummy_vector)]) @@ -162,7 +163,7 @@ def test_single_subdomain_3d_polytop_grid(self): self.file_name, self.folder, binary=False, - export_constants_separately = False + export_constants_separately=False, ) save.write_vtu([("dummy_scalar", dummy_scalar), ("dummy_vector", dummy_vector)]) @@ -188,7 +189,7 @@ def test_mdg_1(self): self.file_name, self.folder, binary=False, - export_constants_separately = False + export_constants_separately=False, ) save.write_vtu(["dummy_scalar", "dummy_vector"]) @@ -233,7 +234,7 @@ def test_mdg_2(self): self.file_name, self.folder, binary=False, - export_constants_separately = False + export_constants_separately=False, ) save.write_vtu(["dummy_scalar", "dummy_vector", "unique_dummy_scalar"]) @@ -272,24 +273,26 @@ def test_mdg_3(self): }, ) - subdomains_1d = mdg.subdomains(dim=1) - subdomains_2d = mdg.subdomains(dim=2) + subdomains_1d = mdg.subdomains(dim=1) + subdomains_2d = mdg.subdomains(dim=2) sd_2d = subdomains_2d[0] - #interfaces_1d = mdg.interfaces(dim=1) # FIXME not used below + # interfaces_1d = mdg.interfaces(dim=1) # FIXME not used below save = pp.Exporter( mdg, self.file_name, self.folder, binary=False, - export_constants_separately = False + export_constants_separately=False, + ) + save.write_vtu( + [ + (subdomains_1d, "dummy_scalar"), + "dummy_vector", + "unique_dummy_scalar", + (sd_2d, "cc", sd_2d.cell_centers), + ] ) - save.write_vtu([ - (subdomains_1d, "dummy_scalar"), - "dummy_vector", - "unique_dummy_scalar", - (sd_2d, "cc", sd_2d.cell_centers) - ]) with 
open(self.folder + self.file_name + "_1.vtu", "r") as content_file: content = self.sliceout(content_file.read()) @@ -323,12 +326,12 @@ def test_constant_data(self): content = self.sliceout(content_file.read()) self.assertTrue(content == self._nonconstant_data_grid_vtu()) - with open(self.folder + self.file_name + "_constant_2.vtu", "r") as content_file: + with open( + self.folder + self.file_name + "_constant_2.vtu", "r" + ) as content_file: content = self.sliceout(content_file.read()) self.assertTrue(content == self._constant_data_grid_vtu()) - # NOTE: This test is not directly testing Exporter, but the capability to export networks. - # Is this a used capability? If not, suggest removing the test and the corresponding capability. def test_fractures_2d(self): p = np.array([[0, 2, 1, 2, 1], [0, 0, 0, 1, 2]]) e = np.array([[0, 2, 3], [1, 3, 4]]) @@ -350,8 +353,6 @@ def test_fractures_2d(self): content = self.sliceout(content_file.read()) self.assertTrue(content == self._test_fractures_2d_vtu()) - # NOTE: This test is not directly testing Exporter, but the capability to export networks. - # Is this a used capability? If not, suggest removing the test and the corresponding capability. def test_fractures_3d(self): f_1 = pp.Fracture(np.array([[0, 1, 2, 0], [0, 0, 1, 1], [0, 0, 1, 1]])) f_2 = pp.Fracture( From a00f7f806c5c5e3542f7021e2dbad95cb0a10e3f Mon Sep 17 00:00:00 2001 From: Jakub Both Date: Fri, 1 Jul 2022 14:16:02 +0200 Subject: [PATCH 79/83] FIX: Use correct name for file. --- tests/unit/test_vtk.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/unit/test_vtk.py b/tests/unit/test_vtk.py index aae99f9f7d..dfc76e2d70 100644 --- a/tests/unit/test_vtk.py +++ b/tests/unit/test_vtk.py @@ -251,7 +251,7 @@ def test_mdg_2(self): self.assertTrue(content == self._mdg_2_mortar_grid_1_vtu()) def test_mdg_3(self): - mdg, _ = pp.mdi_grids_2d.two_intersecting( + mdg, _ = pp.md_grids_2d.two_intersecting( [4, 4], y_endpoints=[0.25, 0.75], simplex=False ) From cb21ee6f45d786a87841c73997ecb80bc882adf6 Mon Sep 17 00:00:00 2001 From: Jakub Both Date: Fri, 1 Jul 2022 14:18:16 +0200 Subject: [PATCH 80/83] MAINT: Add typing to routine using numba --- src/porepy/viz/exporter.py | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/src/porepy/viz/exporter.py b/src/porepy/viz/exporter.py index 60125ded30..e6e134adc2 100644 --- a/src/porepy/viz/exporter.py +++ b/src/porepy/viz/exporter.py @@ -114,7 +114,6 @@ def __init__( Raises: TypeError if grid has other type than pp.Grid or pp.MixedDimensional grid TypeError if kwargs contains unexpected keyword arguments - NotImplementedError if numba is not installed """ # Exporter is operating on mixed-dimensional grids. Convert to mixed-dimensional # grids if subdomain grid is provided. @@ -173,12 +172,6 @@ def __init__( # Parameter to be used in several occasions for adding time stamps. self._padding = 6 - # Require numba - if "numba" not in sys.modules: - raise NotImplementedError( - "The sorting algorithm requires numba to be installed." - ) - def add_constant_data( self, data: Optional[Union[DataInput, list[DataInput]]] = None, @@ -1671,11 +1664,22 @@ def _export_hexahedron_3d(self, grids: Iterable[pp.Grid]) -> Meshio_Geom: def _test_hex_meshio_format( self, nodes: np.ndarray, cn_indices: np.ndarray ) -> bool: + """Test whether the node numbering for each cell complies + with the hardcoded numbering used by meshio, cf. documentation + of meshio. 
+
+ Raises:
+ ImportError if numba is not installed
+ """

- import numba
+ try:
+ import numba
+ except ImportError:
+ raise ImportError("Numba not available on the system")

- @numba.jit(nopython=True)
- def _function_to_compile(nodes: np.ndarray, cn_indices: np.ndarray) -> bool:
+ # Just in time compilation
+ @numba.njit("b1(f8[:,:], i4[:,:])", cache=True)
+ def _function_to_compile(nodes, cn_indices):
 """
 Test whether the node numbering for each cell complies
 with the hardcoded numbering used by meshio, cf. documentation
 of meshio.

From 71e8c3fcd9291e1b310968bfedee8d29c59b96bf Mon Sep 17 00:00:00 2001
From: Jakub Both
Date: Fri, 1 Jul 2022 14:24:55 +0200
Subject: [PATCH 81/83] MAINT: Add typing to sort points when using numba

---
 src/porepy/utils/sort_points.py | 14 ++++++++++----
 1 file changed, 10 insertions(+), 4 deletions(-)

diff --git a/src/porepy/utils/sort_points.py b/src/porepy/utils/sort_points.py
index e76e4364ed..4ae918f11a 100644
--- a/src/porepy/utils/sort_points.py
+++ b/src/porepy/utils/sort_points.py
@@ -119,13 +119,19 @@ def sort_multiple_point_pairs(lines: np.ndarray) -> np.ndarray:

 Returns:
 np.ndarray: Sorted version of lines, where for each chain, the collection
 of line segments has been potentially flipped and sorted.
+
+ Raises:
+ ImportError if numba is not installed
 """

- import numba
+ try:
+ import numba
+ except ImportError:
+ raise ImportError("Numba not available on the system")

- @numba.jit(nopython=True)
- def _function_to_compile(lines: np.ndarray) -> np.ndarray:
+ @numba.njit("f8[:,:](i4[:,:])", cache=True)
+ def _function_to_compile(lines):
 """
 Copy of pp.utils.sort_points.sort_point_pairs. This version is extended
 to multiple chains. Each chain is implicitly assumed to be circular.

From 96ffa9ce8e75d005262855961ee0d87dae625eec Mon Sep 17 00:00:00 2001
From: Jakub Both
Date: Fri, 1 Jul 2022 14:26:41 +0200
Subject: [PATCH 82/83] MAINT: Make numba routine parallel for testing ordering
 of nodes

---
 src/porepy/viz/exporter.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/porepy/viz/exporter.py b/src/porepy/viz/exporter.py
index e6e134adc2..c87e86d997 100644
--- a/src/porepy/viz/exporter.py
+++ b/src/porepy/viz/exporter.py
@@ -1678,7 +1678,7 @@ def _test_hex_meshio_format(
 raise ImportError("Numba not available on the system")

 # Just in time compilation
- @numba.njit("b1(f8[:,:], i4[:,:])", cache=True)
+ @numba.njit("b1(f8[:,:], i4[:,:])", cache=True, parallel=True)
 def _function_to_compile(nodes, cn_indices):
 """
 Test whether the node numbering for each cell complies
@@ -1695,7 +1695,7 @@ def _function_to_compile(nodes, cn_indices):
 correct_format = True

 # Test each cell separately
- for i in range(cn_indices.shape[0]):
+ for i in numba.prange(cn_indices.shape[0]):

 # Assume initially that the cell is a hex
 is_hex = True

From 66dec105339457d7bdc46de50d37f7a33fb8e771 Mon Sep 17 00:00:00 2001
From: Jakub Both
Date: Fri, 1 Jul 2022 14:45:12 +0200
Subject: [PATCH 83/83] Fix: Use typing module instead of built-in typing,
 required for python 3.8

---
 src/porepy/viz/exporter.py | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/src/porepy/viz/exporter.py b/src/porepy/viz/exporter.py
index c87e86d997..fbe364a68b 100644
--- a/src/porepy/viz/exporter.py
+++ b/src/porepy/viz/exporter.py
@@ -12,7 +12,7 @@
 import os
 import sys
 from collections import namedtuple
-from typing import Any, Iterable, Optional, Union
+from typing import Any, Dict, Iterable, List, Optional, Tuple, Union
import meshio import numpy as np @@ -26,24 +26,24 @@ # for its storage, taking dimensions as inputs. Since 0d grids are # stored as 'None', allow for such values. Meshio_Geom = namedtuple("Meshio_Geom", ["pts", "connectivity", "cell_ids"]) -MD_Meshio_Geom = dict[int, Union[None, Meshio_Geom]] +MD_Meshio_Geom = Dict[int, Union[None, Meshio_Geom]] # All allowed data structures to define data for exporting DataInput = Union[ # Keys for states str, # Subdomain specific data types - tuple[list[pp.Grid], str], - tuple[pp.Grid, str, np.ndarray], - tuple[str, np.ndarray], + Tuple[List[pp.Grid], str], + Tuple[pp.Grid, str, np.ndarray], + Tuple[str, np.ndarray], # Interface specific data types - tuple[list[pp.MortarGrid], str], - tuple[pp.MortarGrid, str, np.ndarray], + Tuple[List[pp.MortarGrid], str], + Tuple[pp.MortarGrid, str, np.ndarray], ] # Data structure in which data is stored after preprocessing -SubdomainData = dict[tuple[pp.Grid, str], np.ndarray] -InterfaceData = dict[tuple[pp.MortarGrid, str], np.ndarray] +SubdomainData = Dict[Tuple[pp.Grid, str], np.ndarray] +InterfaceData = Dict[Tuple[pp.MortarGrid, str], np.ndarray] class Exporter:
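# Why this switch matters on Python 3.8 (an illustrative aside, not part of
# the patch): `from __future__ import annotations` only defers evaluation of
# annotations; module-level type aliases like the ones above are ordinary
# assignments executed at import time, and subscripting the built-in
# dict/list/tuple is supported only from Python 3.9 on. A minimal sketch:
#
#     from __future__ import annotations
#     from typing import Dict
#
#     def f() -> dict[int, str]:  # fine on 3.8: the annotation stays a string
#         return {}
#
#     # Alias = dict[int, str]    # TypeError on Python 3.8: evaluated eagerly
#     Alias = Dict[int, str]      # works on 3.8 and newer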