diff --git a/driver/pace/driver/driver.py b/driver/pace/driver/driver.py
index 95f6121aa..ff9ff08f1 100644
--- a/driver/pace/driver/driver.py
+++ b/driver/pace/driver/driver.py
@@ -594,9 +594,9 @@ def _critical_path_step_all(
         self.end_of_step_update(
             dycore_state=self.state.dycore_state,
             phy_state=self.state.physics_state,
-            u_dt=self.state.tendency_state.u_dt.storage,
-            v_dt=self.state.tendency_state.v_dt.storage,
-            pt_dt=self.state.tendency_state.pt_dt.storage,
+            u_dt=self.state.tendency_state.u_dt.data,
+            v_dt=self.state.tendency_state.v_dt.data,
+            pt_dt=self.state.tendency_state.pt_dt.data,
             dt=float(dt),
         )
         self._end_of_step_actions(step)
diff --git a/dsl/pace/dsl/dace/orchestration.py b/dsl/pace/dsl/dace/orchestration.py
index 6c0f62837..facf4bf28 100644
--- a/dsl/pace/dsl/dace/orchestration.py
+++ b/dsl/pace/dsl/dace/orchestration.py
@@ -36,6 +36,12 @@
 from pace.util.mpi import MPI
 
 
+try:
+    import cupy as cp
+except ImportError:
+    cp = None
+
+
 def dace_inhibitor(func: Callable):
     """Triggers callback generation wrapping `func` while doing DaCe parsing."""
     return func
@@ -43,9 +49,9 @@ def dace_inhibitor(func: Callable):
 
 def _upload_to_device(host_data: List[Any]):
     """Make sure any data that are still a gt4py.storage gets uploaded to device"""
-    for data in host_data:
-        if isinstance(data, gt4py.storage.Storage):
-            data.host_to_device()
+    for i, data in enumerate(host_data):
+        if isinstance(data, cp.ndarray):
+            host_data[i] = cp.asarray(data)
 
 
 def _download_results_from_dace(
@@ -55,9 +61,7 @@ def _download_results_from_dace(
     gt4py_results = None
     if dace_result is not None:
         for arg in args:
-            if isinstance(arg, gt4py.storage.Storage) and hasattr(
-                arg, "_set_device_modified"
-            ):
+            if isinstance(arg, cp.ndarray) and hasattr(arg, "_set_device_modified"):
                 arg._set_device_modified()
     if config.is_gpu_backend():
         gt4py_results = [
diff --git a/dsl/pace/dsl/gt4py_utils.py b/dsl/pace/dsl/gt4py_utils.py
index d21db1586..325b5009f 100644
--- a/dsl/pace/dsl/gt4py_utils.py
+++ b/dsl/pace/dsl/gt4py_utils.py
@@ -1,9 +1,8 @@
 import logging
 from functools import wraps
-from typing import Any, Callable, Dict, List, Optional, Tuple, Union
+from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
 
-import gt4py.backend
-import gt4py.storage as gt_storage
+import gt4py
 import numpy as np
 
 from pace.dsl.typing import DTypes, Field, Float, FloatField
@@ -50,6 +49,30 @@ def wrapper(*args, **kwargs) -> Any:
     return inner
 
 
+def _mask_to_dimensions(
+    mask: Tuple[bool, ...], shape: Sequence[int]
+) -> List[Union[str, int]]:
+    assert len(mask) == 3
+    dimensions: List[Union[str, int]] = []
+    for i, axis in enumerate(("I", "J", "K")):
+        if mask[i]:
+            dimensions.append(axis)
+    offset = int(sum(mask))
+    dimensions.extend(shape[offset:])
+    return dimensions
+
+
+def _interpolate_origin(origin: Tuple[int, ...], mask: Tuple[bool, ...]) -> List[int]:
+    assert len(mask) == 3
+    final_origin: List[int] = []
+    for i, has_axis in enumerate(mask):
+        if has_axis:
+            final_origin.append(origin[i])
+
+    final_origin.extend(origin[len(mask) :])
+    return final_origin
+
+
 def make_storage_data(
     data: Field,
     shape: Optional[Tuple[int, ...]] = None,
@@ -118,6 +141,11 @@ def make_storage_data(
         default_mask = (n_dims * (True,)) + ((max_dim - n_dims) * (False,))
     mask = default_mask
+    # Convert to `dimensions` which is the new parameter type that gt4py accepts.
+
     if n_dims == 1:
         data = _make_storage_data_1d(
             data, shape, start, dummy, axis, read_only, backend=backend
         )
@@ -129,14 +157,12 @@ def make_storage_data(
     else:
         data = _make_storage_data_3d(data, shape, start, backend=backend)
 
-    storage = gt_storage.from_array(
-        data=data,
+    storage = gt4py.storage.from_array(
+        data,
+        dtype,
         backend=backend,
-        default_origin=origin,
-        shape=shape,
-        dtype=dtype,
-        mask=mask,
-        managed_memory=managed_memory,
+        aligned_index=_interpolate_origin(origin, mask),
+        dimensions=_mask_to_dimensions(mask, data.shape),
     )
 
     return storage
 
@@ -264,13 +290,12 @@ def make_storage_from_shape(
         mask = (False, False, True)  # Assume 1D is a k-field
     else:
         mask = (n_dims * (True,)) + ((3 - n_dims) * (False,))
-    storage = gt_storage.zeros(
+    storage = gt4py.storage.zeros(
+        shape,
+        dtype,
         backend=backend,
-        default_origin=origin,
-        shape=shape,
-        dtype=dtype,
-        mask=mask,
-        managed_memory=managed_memory,
+        aligned_index=_interpolate_origin(origin, mask),
+        dimensions=_mask_to_dimensions(mask, shape),
     )
 
     return storage
 
@@ -340,8 +365,6 @@ def k_split_run(func, data, k_indices, splitvars_values):
 
 
 def asarray(array, to_type=np.ndarray, dtype=None, order=None):
-    if isinstance(array, gt_storage.storage.Storage):
-        array = array.data
     if cp and (isinstance(array, list)):
         if to_type is np.ndarray:
             order = "F" if order is None else order
@@ -379,19 +402,15 @@ def is_gpu_backend(backend: str) -> bool:
 
 def zeros(shape, dtype=Float, *, backend: str):
     storage_type = cp.ndarray if is_gpu_backend(backend) else np.ndarray
     xp = cp if cp and storage_type is cp.ndarray else np
-    return xp.zeros(shape)
+    return xp.zeros(shape, dtype=dtype)
 
 
 def sum(array, axis=None, dtype=Float, out=None, keepdims=False):
-    if isinstance(array, gt_storage.storage.Storage):
-        array = array.data
     xp = cp if cp and type(array) is cp.ndarray else np
     return xp.sum(array, axis, dtype, out, keepdims)
 
 
 def repeat(array, repeats, axis=None):
-    if isinstance(array, gt_storage.storage.Storage):
-        array = array.data
     xp = cp if cp and type(array) is cp.ndarray else np
     return xp.repeat(array, repeats, axis)
 
@@ -401,22 +420,16 @@ def index(array, key):
 
 
 def moveaxis(array, source: int, destination: int):
-    if isinstance(array, gt_storage.storage.Storage):
-        array = array.data
     xp = cp if cp and type(array) is cp.ndarray else np
     return xp.moveaxis(array, source, destination)
 
 
 def tile(array, reps: Union[int, Tuple[int, ...]]):
-    if isinstance(array, gt_storage.storage.Storage):
-        array = array.data
     xp = cp if cp and type(array) is cp.ndarray else np
     return xp.tile(array, reps)
 
 
 def squeeze(array, axis: Union[int, Tuple[int]] = None):
-    if isinstance(array, gt_storage.storage.Storage):
-        array = array.data
     xp = cp if cp and type(array) is cp.ndarray else np
     return xp.squeeze(array, axis)
 
@@ -444,8 +457,6 @@ def unique(
     return_counts: bool = False,
     axis: Union[int, Tuple[int]] = None,
 ):
-    if isinstance(array, gt_storage.storage.Storage):
-        array = array.data
     xp = cp if cp and type(array) is cp.ndarray else np
     return xp.unique(array, return_index, return_inverse, return_counts, axis)
 
@@ -453,8 +464,6 @@ def unique(
 def stack(tup, axis: int = 0, out=None):
     array_tup = []
     for array in tup:
-        if isinstance(array, gt_storage.storage.Storage):
-            array = array.data
         array_tup.append(array)
     xp = cp if cp and type(array_tup[0]) is cp.ndarray else np
     return xp.stack(array_tup, axis, out)
diff --git a/dsl/pace/dsl/stencil.py b/dsl/pace/dsl/stencil.py
index 889946ebf..8b62420a6 100644
--- a/dsl/pace/dsl/stencil.py
+++ b/dsl/pace/dsl/stencil.py
@@ -20,7 +20,6 @@
 import gt4py
 import numpy as np
 from gt4py import gtscript
-from gt4py.storage.storage import Storage
 from gtc.passes.oir_pipeline import DefaultPipeline, OirPipeline
 
 import pace.dsl.gt4py_utils as gt4py_utils
@@ -34,6 +33,12 @@
 from pace.util.mpi import MPI
 
 
+try:
+    import cupy as cp
+except ImportError:
+    cp = np
+
+
 def report_difference(args, kwargs, args_copy, kwargs_copy, function_name, gt_id):
     report_head = f"comparing against numpy for func {function_name}, gt_id {gt_id}:"
     report_segments = []
@@ -431,7 +436,7 @@ def __call__(self, *args, **kwargs) -> None:
                 f"after calling {self._func_name}"
             )
 
-    def _mark_cuda_fields_written(self, fields: Mapping[str, Storage]):
+    def _mark_cuda_fields_written(self, fields: Mapping[str, cp.ndarray]):
         if self.stencil_config.is_gpu_backend:
             for write_field in self._written_fields:
                 fields[write_field]._set_device_modified()
@@ -520,15 +525,11 @@ def closure_resolver(self, constant_args, given_args, parent_closure=None):
 
 def _convert_quantities_to_storage(args, kwargs):
     for i, arg in enumerate(args):
-        try:
-            args[i] = arg.storage
-        except AttributeError:
-            pass
+        if isinstance(arg, pace.util.Quantity):
+            args[i] = arg.data
     for name, arg in kwargs.items():
-        try:
-            kwargs[name] = arg.storage
-        except AttributeError:
-            pass
+        if isinstance(arg, pace.util.Quantity):
+            kwargs[name] = arg.data
 
 
 class GridIndexing:
diff --git a/fv3core/examples/standalone/runfile/acoustics.py b/fv3core/examples/standalone/runfile/acoustics.py
index e0bd17146..4f898e05c 100755
--- a/fv3core/examples/standalone/runfile/acoustics.py
+++ b/fv3core/examples/standalone/runfile/acoustics.py
@@ -62,13 +62,10 @@ def get_state_from_input(
 ) -> Dict[str, SimpleNamespace]:
     """
     Transforms the input data from the dictionary of strings
-    to arrays into a state we can pass in
-
-    Input is a dict of arrays. These are transformed into Storage arrays
-    useable in GT4Py
+    to arrays into a state we can pass in.
 
     This will also take care of reshaping the arrays into same sized
-    fields as required by the acoustics
+    fields as required by the acoustics.
""" driver_object = TranslateDynCore([grid], namelist, stencil_config) driver_object._base.make_storage_data_input_vars(input_data) diff --git a/fv3core/pace/fv3core/initialization/dycore_state.py b/fv3core/pace/fv3core/initialization/dycore_state.py index 961e1c8ae..51dc7e9ab 100644 --- a/fv3core/pace/fv3core/initialization/dycore_state.py +++ b/fv3core/pace/fv3core/initialization/dycore_state.py @@ -305,7 +305,7 @@ def init_zeros(cls, quantity_factory: pace.util.QuantityFactory): if "dims" in _field.metadata.keys(): initial_storages[_field.name] = quantity_factory.zeros( _field.metadata["dims"], _field.metadata["units"], dtype=float - ).storage + ).data return cls.init_from_storages( storages=initial_storages, sizer=quantity_factory.sizer ) diff --git a/physics/pace/physics/physics_state.py b/physics/pace/physics/physics_state.py index 7499c393d..148546467 100644 --- a/physics/pace/physics/physics_state.py +++ b/physics/pace/physics/physics_state.py @@ -317,14 +317,14 @@ def __post_init__( @classmethod def init_zeros(cls, quantity_factory, active_packages: List[str]) -> "PhysicsState": - initial_storages = {} + initial_arrays = {} for _field in fields(cls): if "dims" in _field.metadata.keys(): - initial_storages[_field.name] = quantity_factory.zeros( + initial_arrays[_field.name] = quantity_factory.zeros( _field.metadata["dims"], _field.metadata["units"], dtype=float - ).storage + ).data return cls( - **initial_storages, + **initial_arrays, quantity_factory=quantity_factory, active_packages=active_packages, ) diff --git a/stencils/pace/stencils/testing/temporaries.py b/stencils/pace/stencils/testing/temporaries.py index 2702cc4d3..581387f6d 100644 --- a/stencils/pace/stencils/testing/temporaries.py +++ b/stencils/pace/stencils/testing/temporaries.py @@ -1,7 +1,6 @@ import copy from typing import List -import gt4py import numpy as np import pace.util @@ -15,7 +14,7 @@ def copy_temporaries(obj, max_depth: int) -> dict: attr = getattr(obj, attr_name) except AttributeError: attr = None - if isinstance(attr, (gt4py.storage.storage.Storage, pace.util.Quantity)): + if isinstance(attr, pace.util.Quantity): temporaries[attr_name] = copy.deepcopy(np.asarray(attr.data)) elif attr.__class__.__module__.split(".")[0] in ( # type: ignore "fv3core", diff --git a/tests/main/driver/test_restart_serial.py b/tests/main/driver/test_restart_serial.py index 6cd6d0337..c051ad683 100644 --- a/tests/main/driver/test_restart_serial.py +++ b/tests/main/driver/test_restart_serial.py @@ -2,7 +2,6 @@ import shutil from datetime import datetime -import gt4py import numpy as np import xarray as xr import yaml @@ -114,7 +113,7 @@ def test_restart_save_to_disk(): for var in driver_state.physics_state.__dict__.keys(): if isinstance( driver_state.physics_state.__dict__[var], - gt4py.storage.storage.CPUStorage, + np.ndarray, ): np.testing.assert_allclose( driver_state.physics_state.__dict__[var].data, diff --git a/tests/main/dsl/test_stencil.py b/tests/main/dsl/test_stencil.py index 4b6477484..4661139d3 100644 --- a/tests/main/dsl/test_stencil.py +++ b/tests/main/dsl/test_stencil.py @@ -11,15 +11,13 @@ def _make_storage( stencil_config: pace.dsl.StencilConfig, *, dtype=float, - mask=None, - default_origin=(0, 0, 0), + aligned_index=(0, 0, 0), ): return func( backend=stencil_config.compilation_config.backend, shape=grid_indexing.domain, dtype=dtype, - mask=mask, - default_origin=default_origin, + aligned_index=aligned_index, ) diff --git a/tests/main/dsl/test_stencil_wrapper.py b/tests/main/dsl/test_stencil_wrapper.py index 
--- a/tests/main/dsl/test_stencil_wrapper.py
+++ b/tests/main/dsl/test_stencil_wrapper.py
@@ -315,7 +315,7 @@ def test_convert_quantities_to_storage_one_arg_quantity():
     kwargs = {}
     _convert_quantities_to_storage(args, kwargs)
     assert len(args) == 1
-    assert args[0] == quantity.storage
+    assert args[0] == quantity.data
     assert len(kwargs) == 0
 
@@ -326,7 +326,7 @@ def test_convert_quantities_to_storage_one_kwarg_quantity():
     _convert_quantities_to_storage(args, kwargs)
     assert len(args) == 0
     assert len(kwargs) == 1
-    assert kwargs["val"] == quantity.storage
+    assert kwargs["val"] == quantity.data
 
 
 def test_convert_quantities_to_storage_one_arg_nonquantity():
diff --git a/tests/main/fv3core/test_dycore_call.py b/tests/main/fv3core/test_dycore_call.py
index a75e26dbd..8e5298af3 100644
--- a/tests/main/fv3core/test_dycore_call.py
+++ b/tests/main/fv3core/test_dycore_call.py
@@ -193,8 +193,8 @@ def test_call_does_not_allocate_storages():
     def error_func(*args, **kwargs):
         raise AssertionError("call not allowed")
 
-    with unittest.mock.patch("gt4py.storage.storage.zeros", new=error_func):
-        with unittest.mock.patch("gt4py.storage.storage.empty", new=error_func):
+    with unittest.mock.patch("gt4py.storage.zeros", new=error_func):
+        with unittest.mock.patch("gt4py.storage.empty", new=error_func):
             dycore.step_dynamics(state, timer)
diff --git a/tests/main/physics/test_integration.py b/tests/main/physics/test_integration.py
index c0d504937..57ca28248 100644
--- a/tests/main/physics/test_integration.py
+++ b/tests/main/physics/test_integration.py
@@ -70,8 +70,8 @@ def setup_physics():
     for field in fields(pace.physics.PhysicsState):
         array = getattr(physics_state, field.name)
-        # check that it's a storage this way, because Field is not a class
-        if hasattr(array, "data"):
-            array.data[:] = random.uniform(-1, 1, size=array.data.shape)
+        # check that this is an array field this way, because Field is not a class
+        if isinstance(array, np.ndarray):
+            array[:] = random.uniform(-1, 1, size=array.shape)
     return physics, physics_state
diff --git a/util/HISTORY.md b/util/HISTORY.md
index b88c6d906..5b8dda777 100644
--- a/util/HISTORY.md
+++ b/util/HISTORY.md
@@ -23,6 +23,7 @@ Minor changes:
 - fixed a bug in `pace.util.grid` where `_reduce_global_area_minmaxes` would use local values instead of the gathered ones
 - Added .cleanup() method to ZarrMonitor, used only for API compatibility with NetCDFMonitor and does nothing
 - ZarrMonitor.partitioner may now be any Partitioner and not just a CubedSpherePartitioner
+- Quantity no longer has a `storage` attribute; the underlying ndarray is directly accessible through the `data` attribute.
 
 Minor changes:
 - Fixed a bug in normalize_vector(xyz) in `pace.util.grid.gnomonic` where it would divide the input by cells-per-tile, where it should not.
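The HISTORY entry above is the user-facing summary of this refactor; the migration for downstream code is a one-line change per access site. A minimal sketch, assuming an existing `pace.util.Quantity` instance named `quantity` (illustrative only, not code from this diff):

    # before this change, the backing gt4py storage was reached through the
    # now-removed `storage` attribute:
    #     field = quantity.storage
    # after this change, the backing numpy or cupy ndarray is reached directly:
    field = quantity.data
    field[:] = 0.0  # plain ndarray semantics; no host_to_device()/device-view calls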
diff --git a/util/pace/util/checkpointer/thresholds.py b/util/pace/util/checkpointer/thresholds.py
index d7a3b12ca..cef708992 100644
--- a/util/pace/util/checkpointer/thresholds.py
+++ b/util/pace/util/checkpointer/thresholds.py
@@ -3,16 +3,21 @@
 import dataclasses
 from typing import Dict, List, Mapping, Union
 
-import gt4py.storage
 import numpy as np
 
 from ..quantity import Quantity
 from .base import Checkpointer
 
+try:
+    import cupy as cp
+except ImportError:
+    cp = np
+
+
 SavepointName = str
 VariableName = str
 
-ArrayLike = Union[Quantity, gt4py.storage.Storage, np.ndarray]
+ArrayLike = Union[Quantity, cp.ndarray, np.ndarray]
 
 
 class InsufficientTrialsError(Exception):
diff --git a/util/pace/util/grid/generation.py b/util/pace/util/grid/generation.py
index 04f95e0c8..defb364d5 100644
--- a/util/pace/util/grid/generation.py
+++ b/util/pace/util/grid/generation.py
@@ -2356,10 +2356,10 @@ def _calculate_2d_edge_a2c_vect_factors(self):
         return edge_vect_e_2d, edge_vect_w_2d
 
     def _reduce_global_area_minmaxes(self):
-        min_area = self._np.min(self.area.storage[3:-4, 3:-4])[()]
-        max_area = self._np.max(self.area.storage[3:-4, 3:-4])[()]
-        min_area_c = self._np.min(self.area_c.storage[3:-4, 3:-4])[()]
-        max_area_c = self._np.max(self.area_c.storage[3:-4, 3:-4])[()]
+        min_area = self._np.min(self.area.data[3:-4, 3:-4])[()]
+        max_area = self._np.max(self.area.data[3:-4, 3:-4])[()]
+        min_area_c = self._np.min(self.area_c.data[3:-4, 3:-4])[()]
+        max_area_c = self._np.max(self.area_c.data[3:-4, 3:-4])[()]
         self._da_min = float(self._comm.comm.allreduce(min_area, min))
         self._da_max = float(self._comm.comm.allreduce(max_area, max))
         self._da_min_c = float(self._comm.comm.allreduce(min_area_c, min))
diff --git a/util/pace/util/initialization/allocator.py b/util/pace/util/initialization/allocator.py
index f26e4fb27..251c3da19 100644
--- a/util/pace/util/initialization/allocator.py
+++ b/util/pace/util/initialization/allocator.py
@@ -3,7 +3,7 @@
 import numpy as np
 
 from .._optional_imports import gt4py
-from ..constants import SPATIAL_DIMS, X_DIMS, Y_DIMS, Z_DIMS
+from ..constants import SPATIAL_DIMS
 from ..quantity import Quantity
 from .sizer import GridSizer
 
@@ -26,9 +26,16 @@ def __init__(self, backend: str):
         Args:
             backend: gt4py backend
         """
-        self.empty = _wrap_storage_call(gt4py.storage.empty, backend)
-        self.zeros = _wrap_storage_call(gt4py.storage.zeros, backend)
-        self.ones = _wrap_storage_call(gt4py.storage.ones, backend)
+        self.backend = backend
+
+    def empty(self, *args, **kwargs) -> np.ndarray:
+        return gt4py.storage.empty(*args, backend=self.backend, **kwargs)
+
+    def ones(self, *args, **kwargs) -> np.ndarray:
+        return gt4py.storage.ones(*args, backend=self.backend, **kwargs)
+
+    def zeros(self, *args, **kwargs) -> np.ndarray:
+        return gt4py.storage.zeros(*args, backend=self.backend, **kwargs)
 
 
 class QuantityFactory:
@@ -103,17 +110,18 @@ def _allocate(
         origin = self.sizer.get_origin(dims)
         extent = self.sizer.get_extent(dims)
         shape = self.sizer.get_shape(dims)
-        mask = tuple(
-            [
-                any(dim in coord_dims for dim in dims)
-                for coord_dims in [X_DIMS, Y_DIMS, Z_DIMS]
-            ]
-        )
-        extra_dims = [i for i in dims if i not in SPATIAL_DIMS]
-        if len(extra_dims) > 0 or not dims:
-            mask = None
+        dimensions = [
+            axis
+            if any(dim in axis_dims for axis_dims in SPATIAL_DIMS)
+            else str(shape[index])
+            for index, (dim, axis) in enumerate(
+                zip(dims, ("I", "J", "K", *([None] * (len(dims) - 3))))
+            )
+        ]
         try:
-            data = allocator(shape, dtype=dtype, default_origin=origin, mask=mask)
+            data = allocator(
+                shape, dtype=dtype, aligned_index=origin, dimensions=dimensions
+            )
         except TypeError:
             data = allocator(shape, dtype=dtype)
         return Quantity(data, dims=dims, units=units, origin=origin, extent=extent)
diff --git a/util/pace/util/quantity.py b/util/pace/util/quantity.py
index e3c5b923a..073bd5f16 100644
--- a/util/pace/util/quantity.py
+++ b/util/pace/util/quantity.py
@@ -280,47 +280,30 @@ def __init__(
             extent = tuple(extent)
 
         if isinstance(data, (int, float, list)):
+            # If converting basic data, use a numpy ndarray.
             data = np.asarray(data)
-        elif gt4py is not None and isinstance(data, gt4py.storage.storage.Storage):
-            if gt4py_backend is not None:
-                raise TypeError(
-                    "cannot select gt4py backend with keyword argument "
-                    "when providing storage as data"
-                )
-            else:
-                gt4py_backend = data.backend
-            if isinstance(data, gt4py.storage.storage.GPUStorage):
-                self._storage = data
-                self._data = data.gpu_view
-            elif isinstance(data, gt4py.storage.storage.CPUStorage):
-                self._storage = data
-                self._data = data.data
-            else:
+
+        if gt4py_backend is not None:
+            if not isinstance(data, (np.ndarray, cupy.ndarray)):
                 raise TypeError(
-                    "only storages supported are CPUStorage and GPUStorage, "
-                    f"got {type(data)}"
+                    f"Only supports numpy.ndarray and cupy.ndarray, got {type(data)}"
                 )
-        elif gt4py_backend is not None:
-            extra_dims = [i for i in dims if i not in constants.SPATIAL_DIMS]
-            if len(extra_dims) > 0 or not dims:
-                mask = None
-            else:
-                mask = tuple(
-                    [
-                        any(dim in coord_dims for dim in dims)
-                        for coord_dims in [
-                            constants.X_DIMS,
-                            constants.Y_DIMS,
-                            constants.Z_DIMS,
-                        ]
-                    ]
-                )
-            self._storage, self._data = self._initialize_storage(
-                data, origin=origin, gt4py_backend=gt4py_backend, mask=mask
+            # A gt4py backend was given: allocate backend-optimized memory and
+            # copy the provided data into it.
+            dimensions = tuple(
+                [
+                    axis
+                    if any(dim in axis_dims for axis_dims in constants.SPATIAL_DIMS)
+                    else str(dims[index])
+                    for index, (dim, axis) in enumerate(
+                        zip(dims, ("I", "J", "K", *([None] * (len(dims) - 3))))
+                    )
+                ]
+            )
+            self._data = self._initialize_data(
+                data, origin=origin, gt4py_backend=gt4py_backend, dimensions=dimensions
             )
         else:
             self._data = data
-            self._storage = None
 
         _validate_quantity_property_lengths(data.shape, dims, origin, extent)
         self._metadata = QuantityMetadata(
@@ -387,44 +370,17 @@ def sel(self, **kwargs: Union[slice, int]) -> np.ndarray:
         """
         return self.view[tuple(kwargs.get(dim, slice(None, None)) for dim in self.dims)]
 
-    @property
-    def storage(self):
-        """A gt4py storage representing the data in this Quantity.
-
-        Will raise TypeError if the gt4py backend was not specified when initializing
-        this object, either by providing a Storage for data or explicitly specifying
-        a backend.
-        """
-        if self._storage is None:
-            raise TypeError(
-                "gt4py backend was not specified when initializing this object"
-            )
-        return self._storage
-
-    def _initialize_storage(self, data, origin, gt4py_backend: str, mask: Tuple):
-        storage = gt4py.storage.storage.empty(
-            gt4py_backend,
-            default_origin=origin,
-            shape=data.shape,
-            dtype=data.dtype,
-            mask=mask,
-            managed_memory=True,  # required to get GPUStorage with only gpu data copy
+    def _initialize_data(self, data, origin, gt4py_backend: str, dimensions: Tuple):
+        """Allocates an ndarray with optimal memory layout, and copies the data over."""
+        storage = gt4py.storage.empty(
+            data.shape,
+            data.dtype,
+            backend=gt4py_backend,
+            aligned_index=origin,
+            dimensions=dimensions,
         )
         storage[...] = data
-        # storage must initialize new memory. when GDP-3 is merged, we can instead
-        # initialize storage from self._data
-        # when GDP-3 is merged, we can instead use the data in self._data to
-        # initialize the storage, instead of making a copy.
-        if isinstance(storage, gt4py.storage.storage.CPUStorage):
-            data = storage.data
-        elif isinstance(storage, gt4py.storage.storage.GPUStorage):
-            data = storage.gpu_view
-        else:
-            raise NotImplementedError(
-                f"received unexpected storage type {type(storage)} "
-                f"for gt4py_backend {gt4py_backend}, did gt4py get updated?"
-            )
-        return storage, data
+        return storage
 
     @property
     def metadata(self) -> QuantityMetadata:
@@ -489,20 +445,20 @@ def np(self) -> NumpyModule:
 
     @property
     def __array_interface__(self):
-        return self.storage.__array_interface__
+        return self.data.__array_interface__
 
     @property
     def __cuda_array_interface__(self):
-        return self.storage.__cuda_array_interface__
+        return self.data.__cuda_array_interface__
 
     @property
     def shape(self):
-        return self.storage.shape
+        return self.data.shape
 
     def __descriptor__(self):
-        if self._storage is None:
-            return None  # trigger DaCe JIT
-        return self._storage.__descriptor__()
+        if self._data is None:
+            return None  # trigger DaCe JIT
+        return self._data.__descriptor__()
 
     def transpose(self, target_dims: Sequence[Union[str, Iterable[str]]]) -> "Quantity":
         """Change the dimension order of this Quantity.
diff --git a/util/pace/util/testing/__init__.py b/util/pace/util/testing/__init__.py
index 07af1fb9b..a1c927e97 100644
--- a/util/pace/util/testing/__init__.py
+++ b/util/pace/util/testing/__init__.py
@@ -1,4 +1,3 @@
-from . import gt4py_cupy, gt4py_numpy
 from .comparison import compare_arr, compare_scalar, success, success_array
 from .dummy_comm import ConcurrencyError, DummyComm
 from .perturbation import perturb
diff --git a/util/pace/util/testing/gt4py_cupy.py b/util/pace/util/testing/gt4py_cupy.py
deleted file mode 100644
index b482f1bed..000000000
--- a/util/pace/util/testing/gt4py_cupy.py
+++ /dev/null
@@ -1,32 +0,0 @@
-"""This module provides a cupy-style wrapper around certain gt4py functions"""
-try:
-    import gt4py
-except ImportError:
-    gt4py = None
-try:
-    import cupy
-except ImportError:
-    cupy = None
-import numpy
-
-from .gt4py_numpy import inject_attrs, inject_storage_methods
-
-
-if cupy is not None:
-    inject_storage_methods(locals(), "cuda")
-    inject_attrs(locals(), cupy)
-
-    def all(a, axis=None, out=None, keepdims=False):
-        """Tests whether all array elements along a given axis evaluate to True.
-        Args:
-            a (gt4py.storage.GPUStorage): Input array.
-            axis (int or tuple of ints): Along which axis to compute all.
-                The flattened array is used by default.
-            out (cupy.ndarray): Output array.
-            keepdims (bool): If ``True``, the axis is remained as an axis of
-                size one.
-        Returns:
-            cupy.ndarray: An array reduced of the input array along the axis.
-        .. seealso:: :func:`numpy.all`
-        """
-        return numpy.all(a, axis=axis, out=out, keepdims=keepdims)
diff --git a/util/pace/util/testing/gt4py_numpy.py b/util/pace/util/testing/gt4py_numpy.py
deleted file mode 100644
index 76f42f477..000000000
--- a/util/pace/util/testing/gt4py_numpy.py
+++ /dev/null
@@ -1,30 +0,0 @@
-"""This module provides a numpy-style wrapper around certain gt4py functions"""
-try:
-    import gt4py
-except ImportError:
-    gt4py = None
-import numpy as np
-
-
-def _wrap_storage_call(function, backend):
-    def wrapped(shape, dtype=float):
-        return function(backend, [0] * len(shape), shape, dtype, managed_memory=True)
-
-    wrapped.__name__ = function.__name__
-    return wrapped
-
-
-def inject_storage_methods(attr_dict, backend):
-    if gt4py is not None:
-        attr_dict["zeros"] = _wrap_storage_call(gt4py.storage.zeros, backend)
-        attr_dict["ones"] = _wrap_storage_call(gt4py.storage.ones, backend)
-        attr_dict["empty"] = _wrap_storage_call(gt4py.storage.empty, backend)
-
-
-def inject_attrs(attr_dict, module):
-    for name in set(dir(module)).difference(attr_dict.keys()):
-        attr_dict[name] = getattr(module, name)
-
-
-inject_storage_methods(locals(), "numpy")
-inject_attrs(locals(), np)
diff --git a/util/pace/util/utils.py b/util/pace/util/utils.py
index 1930abbf8..ab2e002e8 100644
--- a/util/pace/util/utils.py
+++ b/util/pace/util/utils.py
@@ -18,13 +18,6 @@
 else:
     GPU_AVAILABLE = False
 
-try:
-    from gt4py.storage.storage import Storage
-except ImportError:
-
-    class Storage:  # type: ignore[no-redef]
-        pass
-
 T = TypeVar("T")
 
@@ -47,29 +40,21 @@ def list_by_dims(
     return tuple(return_list)
 
-def is_contiguous(array: Union[np.ndarray, Storage]) -> bool:
-    if isinstance(array, Storage):
-        # gt4py storages use numpy arrays for .data attribute instead of memoryvie
-        return array.data.flags["C_CONTIGUOUS"] or array.flags["F_CONTIGUOUS"]
-    else:
-        return array.flags["C_CONTIGUOUS"] or array.flags["F_CONTIGUOUS"]
+def is_contiguous(array: Union[np.ndarray, cp.ndarray]) -> bool:
+    return array.flags["C_CONTIGUOUS"] or array.flags["F_CONTIGUOUS"]
 
 
-def is_c_contiguous(array: Union[np.ndarray, Storage]) -> bool:
-    if isinstance(array, Storage):
-        # gt4py storages use numpy arrays for .data attribute instead of memoryview
-        return array.data.flags["C_CONTIGUOUS"]
-    else:
-        return array.flags["C_CONTIGUOUS"]
+def is_c_contiguous(array: Union[np.ndarray, cp.ndarray]) -> bool:
+    return array.flags["C_CONTIGUOUS"]
 
 
-def ensure_contiguous(maybe_array: Union[np.ndarray, Storage]) -> None:
-    if isinstance(maybe_array, np.ndarray) and not is_contiguous(maybe_array):
+def ensure_contiguous(maybe_array: Union[np.ndarray, cp.ndarray, None]) -> None:
+    if maybe_array is not None and not is_contiguous(maybe_array):
         raise ValueError("ndarray is not contiguous")
 
 
 def safe_assign_array(
-    to_array: Union[np.ndarray, Storage], from_array: Union[np.ndarray, Storage]
+    to_array: Union[np.ndarray, cp.ndarray], from_array: Union[np.ndarray, cp.ndarray]
 ):
     """Failproof assignment for array on different devices.
diff --git a/util/tests/conftest.py b/util/tests/conftest.py
index b9602aebf..6bb91b9fb 100644
--- a/util/tests/conftest.py
+++ b/util/tests/conftest.py
@@ -1,8 +1,6 @@
 import numpy as np
 import pytest
 
-import pace.util
-
 try:
     import gt4py
 
@@ -51,9 +49,10 @@ def numpy(backend):
     elif backend == "cupy":
         return cupy
     elif backend == "gt4py_numpy":
-        return pace.util.testing.gt4py_numpy
+        # TODO: Deprecate these "backends".
+        return np
     elif backend == "gt4py_cupy":
-        return pace.util.testing.gt4py_cupy
+        return cupy
     else:
         raise NotImplementedError()
diff --git a/util/tests/quantity/test_storage.py b/util/tests/quantity/test_storage.py
index 0c602b78c..a882797d8 100644
--- a/util/tests/quantity/test_storage.py
+++ b/util/tests/quantity/test_storage.py
@@ -9,9 +9,9 @@
 except ImportError:
     gt4py = None
 try:
-    import cupy
+    import cupy as cp
 except ImportError:
-    cupy = None
+    cp = None
 
 
 @pytest.fixture
@@ -69,7 +69,7 @@ def quantity(data, origin, extent, dims, units):
 
 def test_numpy(quantity, backend):
     if "cupy" in backend:
-        assert quantity.np is cupy
+        assert quantity.np is cp
     else:
         assert quantity.np is np
 
@@ -101,9 +101,9 @@ def test_modifying_numpy_storage_modifies_view():
 
 @pytest.mark.parametrize("backend", ["gt4py_numpy", "gt4py_cupy"], indirect=True)
 def test_storage_exists(quantity, backend):
     if "numpy" in backend:
-        assert isinstance(quantity.storage, gt4py.storage.storage.CPUStorage)
+        assert isinstance(quantity.data, np.ndarray)
     else:
-        assert isinstance(quantity.storage, gt4py.storage.storage.GPUStorage)
+        assert isinstance(quantity.data, cp.ndarray)
 
 
@pytest.mark.parametrize("backend", ["numpy", "cupy"], indirect=True)
@@ -114,7 +114,7 @@ def test_storage_does_not_exist(quantity, backend):
 
 def test_data_is_not_storage(quantity, backend):
     if gt4py is not None:
-        assert not isinstance(quantity.data, gt4py.storage.storage.Storage)
+        assert not isinstance(quantity.data, (np.ndarray, cp.ndarray))
 
 
@pytest.mark.parametrize("backend", ["gt4py_numpy", "gt4py_cupy"], indirect=True)
@@ -189,15 +189,14 @@ def test_numpy_data_becomes_cupy_with_gpu_backend(
         units=units,
         gt4py_backend=gt4py_backend,
     )
-    assert isinstance(quantity.data, cupy.ndarray)
-    assert isinstance(quantity.storage, gt4py.storage.storage.GPUStorage)
+    assert isinstance(quantity.data, cp.ndarray)
 
 
@pytest.mark.parametrize("backend", ["gt4py_numpy"], indirect=True)
 def test_cannot_use_cpu_storage_with_gpu_backend(
     data, origin, extent, dims, units, gt4py_backend
 ):
-    assert isinstance(data, gt4py.storage.storage.CPUStorage)
+    assert isinstance(data, np.ndarray)
     with pytest.raises(TypeError):
         pace.util.Quantity(
             data,
@@ -213,7 +212,7 @@ def test_cannot_use_gpu_storage_with_cpu_backend(
     data, origin, extent, dims, units, gt4py_backend
 ):
-    assert isinstance(data, gt4py.storage.storage.GPUStorage)
+    assert isinstance(data, cp.ndarray)
     with pytest.raises(TypeError):
         pace.util.Quantity(
             data,
diff --git a/util/tests/test_cube_scatter_gather.py b/util/tests/test_cube_scatter_gather.py
index c32970d1d..523cb0de1 100644
--- a/util/tests/test_cube_scatter_gather.py
+++ b/util/tests/test_cube_scatter_gather.py
@@ -161,36 +161,6 @@ def get_quantity(dims, units, extent, n_halo, numpy):
 )
 
 
-@pytest.mark.parametrize("backend", ["gt4py_numpy", "gt4py_cupy"], indirect=True)
-@pytest.mark.parametrize("dims, layout", [["x,y,z", (2, 2)]], indirect=True)
-def test_gathered_quantity_has_storage(
-    scattered_quantities, communicator_list, time, backend
-):
-    for communicator, rank_quantity in reversed(
-        list(zip(communicator_list, scattered_quantities))
-    ):
-        result = communicator.gather(send_quantity=rank_quantity)
-        if communicator.rank == 0:
-            assert isinstance(result.storage, gt4py.storage.storage.Storage)
-        else:
-            assert result is None
-
-
-@pytest.mark.parametrize("backend", ["gt4py_numpy", "gt4py_cupy"], indirect=True)
-@pytest.mark.parametrize("dims, layout", [["x,y,z", (2, 2)]], indirect=True)
-def test_scattered_quantity_has_storage(
-    cube_quantity, communicator_list, time, backend
-):
-    result_list = []
-    for communicator in communicator_list:
-        if communicator.rank == 0:
-            result_list.append(communicator.scatter(send_quantity=cube_quantity))
-        else:
-            result_list.append(communicator.scatter())
-    for rank, result in enumerate(result_list):
-        assert isinstance(result.storage, gt4py.storage.storage.Storage)
-
-
 def test_cube_gather_state(
     cube_quantity, scattered_quantities, communicator_list, time, backend
 ):
diff --git a/util/tests/test_tile_scatter_gather.py b/util/tests/test_tile_scatter_gather.py
index 8866bd329..d5a60ed79 100644
--- a/util/tests/test_tile_scatter_gather.py
+++ b/util/tests/test_tile_scatter_gather.py
@@ -159,9 +159,8 @@ def test_gathered_quantity_has_storage(
         list(zip(communicator_list, scattered_quantities))
     ):
         result = communicator.gather(send_quantity=rank_quantity)
-        if communicator.rank == 0:
-            print(result.gt4py_backend, result.metadata)
-            assert isinstance(result.storage, gt4py.storage.storage.Storage)
+        if communicator.rank == 0:
+            assert isinstance(result, pace.util.Quantity)
         else:
             assert result is None
@@ -178,7 +177,7 @@ def test_scattered_quantity_has_storage(
             result_list.append(communicator.scatter())
     for rank, result in enumerate(result_list):
-        assert isinstance(result.storage, gt4py.storage.storage.Storage)
+        assert isinstance(result, pace.util.Quantity)
 
 
 def test_tile_gather_state(