diff --git a/.github/actions/install-pypi/action.yml b/.github/actions/install-pypi/action.yml index abbda96c94e..db05ba94a75 100644 --- a/.github/actions/install-pypi/action.yml +++ b/.github/actions/install-pypi/action.yml @@ -44,6 +44,12 @@ runs: shell: bash run: echo "PIP_NO_BINARY=shapely" >> $GITHUB_ENV + - name: Upgrade pip and setuptools + shell: bash + run: | + python -m pip install --upgrade pip + python -m pip install --upgrade setuptools + - name: Set dependency groups for install shell: bash run: | diff --git a/ci/download_cartopy_maps.py b/ci/download_cartopy_maps.py index 68762ac0198..310eff218d7 100755 --- a/ci/download_cartopy_maps.py +++ b/ci/download_cartopy_maps.py @@ -21,9 +21,6 @@ def grab_ne(category, feature, res): for r in ['110m', '50m', '10m']: grab_ne('cultural', feat, r) - for feat, r in [('coastline', '10m'), ('coastline', '50m'), ('coastline', '110m'), - ('lakes', '10m'), ('lakes', '50m'), - ('land', '10m'), ('land', '50m'), ('land', '110m'), - ('ocean', '110m'), ('ocean', '50m'), - ('rivers_lake_centerlines', '10m'), ('rivers_lake_centerlines', '110m')]: - grab_ne('physical', feat, r) + for feat in ['coastline', 'lakes', 'land', 'ocean', 'rivers_lake_centerlines']: + for r in ['110m', '50m', '10m']: + grab_ne('physical', feat, r) diff --git a/conftest.py b/conftest.py index 834167081da..6825ca51621 100644 --- a/conftest.py +++ b/conftest.py @@ -111,9 +111,8 @@ def test_da_xy(): 'lambert_conformal': ([], '')}, coords={ 'time': xarray.DataArray( - numpy.array([numpy.datetime64('2018-07-01T00:00'), - numpy.datetime64('2018-07-01T06:00'), - numpy.datetime64('2018-07-01T12:00')]), + numpy.array(['2018-07-01T00:00', '2018-07-01T06:00', '2018-07-01T12:00'], + dtype='datetime64[ns]'), name='time', dims=['time'] ), @@ -153,17 +152,6 @@ def test_da_xy(): return ds.metpy.parse_cf('temperature') -@pytest.fixture() -def set_agg_backend(): - """Fixture to ensure the Agg backend is active.""" - prev_backend = matplotlib.pyplot.get_backend() - try: 
- matplotlib.pyplot.switch_backend('agg') - yield - finally: - matplotlib.pyplot.switch_backend(prev_backend) - - @pytest.fixture(params=['dask', 'xarray', 'masked', 'numpy']) def array_type(request): """Return an array type for testing calc functions.""" diff --git a/docs/conf.py b/docs/conf.py index 1683f016ba6..e09dcb2f571 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -9,7 +9,7 @@ # All configuration values have a default; values that are commented out # serve to show the default. -from datetime import datetime +from datetime import datetime, timezone import inspect import os from pathlib import Path @@ -129,7 +129,7 @@ # The encoding of source files. # source_encoding = 'utf-8-sig' -cur_date = datetime.utcnow() +cur_date = datetime.now(timezone.utc) # The main toctree document. master_doc = 'index' diff --git a/examples/meteogram_metpy.py b/examples/meteogram_metpy.py index 0dd259467a5..e0bff215de3 100644 --- a/examples/meteogram_metpy.py +++ b/examples/meteogram_metpy.py @@ -46,7 +46,7 @@ def __init__(self, fig, dates, probeid, time=None, axis=0): axis: number that controls the new axis to be plotted (FOR FUTURE) """ if not time: - time = dt.datetime.utcnow() + time = dt.datetime.now(dt.timezone.utc) self.start = dates[0] self.fig = fig self.end = dates[-1] diff --git a/pyproject.toml b/pyproject.toml index ba816b8c960..3d688bb5d73 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -28,13 +28,13 @@ requires-python = ">=3.9" dependencies = [ "matplotlib>=3.5.0", "numpy>=1.20.0", - "pandas>=1.2.0", - "pint>=0.15", + "pandas>=1.4.0", + "pint>=0.17", "pooch>=1.2.0", "pyproj>=3.0.0", - "scipy>=1.6.0", + "scipy>=1.8.0", "traitlets>=5.0.5", - "xarray>=0.18.0" + "xarray>=0.21.0" ] [project.entry-points."xarray.backends"] @@ -103,6 +103,18 @@ norecursedirs = "build docs .idea" doctest_optionflags = "NORMALIZE_WHITESPACE" mpl-results-path = "test_output" xfail_strict = true +filterwarnings = [ + "error", + "ignore:numpy.ndarray size changed:RuntimeWarning", + # To be 
removed in the next python-dateutil release. + # See: https://github.com/dateutil/dateutil/issues/1314 + 'ignore:datetime.datetime.utcfromtimestamp\(\) is deprecated:DeprecationWarning:dateutil.tz.tz:37', + # Numpy deprecation triggered by Pint: https://github.com/hgrecco/pint/pull/1880 + "ignore:Conversion of an array with ndim > 0 to a scalar is deprecated:DeprecationWarning:pint.facets.plain.quantity:575", + # PyProj automatically dispatching for single point, will be waiting for NumPy 2.0 to address + # See: https://github.com/pyproj4/pyproj/issues/1309 + "ignore:Conversion of an array with ndim > 0 to a scalar is deprecated:DeprecationWarning:pyproj.geod:404" +] [tool.ruff] line-length = 95 diff --git a/src/metpy/calc/basic.py b/src/metpy/calc/basic.py index ae77c82510e..53dd629c465 100644 --- a/src/metpy/calc/basic.py +++ b/src/metpy/calc/basic.py @@ -93,7 +93,7 @@ def wind_direction(u, v, convention='from'): origshape = wdir.shape wdir = np.atleast_1d(wdir) - # Handle oceanographic convection + # Handle oceanographic convention if convention == 'to': wdir -= units.Quantity(180., 'deg') elif convention not in ('to', 'from'): @@ -405,8 +405,8 @@ def apparent_temperature(temperature, relative_humidity, speed, face_level_winds # NB: older numpy.ma.where does not return a masked array app_temperature = masked_array( np.ma.where(masked_array(wind_chill_temperature).mask, - heat_index_temperature.to(temperature.units), - wind_chill_temperature.to(temperature.units) + heat_index_temperature.m_as(temperature.units), + wind_chill_temperature.m_as(temperature.units) ), temperature.units) # If mask_undefined is False, then set any masked values to the temperature @@ -829,6 +829,9 @@ def smooth_gaussian(scalar_grid, n): num_ax = len(scalar_grid.shape) # Assume the last two axes represent the horizontal directions sgma_seq = [sgma if i > num_ax - 3 else 0 for i in range(num_ax)] + # Drop units as necessary to avoid warnings from scipy doing so--units will be reattached + 
# if necessary by wrapper + scalar_grid = getattr(scalar_grid, 'magnitude', scalar_grid) filter_args = {'sigma': sgma_seq, 'truncate': 2 * np.sqrt(2)} if hasattr(scalar_grid, 'mask'): @@ -1104,6 +1107,8 @@ def zoom_xarray(input_field, zoom, output=None, order=3, mode='constant', cval=0 available. """ + # Dequantify input to avoid warnings and make sure units propagate + input_field = input_field.metpy.dequantify() # Zoom data zoomed_data = scipy_zoom( input_field.data, zoom, output=output, order=order, mode=mode, cval=cval, diff --git a/src/metpy/calc/cross_sections.py b/src/metpy/calc/cross_sections.py index acc8bba28a5..1b33330a389 100644 --- a/src/metpy/calc/cross_sections.py +++ b/src/metpy/calc/cross_sections.py @@ -123,8 +123,8 @@ def unit_vectors_from_cross_section(cross, index='index'): """ x, y = distances_from_cross_section(cross) - dx_di = first_derivative(x, axis=index).values - dy_di = first_derivative(y, axis=index).values + dx_di = first_derivative(x, axis=index).data + dy_di = first_derivative(y, axis=index).data tangent_vector_mag = np.hypot(dx_di, dy_di) unit_tangent_vector = np.vstack([dx_di / tangent_vector_mag, dy_di / tangent_vector_mag]) unit_normal_vector = np.vstack([-dy_di / tangent_vector_mag, dx_di / tangent_vector_mag]) diff --git a/src/metpy/calc/thermo.py b/src/metpy/calc/thermo.py index 0f36e33efd9..4f17a00fd84 100644 --- a/src/metpy/calc/thermo.py +++ b/src/metpy/calc/thermo.py @@ -1638,7 +1638,7 @@ def saturation_equivalent_potential_temperature(pressure, temperature): e = saturation_vapor_pressure(temperature).to('hPa').magnitude r = saturation_mixing_ratio(pressure, temperature).magnitude - th_l = t * (1000 / (p - e)) ** mpconsts.kappa + th_l = t * (1000 / (p - e)) ** mpconsts.nounit.kappa th_es = th_l * np.exp((3036. 
/ t - 1.78) * r * (1 + 0.448 * r)) return units.Quantity(th_es, units.kelvin) diff --git a/src/metpy/calc/tools.py b/src/metpy/calc/tools.py index 895ad216d7f..16e2c8c2da2 100644 --- a/src/metpy/calc/tools.py +++ b/src/metpy/calc/tools.py @@ -25,19 +25,19 @@ UND = 'UND' UND_ANGLE = -999. -DIR_STRS = ( +DIR_STRS = [ 'N', 'NNE', 'NE', 'ENE', 'E', 'ESE', 'SE', 'SSE', 'S', 'SSW', 'SW', 'WSW', 'W', 'WNW', 'NW', 'NNW', UND -) # note the order matters! +] # note the order matters! MAX_DEGREE_ANGLE = units.Quantity(360, 'degree') BASE_DEGREE_MULTIPLIER = units.Quantity(22.5, 'degree') DIR_DICT = {dir_str: i * BASE_DEGREE_MULTIPLIER for i, dir_str in enumerate(DIR_STRS)} -DIR_DICT[UND] = np.nan +DIR_DICT[UND] = units.Quantity(np.nan, 'degree') @exporter.export @@ -1773,16 +1773,15 @@ def parse_angle(input_dir): """ if isinstance(input_dir, str): - # abb_dirs = abbrieviated directions - abb_dirs = _clean_direction([_abbrieviate_direction(input_dir)]) + abb_dir = _clean_direction([_abbreviate_direction(input_dir)])[0] + return DIR_DICT[abb_dir] elif hasattr(input_dir, '__len__'): # handle np.array, pd.Series, list, and array-like input_dir_str = ','.join(_clean_direction(input_dir, preprocess=True)) - abb_dir_str = _abbrieviate_direction(input_dir_str) + abb_dir_str = _abbreviate_direction(input_dir_str) abb_dirs = _clean_direction(abb_dir_str.split(',')) + return units.Quantity.from_list(itemgetter(*abb_dirs)(DIR_DICT)) else: # handle unrecognizable scalar - return np.nan - - return itemgetter(*abb_dirs)(DIR_DICT) + return units.Quantity(np.nan, 'degree') def _clean_direction(dir_list, preprocess=False): @@ -1795,7 +1794,7 @@ def _clean_direction(dir_list, preprocess=False): for the_dir in dir_list] -def _abbrieviate_direction(ext_dir_str): +def _abbreviate_direction(ext_dir_str): """Convert extended (non-abbreviated) directions to abbreviation.""" return (ext_dir_str .upper() @@ -1846,11 +1845,10 @@ def angle_to_direction(input_angle, full=False, level=3): # clean any 
numeric strings, negatives, and None does not handle strings with alphabet input_angle = units.Quantity(np.array(input_angle).astype(float), origin_units) - input_angle[input_angle < 0] = units.Quantity(np.nan, origin_units) + input_angle[input_angle < 0] = np.nan - # normalizer used for angles > 360 degree to normalize between 0 - 360 - normalizer = np.array(input_angle.m / MAX_DEGREE_ANGLE.m, dtype=int) - norm_angles = abs(input_angle - MAX_DEGREE_ANGLE * normalizer) + # Normalize between 0 - 360 + norm_angles = input_angle % MAX_DEGREE_ANGLE if level == 3: nskip = 1 @@ -1889,12 +1887,12 @@ def angle_to_direction(input_angle, full=False, level=3): return dir_str_arr dir_str_arr = ','.join(dir_str_arr) - dir_str_arr = _unabbrieviate_direction(dir_str_arr) + dir_str_arr = _unabbreviate_direction(dir_str_arr) return dir_str_arr.replace(',', ' ') if scalar else dir_str_arr.split(',') -def _unabbrieviate_direction(abb_dir_str): - """Convert abbrieviated directions to non-abbrieviated direction.""" +def _unabbreviate_direction(abb_dir_str): + """Convert abbreviated directions to non-abbreviated direction.""" return (abb_dir_str .upper() .replace(UND, 'Undefined ') diff --git a/src/metpy/interpolate/points.py b/src/metpy/interpolate/points.py index 36add3138a4..e33f7e19cb2 100644 --- a/src/metpy/interpolate/points.py +++ b/src/metpy/interpolate/points.py @@ -8,7 +8,7 @@ import numpy as np from scipy.interpolate import griddata, Rbf -from scipy.spatial import cKDTree, ConvexHull, Delaunay, qhull +from scipy.spatial import cKDTree, ConvexHull, Delaunay, QhullError from . import geometry, tools from ..package_tools import Exporter @@ -153,7 +153,7 @@ def natural_neighbor_point(xp, yp, variable, grid_loc, tri, neighbors, circumcen area_list.append(cur_area * value[0]) - except (ZeroDivisionError, qhull.QhullError) as e: + except (ZeroDivisionError, QhullError) as e: message = ('Error during processing of a grid. 
' 'Interpolation will continue but be mindful ' f'of errors in output. {e}') diff --git a/src/metpy/interpolate/slices.py b/src/metpy/interpolate/slices.py index 33f390c5298..b11199565e7 100644 --- a/src/metpy/interpolate/slices.py +++ b/src/metpy/interpolate/slices.py @@ -7,7 +7,6 @@ import xarray as xr from ..package_tools import Exporter -from ..units import is_quantity, units from ..xarray import check_axis exporter = Exporter(globals()) @@ -50,17 +49,14 @@ def interpolate_to_slice(data, points, interp_type='linear'): 'your data has been parsed by MetPy with proper x and y ' 'dimension coordinates.') from None + data = data.metpy.dequantify() data_sliced = data.interp({ x.name: xr.DataArray(points[:, 0], dims='index', attrs=x.attrs), y.name: xr.DataArray(points[:, 1], dims='index', attrs=y.attrs) }, method=interp_type) data_sliced.coords['index'] = range(len(points)) - # Bug in xarray: interp strips units - if is_quantity(data.data) and not is_quantity(data_sliced.data): - data_sliced.data = units.Quantity(data_sliced.data, data.data.units) - - return data_sliced + return data_sliced.metpy.quantify() @exporter.export diff --git a/src/metpy/io/nexrad.py b/src/metpy/io/nexrad.py index ff76a1dae9b..f49b3615083 100644 --- a/src/metpy/io/nexrad.py +++ b/src/metpy/io/nexrad.py @@ -6,7 +6,7 @@ import bz2 from collections import defaultdict, namedtuple, OrderedDict import contextlib -import datetime +from datetime import datetime, timezone import logging import pathlib import re @@ -75,7 +75,8 @@ def bzip_blocks_decompress_all(data): def nexrad_to_datetime(julian_date, ms_midnight): """Convert NEXRAD date time format to python `datetime.datetime`.""" # Subtracting one from julian_date is because epoch date is 1 - return datetime.datetime.utcfromtimestamp((julian_date - 1) * day + ms_midnight * milli) + return datetime.fromtimestamp((julian_date - 1) * day + ms_midnight * milli, + tz=timezone.utc).replace(tzinfo=None) def remap_status(val): diff --git 
a/src/metpy/io/text.py b/src/metpy/io/text.py index 9ff480190da..9af16a99c11 100644 --- a/src/metpy/io/text.py +++ b/src/metpy/io/text.py @@ -4,7 +4,7 @@ """Support reading information from various text file formats.""" import contextlib -from datetime import datetime +from datetime import datetime, timezone import re import string @@ -95,7 +95,7 @@ def parse_wpc_surface_bulletin(bulletin, year=None): text = file.read().decode('utf-8') parsed_text = [] - valid_time = datetime.utcnow() + valid_time = datetime.now(timezone.utc).replace(tzinfo=None) for parts in _regroup_lines(text.splitlines()): # A single file may have multiple sets of data that are valid at different times. Set # the valid_time string that will correspond to all the following lines parsed, until diff --git a/src/metpy/plots/_util.py b/src/metpy/plots/_util.py index 1135d538e65..5992dc4f8c4 100644 --- a/src/metpy/plots/_util.py +++ b/src/metpy/plots/_util.py @@ -3,7 +3,7 @@ # SPDX-License-Identifier: BSD-3-Clause """Utilities for use in making plots.""" -from datetime import datetime +from datetime import datetime, timezone from matplotlib.collections import LineCollection import matplotlib.patheffects as mpatheffects @@ -24,7 +24,7 @@ def add_timestamp(ax, time=None, x=0.99, y=-0.04, ha='right', high_contrast=Fals ax : `matplotlib.axes.Axes` The `Axes` instance used for plotting time : `datetime.datetime` (or any object with a compatible ``strftime`` method) - Specific time to be plotted - datetime.utcnow will be use if not specified + Specific time to be plotted - ``datetime.now(UTC)`` will be used if not specified x : float Relative x position on the axes of the timestamp y : float @@ -52,7 +52,7 @@ def add_timestamp(ax, time=None, x=0.99, y=-0.04, ha='right', high_contrast=Fals text_args = {} text_args.update(**kwargs) if not time: - time = datetime.now(timezone.utc) timestr = time.strftime(time_format) # If we don't have a time string after that, assume xarray/numpy
and see if item if not isinstance(timestr, str): diff --git a/src/metpy/plots/declarative.py b/src/metpy/plots/declarative.py index 182b7ef252e..7dd31a9fc46 100644 --- a/src/metpy/plots/declarative.py +++ b/src/metpy/plots/declarative.py @@ -10,6 +10,7 @@ from itertools import cycle import re +from matplotlib.contour import ContourSet import matplotlib.patheffects as patheffects import matplotlib.pyplot as plt import numpy as np @@ -683,7 +684,10 @@ def clear(self): """ if getattr(self, 'handle', None) is not None: - if getattr(self.handle, 'collections', None) is not None: + # In matplotlib 3.8, the collections attribute on ContourSet was deprecated. + # Check for that here so we can avoid the deprecation warning. + if (not isinstance(ContourSet.__dict__.get('collections'), property) + and getattr(self.handle, 'collections', None) is not None): self.clear_collections() else: self.clear_handle() @@ -1062,7 +1066,8 @@ def _build(self): kwargs.setdefault('colors', self.linecolor) kwargs.setdefault('linestyles', self.linestyle) - self.handle = self.parent.ax.contour(x_like, y_like, imdata, self.contours, **kwargs) + self.handle = self.parent.ax.contour(x_like, y_like, imdata.metpy.dequantify(), + self.contours, **kwargs) if self.clabels: self.handle.clabel(inline=1, fmt='%.0f', inline_spacing=8, use_clabeltext=True, fontsize=self.label_fontsize) @@ -1203,11 +1208,13 @@ def griddata(self): if self.plot_units is not None: data_subset_u = data_subset_u.metpy.convert_units(self.plot_units) + data_subset_u = data_subset_u.metpy.dequantify() data_subset_v = data_subset_v.metpy.convert_units(self.plot_units) + data_subset_v = data_subset_v.metpy.dequantify() self._griddata_u = data_subset_u * self.scale self._griddata_v = data_subset_v * self.scale - return (self._griddata_u, self._griddata_v) + return self._griddata_u, self._griddata_v @property def plotdata(self): diff --git a/src/metpy/plots/skewt.py b/src/metpy/plots/skewt.py index c5a2a2cf222..a349067b9d3 100644 --- 
a/src/metpy/plots/skewt.py +++ b/src/metpy/plots/skewt.py @@ -421,6 +421,10 @@ def plot_barbs(self, pressure, u, v, c=None, xloc=1.0, x_clip_radius=0.1, raise ValueError('To convert to plotting units, units must be attached to ' 'u and v wind components.') + # Drop units for u,v since they're not used and trigger warnings + u = getattr(u, 'magnitude', u) + v = getattr(v, 'magnitude', v) + # Assemble array of x-locations in axes space x = np.empty_like(pressure) x.fill(xloc) @@ -966,7 +970,8 @@ def plot_colormapped(self, u, v, c, intervals=None, colors=None, **kwargs): else: line_args = self._form_line_args(kwargs) - # Do the plotting + # Do the plotting -- drop units on c since it's not used + c = getattr(c, 'magnitude', c) lc = colored_line(u, v, c, **line_args) self.ax.add_collection(lc) return lc diff --git a/src/metpy/plots/station_plot.py b/src/metpy/plots/station_plot.py index 6bacd55ffd1..db2689a0bda 100644 --- a/src/metpy/plots/station_plot.py +++ b/src/metpy/plots/station_plot.py @@ -339,9 +339,7 @@ def _vector_plotting_units(u, v, plotting_units): 'u and v wind components.') # Strip units, CartoPy transform doesn't like - u = np.array(u) - v = np.array(v) - return u, v + return np.array(getattr(u, 'magnitude', u)), np.array(getattr(v, 'magnitude', v)) @staticmethod def _scalar_plotting_units(scalar_value, plotting_units): diff --git a/src/metpy/testing.py b/src/metpy/testing.py index 529b6609293..7f3de9f4856 100644 --- a/src/metpy/testing.py +++ b/src/metpy/testing.py @@ -13,6 +13,7 @@ import operator as op import re +import matplotlib.pyplot as plt import numpy as np import numpy.testing from packaging.version import Version @@ -126,6 +127,19 @@ def wrapped(*args, **kwargs): needs_cartopy = needs_module('cartopy') +@contextlib.contextmanager +def autoclose_figure(*args, **kwargs): + """Create a figure that is automatically closed when exiting a block. + + ``*args`` and ``**kwargs`` are forwarded onto the call to `plt.figure()`. 
+ """ + fig = plt.figure(*args, **kwargs) + try: + yield fig + finally: + plt.close(fig) + + def get_upper_air_data(date, station): """Get upper air observations from the test data cache. diff --git a/src/metpy/units.py b/src/metpy/units.py index 726939b18f7..9ae7c2c7071 100644 --- a/src/metpy/units.py +++ b/src/metpy/units.py @@ -209,6 +209,7 @@ def masked_array(data, data_units=None, **kwargs): """ if data_units is None: data_units = data.units + data = data.magnitude return units.Quantity(np.ma.masked_array(data, **kwargs), data_units) diff --git a/src/metpy/xarray.py b/src/metpy/xarray.py index e45f03af027..4030a031ade 100644 --- a/src/metpy/xarray.py +++ b/src/metpy/xarray.py @@ -1316,6 +1316,7 @@ def cast_variables(arg, arg_name): for i, arg in enumerate(wrap_like): if isinstance(arg, str): match[i] = bound_args.arguments[arg] + match = tuple(match) # Cast all DataArrays to Pint Quantities _mutate_arguments(bound_args, xr.DataArray, lambda arg, _: arg.metpy.unit_array) @@ -1336,7 +1337,7 @@ def cast_variables(arg, arg_name): else: wrapping = _wrap_output_like_not_matching_units - if isinstance(match, list): + if isinstance(match, tuple): return tuple(wrapping(*args) for args in zip(result, match)) else: return wrapping(result, match) diff --git a/tests/calc/test_basic.py b/tests/calc/test_basic.py index abc1f6ba9ec..c685f69e1fb 100644 --- a/tests/calc/test_basic.py +++ b/tests/calc/test_basic.py @@ -800,6 +800,7 @@ def test_altimiter_to_sea_level_pressure_inhg(): assert_almost_equal(res, truth, 3) +@pytest.mark.filterwarnings('ignore:overflow encountered in exp:RuntimeWarning') def test_altimeter_to_sea_level_pressure_hpa(array_type): """Test the altimeter to sea level pressure function with hectopascals.""" mask = [False, True, False, True] diff --git a/tests/calc/test_calc_tools.py b/tests/calc/test_calc_tools.py index 4eeda3f40fe..b351372bf32 100644 --- a/tests/calc/test_calc_tools.py +++ b/tests/calc/test_calc_tools.py @@ -59,7 +59,9 @@ def 
test_find_intersections(direction, expected): y1 = 3 * x**2 y2 = 100 * x - 650 # Note: Truth is what we will get with this sampling, not the mathematical intersection - assert_array_almost_equal(expected, find_intersections(x, y1, y2, direction=direction), 2) + x_int, y_int = find_intersections(x, y1, y2, direction=direction) + assert_array_almost_equal(x_int, expected[0], 2) + assert_array_almost_equal(y_int, expected[1], 2) def test_find_intersections_no_intersections(): @@ -67,10 +69,10 @@ def test_find_intersections_no_intersections(): x = np.linspace(5, 30, 17) y1 = 3 * x + 0 y2 = 5 * x + 5 - # Note: Truth is what we will get with this sampling, not the mathematical intersection - truth = np.array([[], - []]) - assert_array_equal(truth, find_intersections(x, y1, y2)) + + x_int, y_int = find_intersections(x, y1, y2) + assert_array_equal(x_int, np.array([])) + assert_array_equal(y_int, np.array([])) def test_find_intersections_invalid_direction(): @@ -107,7 +109,9 @@ def test_find_intersections_intersections_in_data_at_ends(direction, expected): x = np.arange(14) y1 = np.array([0, 3, 2, 1, -1, 2, 2, 0, 1, 0, 0, -2, 2, 0]) y2 = np.zeros_like(y1) - assert_array_almost_equal(expected, find_intersections(x, y1, y2, direction=direction), 2) + x_int, y_int = find_intersections(x, y1, y2, direction=direction) + assert_array_almost_equal(x_int, expected[0], 2) + assert_array_almost_equal(y_int, expected[1], 2) @pytest.mark.parametrize('mask, expected_idx, expected_element', [ @@ -693,15 +697,15 @@ def test_laplacian_2d(deriv_2d_data): assert_array_almost_equal(laplac, laplac_true, 5) -def test_parse_angle_abbrieviated(): - """Test abbrieviated directional text in degrees.""" +def test_parse_angle_abbreviated(): + """Test abbreviated directional text in degrees.""" expected_angles_degrees = FULL_CIRCLE_DEGREES output_angles_degrees = parse_angle(DIR_STRS[:-1]) assert_array_almost_equal(output_angles_degrees, expected_angles_degrees) def test_parse_angle_ext(): - """Test 
extended (unabbrieviated) directional text in degrees.""" + """Test extended (unabbreviated) directional text in degrees.""" test_dir_strs = ['NORTH', 'NORTHnorthEast', 'North_East', 'East__North_East', 'easT', 'east south east', 'south east', ' south southeast', 'SOUTH', 'SOUTH SOUTH WEST', 'southWEST', 'WEST south_WEST', @@ -712,7 +716,7 @@ def test_parse_angle_ext(): def test_parse_angle_mix_multiple(): - """Test list of extended (unabbrieviated) directional text in degrees in one go.""" + """Test list of extended (unabbreviated) directional text in degrees in one go.""" test_dir_strs = ['NORTH', 'nne', 'ne', 'east north east', 'easT', 'east se', 'south east', ' south southeast', 'SOUTH', 'SOUTH SOUTH WEST', 'sw', 'WEST south_WEST', @@ -723,7 +727,7 @@ def test_parse_angle_mix_multiple(): def test_parse_angle_none(): - """Test list of extended (unabbrieviated) directional text in degrees in one go.""" + """Test list of extended (unabbreviated) directional text in degrees in one go.""" test_dir_strs = None expected_angles_degrees = np.nan output_angles_degrees = parse_angle(test_dir_strs) @@ -731,7 +735,7 @@ def test_parse_angle_none(): def test_parse_angle_invalid_number(): - """Test list of extended (unabbrieviated) directional text in degrees in one go.""" + """Test list of extended (unabbreviated) directional text in degrees in one go.""" test_dir_strs = 365. 
expected_angles_degrees = np.nan output_angles_degrees = parse_angle(test_dir_strs) @@ -739,15 +743,16 @@ def test_parse_angle_invalid_number(): def test_parse_angle_invalid_arr(): - """Test list of extended (unabbrieviated) directional text in degrees in one go.""" + """Test list of extended (unabbreviated) directional text in degrees in one go.""" test_dir_strs = ['nan', None, np.nan, 35, 35.5, 'north', 'andrewiscool'] expected_angles_degrees = [np.nan, np.nan, np.nan, np.nan, np.nan, 0, np.nan] output_angles_degrees = parse_angle(test_dir_strs) + assert isinstance(output_angles_degrees, units.Quantity) assert_array_almost_equal(output_angles_degrees, expected_angles_degrees) def test_parse_angle_mix_multiple_arr(): - """Test list of extended (unabbrieviated) directional text in degrees in one go.""" + """Test list of extended (unabbreviated) directional text in degrees in one go.""" test_dir_strs = np.array(['NORTH', 'nne', 'ne', 'east north east', 'easT', 'east se', 'south east', ' south southeast', 'SOUTH', 'SOUTH SOUTH WEST', 'sw', 'WEST south_WEST', @@ -791,14 +796,16 @@ def test_gradient_2d(deriv_2d_data): [-3, -1, 4], [-3, -1, 4], [-3, -1, 4]])) - assert_array_almost_equal(res, truth, 5) + for r, t in zip(res, truth): + assert_array_almost_equal(r, t, 5) def test_gradient_4d(deriv_4d_data): """Test gradient with 4D arrays.""" res = gradient(deriv_4d_data, deltas=(1, 1, 1, 1)) truth = tuple(factor * np.ones_like(deriv_4d_data) for factor in (48., 16., 4., 1.)) - assert_array_almost_equal(res, truth, 8) + for r, t in zip(res, truth): + assert_array_almost_equal(r, t, 8) def test_gradient_restricted_axes(deriv_2d_data): @@ -813,7 +820,8 @@ def test_gradient_restricted_axes(deriv_2d_data): [[-3], [-1], [4]], [[-3], [-1], [4]], [[-3], [-1], [4]]])) - assert_array_almost_equal(res, truth, 5) + for r, t in zip(res, truth): + assert_array_almost_equal(r, t, 5) def test_bounding_indices(): @@ -876,6 +884,7 @@ def test_angle_to_direction_full(): 
assert_array_equal(output_dirs, expected_dirs) +@pytest.mark.filterwarnings('ignore:invalid value encountered in remainder:RuntimeWarning') def test_angle_to_direction_invalid_scalar(): """Test invalid angle.""" expected_dirs = UND @@ -883,6 +892,7 @@ def test_angle_to_direction_invalid_scalar(): assert_array_equal(output_dirs, expected_dirs) +@pytest.mark.filterwarnings('ignore:invalid value encountered in remainder:RuntimeWarning') def test_angle_to_direction_invalid_arr(): """Test array of invalid angles.""" expected_dirs = ['NE', UND, UND, UND, 'N'] @@ -991,7 +1001,8 @@ def test_3d_gradient_3d_data_no_axes(deriv_4d_data): test = deriv_4d_data[0] res = gradient(test, deltas=(1, 1, 1)) truth = tuple(factor * np.ones_like(test) for factor in (16., 4., 1.)) - assert_array_almost_equal(res, truth, 8) + for r, t in zip(res, truth): + assert_array_almost_equal(r, t, 8) def test_2d_gradient_3d_data_no_axes(deriv_4d_data): @@ -1014,14 +1025,16 @@ def test_2d_gradient_4d_data_2_axes_3_deltas(deriv_4d_data): """Test 2D gradient of 4D data with 2 axes and 3 deltas.""" res = gradient(deriv_4d_data, deltas=(1, 1, 1), axes=(-2, -1)) truth = tuple(factor * np.ones_like(deriv_4d_data) for factor in (4., 1.)) - assert_array_almost_equal(res, truth, 8) + for r, t in zip(res, truth): + assert_array_almost_equal(r, t, 8) def test_2d_gradient_4d_data_2_axes_2_deltas(deriv_4d_data): """Test 2D gradient of 4D data with 2 axes and 2 deltas.""" res = gradient(deriv_4d_data, deltas=(1, 1), axes=(0, 1)) truth = tuple(factor * np.ones_like(deriv_4d_data) for factor in (48., 16.)) - assert_array_almost_equal(res, truth, 8) + for r, t in zip(res, truth): + assert_array_almost_equal(r, t, 8) def test_2d_gradient_4d_data_2_axes_1_deltas(deriv_4d_data): @@ -1106,7 +1119,7 @@ def test_first_derivative_xarray_time_subsecond_precision(): coords={'time': np.array(['2019-01-01T00:00:00.0', '2019-01-01T00:00:00.1', '2019-01-01T00:00:00.2'], - dtype='datetime64[ms]')}, + dtype='datetime64[ns]')}, 
attrs={'units': 'kelvin'}) deriv = first_derivative(test_da) @@ -1317,7 +1330,11 @@ def check_params(scalar, dx=None, dy=None, parallel_scale=None, meridional_scale if subset: temp = temp.isel(time=0).metpy.sel(vertical=500 * units.hPa) - t, dx, dy, p, m, lat, x_dim, y_dim = check_params(temp) + if datafile != 'GFS_test.nc' and (not assign_lat_lon or no_crs): + with pytest.warns(UserWarning, match='Latitude and longitude computed on-demand'): + t, dx, dy, p, m, lat, x_dim, y_dim = check_params(temp) + else: + t, dx, dy, p, m, lat, x_dim, y_dim = check_params(temp) if transpose: if subset: @@ -1403,8 +1420,9 @@ def check_params(scalar, dx=None, dy=None, x_dim=-1, y_dim=-2): }, attrs={'units': 'K'}).to_dataset().metpy.parse_cf('temperature') - with pytest.raises(AttributeError, - match='horizontal dimension coordinates cannot be found.'): + with (pytest.raises(AttributeError, + match='horizontal dimension coordinates cannot be found.'), + pytest.warns(UserWarning, match='Horizontal dimension numbers not found.')): check_params(test_da) diff --git a/tests/calc/test_cross_sections.py b/tests/calc/test_cross_sections.py index b4f70b18367..03dc4f865c8 100644 --- a/tests/calc/test_cross_sections.py +++ b/tests/calc/test_cross_sections.py @@ -67,7 +67,7 @@ def test_cross_xy(): }, coords={ 'time': xr.DataArray( - np.array([np.datetime64('2018-07-01T00:00')]), + np.array(['2018-07-01T00:00'], dtype='datetime64[ns]'), name='time', dims=['time'] ), diff --git a/tests/calc/test_indices.py b/tests/calc/test_indices.py index 1a5ab52ebcd..0a2982a65f9 100644 --- a/tests/calc/test_indices.py +++ b/tests/calc/test_indices.py @@ -158,7 +158,7 @@ def test_weighted_continuous_average_elevated(): def test_precipitable_water_xarray(): """Test precipitable water with xarray input.""" data = get_upper_air_data(datetime(2016, 5, 22, 0), 'DDC') - press = xr.DataArray(data['pressure'], attrs={'units': str(data['pressure'].units)}) + press = xr.DataArray(data['pressure'].m, attrs={'units': 
str(data['pressure'].units)}) dewp = xr.DataArray(data['dewpoint'], dims=('press',), coords=(press,)) pw = precipitable_water(press, dewp, top=400 * units.hPa) truth = 22.60430651 * units.millimeters diff --git a/tests/calc/test_kinematics.py b/tests/calc/test_kinematics.py index a5f120f9579..f71a75bc530 100644 --- a/tests/calc/test_kinematics.py +++ b/tests/calc/test_kinematics.py @@ -144,7 +144,7 @@ def test_vorticity_grid_pole(): u = xr.DataArray(us, name='u', coords=(y, x), dims=('y', 'x'), attrs={'units': 'm/s'}) v = xr.DataArray(vs, name='v', coords=(y, x), dims=('y', 'x'), attrs={'units': 'm/s'}) - ds = xr.merge((u, v)).metpy.assign_crs(grid) + ds = xr.merge((u, v)).metpy.assign_crs(grid).metpy.assign_latitude_longitude() vort = vorticity(ds.u, ds.v) @@ -376,6 +376,7 @@ def test_advection_4d_vertical(data_4d): assert a.data.units == units.Unit('K/sec') +@pytest.mark.filterwarnings('ignore:Horizontal dimension numbers not found.') def test_advection_1d_vertical(): """Test 1-d vertical advection with parsed dims.""" pressure = xr.DataArray( @@ -403,6 +404,7 @@ def test_advection_2d_asym(): assert_array_equal(a, truth) +@pytest.mark.filterwarnings('ignore:Vertical dimension number not found.') def test_advection_xarray(basic_dataset): """Test advection calculation using xarray support.""" a = advection(basic_dataset.temperature, basic_dataset.u, basic_dataset.v) diff --git a/tests/calc/test_thermo.py b/tests/calc/test_thermo.py index 5e7aea34c9c..a5c66a8fc45 100644 --- a/tests/calc/test_thermo.py +++ b/tests/calc/test_thermo.py @@ -203,8 +203,9 @@ def test_moist_lapse_starting_points(start, direction): @pytest.mark.xfail(platform.machine() == 'arm64', reason='ValueError is not raised on Mac M2') @pytest.mark.xfail((sys.platform == 'win32') and version_check('scipy<1.11.3'), reason='solve_ivp() does not error on Windows + SciPy < 1.11.3') -@pytest.mark.xfail(version_check('scipy<1.7'), - reason='solve_ivp() does not error on Scipy < 1.7') 
+@pytest.mark.filterwarnings('ignore:overflow encountered in exp:RuntimeWarning') +@pytest.mark.filterwarnings(r'ignore:invalid value encountered in \w*divide:RuntimeWarning') +@pytest.mark.filterwarnings(r'ignore:.*Excess accuracy requested.*:UserWarning') def test_moist_lapse_failure(): """Test moist_lapse under conditions that cause the ODE solver to fail.""" p = np.logspace(3, -1, 10) * units.hPa @@ -1575,9 +1576,9 @@ def test_mixed_layer_cape_cin_bottom_pressure(multiple_intersections): """Test the calculation of mixed layer cape/cin with a specified bottom pressure.""" pressure, temperature, dewpoint = multiple_intersections mlcape_middle, mlcin_middle = mixed_layer_cape_cin(pressure, temperature, dewpoint, - parcel_start_pressure=800 * units.hPa) - assert_almost_equal(mlcape_middle, 0 * units('joule / kilogram'), 2) - assert_almost_equal(mlcin_middle, 0 * units('joule / kilogram'), 2) + parcel_start_pressure=903 * units.hPa) + assert_almost_equal(mlcape_middle, 1177.86 * units('joule / kilogram'), 2) + assert_almost_equal(mlcin_middle, -37. 
* units('joule / kilogram'), 2) def test_dcape(): @@ -2528,7 +2529,8 @@ def test_parcel_profile_with_lcl_as_dataset_duplicates(): } ) - profile = parcel_profile_with_lcl_as_dataset(pressure, temperature, dewpoint) + with pytest.warns(UserWarning, match='Duplicate pressure'): + profile = parcel_profile_with_lcl_as_dataset(pressure, temperature, dewpoint) xr.testing.assert_allclose(profile, truth, atol=1e-5) diff --git a/tests/interpolate/test_slices.py b/tests/interpolate/test_slices.py index d992a928ff0..41eb6dc01fb 100644 --- a/tests/interpolate/test_slices.py +++ b/tests/interpolate/test_slices.py @@ -57,7 +57,7 @@ def test_ds_xy(): }, coords={ 'time': xr.DataArray( - np.array([np.datetime64('2018-07-01T00:00')]), + np.array(['2018-07-01T00:00'], dtype='datetime64[ns]'), name='time', dims=['time'] ), diff --git a/tests/io/test_gempak.py b/tests/io/test_gempak.py index 95517761608..92de1a34456 100644 --- a/tests/io/test_gempak.py +++ b/tests/io/test_gempak.py @@ -26,7 +26,8 @@ def test_grid_loading(grid_name): ) gio = grid[0].values.squeeze() - gempak = np.load(get_test_data(f'gem_packing_{grid_name}.npz'))['values'] + gempak = np.load(get_test_data(f'gem_packing_{grid_name}.npz', + as_file_obj=False))['values'] assert_allclose(gio, gempak, rtol=1e-6, atol=0) @@ -56,7 +57,8 @@ def test_merged_sounding(): gimxr = gso[0].imxr.values.squeeze() gdtar = gso[0].dtar.values.squeeze() - gempak = pd.read_csv(get_test_data('gem_model_mrg.csv'), na_values=-9999) + gempak = pd.read_csv(get_test_data('gem_model_mrg.csv', as_file_obj=False), + na_values=-9999) dpres = gempak.PRES.values dtemp = gempak.TMPC.values ddwpt = gempak.DWPC.values @@ -111,7 +113,7 @@ def test_unmerged_sounding(gem, gio, station): gsped = gso[0].sped.values.squeeze() ghght = gso[0].hght.values.squeeze() - gempak = pd.read_csv(get_test_data(f'{gem}'), na_values=-9999) + gempak = pd.read_csv(get_test_data(f'{gem}', as_file_obj=False), na_values=-9999) dpres = gempak.PRES.values dtemp = gempak.TEMP.values 
ddwpt = gempak.DWPT.values @@ -141,7 +143,8 @@ def test_unmerged_sigw_pressure_sounding(): gsped = gso[0].sped.values.squeeze() ghght = gso[0].hght.values.squeeze() - gempak = pd.read_csv(get_test_data('gem_sigw_pres_unmrg_man_bgl.csv'), na_values=-9999) + gempak = pd.read_csv(get_test_data('gem_sigw_pres_unmrg_man_bgl.csv', as_file_obj=False), + na_values=-9999) dpres = gempak.PRES.values dtemp = gempak.TEMP.values ddwpt = gempak.DWPT.values @@ -159,18 +162,14 @@ def test_unmerged_sigw_pressure_sounding(): def test_standard_surface(): """Test to read a standard surface file.""" - def dtparse(string): - return datetime.strptime(string, '%y%m%d/%H%M') - skip = ['text', 'spcl'] gsf = GempakSurface(get_test_data('gem_std.sfc')) gstns = gsf.sfjson() - gempak = pd.read_csv(get_test_data('gem_std.csv'), - index_col=['STN', 'YYMMDD/HHMM'], - parse_dates=['YYMMDD/HHMM'], - date_parser=dtparse) + gempak = pd.read_csv(get_test_data('gem_std.csv', as_file_obj=False)) + gempak['YYMMDD/HHMM'] = pd.to_datetime(gempak['YYMMDD/HHMM'], format='%y%m%d/%H%M') + gempak = gempak.set_index(['STN', 'YYMMDD/HHMM']) for stn in gstns: idx_key = (stn['properties']['station_id'], @@ -184,17 +183,13 @@ def dtparse(string): def test_ship_surface(): """Test to read a ship surface file.""" - def dtparse(string): - return datetime.strptime(string, '%y%m%d/%H%M') - skip = ['text', 'spcl'] gsf = GempakSurface(get_test_data('gem_ship.sfc')) - gempak = pd.read_csv(get_test_data('gem_ship.csv'), - index_col=['STN', 'YYMMDD/HHMM'], - parse_dates=['YYMMDD/HHMM'], - date_parser=dtparse) + gempak = pd.read_csv(get_test_data('gem_ship.csv', as_file_obj=False)) + gempak['YYMMDD/HHMM'] = pd.to_datetime(gempak['YYMMDD/HHMM'], format='%y%m%d/%H%M') + gempak = gempak.set_index(['STN', 'YYMMDD/HHMM']) gempak.sort_index(inplace=True) uidx = gempak.index.unique() @@ -220,7 +215,7 @@ def test_coordinates_creation(proj_type): decode_lat = grid.lat decode_lon = grid.lon - gempak = 
np.load(get_test_data(f'gem_{proj_type}.npz')) + gempak = np.load(get_test_data(f'gem_{proj_type}.npz', as_file_obj=False)) true_lat = gempak['lat'] true_lon = gempak['lon'] @@ -272,12 +267,10 @@ def test_date_parsing(): def test_surface_text(text_type, date_time): """Test text decoding of surface hourly and special observations.""" g = get_test_data('gem_surface_with_text.sfc') - d = get_test_data('gem_surface_with_text.csv') - gsf = GempakSurface(g) text = gsf.nearest_time(date_time, station_id='MSN')[0]['values'][text_type] - gempak = pd.read_csv(d) + gempak = pd.read_csv(get_test_data('gem_surface_with_text.csv', as_file_obj=False)) gem_text = gempak.loc[:, text_type.upper()][0] assert text == gem_text @@ -287,10 +280,9 @@ def test_surface_text(text_type, date_time): def test_sounding_text(text_type): """Test for proper decoding of coded message text.""" g = get_test_data('gem_unmerged_with_text.snd') - d = get_test_data('gem_unmerged_with_text.csv') - gso = GempakSounding(g).snxarray(station_id='OUN')[0] - gempak = pd.read_csv(d) + + gempak = pd.read_csv(get_test_data('gem_unmerged_with_text.csv', as_file_obj=False)) text = gso.attrs['WMO_CODES'][text_type] gem_text = gempak.loc[:, text_type.upper()][0] diff --git a/tests/io/test_metar.py b/tests/io/test_metar.py index 6ed6d170460..d3dad5629b8 100644 --- a/tests/io/test_metar.py +++ b/tests/io/test_metar.py @@ -212,10 +212,10 @@ def test_date_time_given(): """Test for when date_time is given.""" df = parse_metar_to_dataframe('K6B0 261200Z AUTO 00000KT 10SM CLR 20/M17 A3002 RMK AO2 ' 'T01990165=', year=2019, month=6) - assert df.date_time[0] == datetime(2019, 6, 26, 12) - assert df.eastward_wind[0] == 0 - assert df.northward_wind[0] == 0 - assert_almost_equal(df.air_pressure_at_sea_level[0], 1016.56) + assert df.iloc[0].date_time == datetime(2019, 6, 26, 12) + assert df.iloc[0].eastward_wind == 0 + assert df.iloc[0].northward_wind == 0 + assert_almost_equal(df.iloc[0].air_pressure_at_sea_level, 1016.56) 
assert_almost_equal(df.visibility.values, 16093.44) @@ -372,7 +372,7 @@ def test_parse_no_pint_objects_in_df(): for df in (parse_metar_file(input_file), parse_metar_to_dataframe(metar_str)): for column in df: - assert not is_quantity(df[column][0]) + assert not is_quantity(df.iloc[0][column]) def test_repr(): diff --git a/tests/io/test_nexrad.py b/tests/io/test_nexrad.py index 2d227021ac9..46f0cf98863 100644 --- a/tests/io/test_nexrad.py +++ b/tests/io/test_nexrad.py @@ -72,8 +72,15 @@ def read(self, n=None): """Read bytes.""" return self._f.read(n) + def close(self): + """Close object.""" + return self._f.close() + f = SeeklessReader(f) - Level2File(f) + + # Need to close manually (since we own the fboj) to avoid a warning + with contextlib.closing(f): + Level2File(f) def test_doubled_file(): diff --git a/tests/plots/test_declarative.py b/tests/plots/test_declarative.py index e075215d0d6..407736a25bd 100644 --- a/tests/plots/test_declarative.py +++ b/tests/plots/test_declarative.py @@ -5,6 +5,7 @@ from datetime import datetime, timedelta from io import BytesIO +from unittest.mock import patch, PropertyMock import warnings import matplotlib.pyplot as plt @@ -184,7 +185,7 @@ def test_declarative_smooth_contour(): def test_declarative_smooth_contour_calculation(): """Test making a contour plot using smooth_contour.""" data = xr.open_dataset(get_test_data('narr_example.nc', as_file_obj=False)) - data = data.metpy.parse_cf() + data = data.metpy.parse_cf().metpy.assign_latitude_longitude() data['wind_speed'] = wind_speed(data['u_wind'], data['v_wind']) @@ -569,6 +570,8 @@ def test_ndim_error_scalar(cfeature): with pytest.raises(ValueError): pc.draw() + plt.close(pc.figure) + def test_ndim_error_vector(cfeature): """Make sure we get a useful error when the field is not set.""" @@ -590,6 +593,8 @@ def test_ndim_error_vector(cfeature): with pytest.raises(ValueError): pc.draw() + plt.close(pc.figure) + def test_no_field_error_barbs(): """Make sure we get a useful error when 
the field is not set.""" @@ -835,7 +840,7 @@ def test_latlon(): return pc.figure -@pytest.mark.mpl_image_compare(remove_text=True, tolerance=0.343) +@pytest.mark.mpl_image_compare(remove_text=True, tolerance=0.393) @needs_cartopy def test_declarative_barb_options(): """Test making a contour plot.""" @@ -957,7 +962,7 @@ def test_declarative_arrow_changes(): return pc.figure -@pytest.mark.mpl_image_compare(remove_text=True, tolerance=0.86) +@pytest.mark.mpl_image_compare(remove_text=True, tolerance=0.891) @needs_cartopy def test_declarative_barb_earth_relative(): """Test making a contour plot.""" @@ -1194,6 +1199,14 @@ def sample_obs(): columns=['time', 'stid', 'pressure', 'temperature', 'dewpoint']) +@pytest.fixture() +def pandas_sfc(): + """Open sample pandas data.""" + df = pd.read_csv(get_test_data('SFC_obs.csv', as_file_obj=False)) + df['valid'] = pd.to_datetime(df['valid'], format='%Y-%m-%d %H:%M:%S') + return df + + def test_plotobs_subset_default_nolevel(sample_obs): """Test PlotObs subsetting with minimal config.""" obs = PlotObs() @@ -1284,15 +1297,16 @@ def test_plotobs_subset_time_window_level(sample_obs): @pytest.mark.mpl_image_compare(remove_text=True, tolerance=0.016) -def test_plotobs_units_with_formatter(ccrs): +def test_plotobs_units_with_formatter(ccrs, pandas_sfc): """Test using PlotObs with a field that both has units and a custom formatter.""" - df = pd.read_csv(get_test_data('SFC_obs.csv', as_file_obj=False), - infer_datetime_format=True, parse_dates=['valid']) - df.units = {'alti': 'inHg'} + # Catch warning from Pandas due to setting units + with warnings.catch_warnings(): + warnings.simplefilter('ignore', UserWarning) + pandas_sfc.units = {'alti': 'inHg'} # Plot desired data obs = PlotObs() - obs.data = df + obs.data = pandas_sfc obs.time = datetime(1993, 3, 12, 12) obs.time_window = timedelta(minutes=15) obs.level = None @@ -1320,13 +1334,10 @@ def test_plotobs_units_with_formatter(ccrs): @pytest.mark.mpl_image_compare(remove_text=True, 
tolerance=0.025) -def test_declarative_sfc_obs(ccrs): +def test_declarative_sfc_obs(ccrs, pandas_sfc): """Test making a surface observation plot.""" - data = pd.read_csv(get_test_data('SFC_obs.csv', as_file_obj=False), - infer_datetime_format=True, parse_dates=['valid']) - obs = PlotObs() - obs.data = data + obs.data = pandas_sfc obs.time = datetime(1993, 3, 12, 12) obs.time_window = timedelta(minutes=15) obs.level = None @@ -1352,13 +1363,10 @@ def test_declarative_sfc_obs(ccrs): @pytest.mark.mpl_image_compare(remove_text=True, tolerance=0.025) -def test_declarative_sfc_obs_args(ccrs): +def test_declarative_sfc_obs_args(ccrs, pandas_sfc): """Test making a surface observation plot with mpl arguments.""" - data = pd.read_csv(get_test_data('SFC_obs.csv', as_file_obj=False), - infer_datetime_format=True, parse_dates=['valid']) - obs = PlotObs() - obs.data = data + obs.data = pandas_sfc obs.time = datetime(1993, 3, 12, 12) obs.time_window = timedelta(minutes=15) obs.level = None @@ -1386,13 +1394,10 @@ def test_declarative_sfc_obs_args(ccrs): @pytest.mark.mpl_image_compare(remove_text=True, tolerance=0.016) @needs_cartopy -def test_declarative_sfc_text(): +def test_declarative_sfc_text(pandas_sfc): """Test making a surface observation plot with text.""" - data = pd.read_csv(get_test_data('SFC_obs.csv', as_file_obj=False), - infer_datetime_format=True, parse_dates=['valid']) - obs = PlotObs() - obs.data = data + obs.data = pandas_sfc obs.time = datetime(1993, 3, 12, 12) obs.time_window = timedelta(minutes=15) obs.level = None @@ -1419,13 +1424,10 @@ def test_declarative_sfc_text(): @pytest.mark.mpl_image_compare(remove_text=True, tolerance=0.025) -def test_declarative_sfc_obs_changes(ccrs): +def test_declarative_sfc_obs_changes(ccrs, pandas_sfc): """Test making a surface observation plot, changing the field.""" - data = pd.read_csv(get_test_data('SFC_obs.csv', as_file_obj=False), - infer_datetime_format=True, parse_dates=['valid']) - obs = PlotObs() - obs.data = data + 
obs.data = pandas_sfc obs.time = datetime(1993, 3, 12, 12) obs.level = None obs.fields = ['tmpf'] @@ -1455,13 +1457,10 @@ def test_declarative_sfc_obs_changes(ccrs): @pytest.mark.mpl_image_compare(remove_text=True, tolerance=0.171) -def test_declarative_colored_barbs(ccrs): +def test_declarative_colored_barbs(ccrs, pandas_sfc): """Test making a surface plot with a colored barb (gh-1274).""" - data = pd.read_csv(get_test_data('SFC_obs.csv', as_file_obj=False), - infer_datetime_format=True, parse_dates=['valid']) - obs = PlotObs() - obs.data = data + obs.data = pandas_sfc obs.time = datetime(1993, 3, 12, 13) obs.level = None obs.vector_field = ('uwind', 'vwind') @@ -1487,13 +1486,10 @@ def test_declarative_colored_barbs(ccrs): @pytest.mark.mpl_image_compare(remove_text=True, tolerance=0.305) -def test_declarative_sfc_obs_full(ccrs): +def test_declarative_sfc_obs_full(ccrs, pandas_sfc): """Test making a full surface observation plot.""" - data = pd.read_csv(get_test_data('SFC_obs.csv', as_file_obj=False), - infer_datetime_format=True, parse_dates=['valid']) - obs = PlotObs() - obs.data = data + obs.data = pandas_sfc obs.time = datetime(1993, 3, 12, 13) obs.time_window = timedelta(minutes=15) obs.level = None @@ -1524,7 +1520,7 @@ def test_declarative_sfc_obs_full(ccrs): return pc.figure -@pytest.mark.mpl_image_compare(remove_text=True, tolerance=0.355) +@pytest.mark.mpl_image_compare(remove_text=True, tolerance=0.522) @needs_cartopy def test_declarative_upa_obs(): """Test making a full upperair observation plot.""" @@ -1561,7 +1557,7 @@ def test_declarative_upa_obs(): return pc.figure -@pytest.mark.mpl_image_compare(remove_text=True, tolerance=0.473) +@pytest.mark.mpl_image_compare(remove_text=True, tolerance=0.518) @needs_cartopy def test_declarative_upa_obs_convert_barb_units(): """Test making a full upperair observation plot with barbs converting units.""" @@ -1604,14 +1600,12 @@ def test_declarative_upa_obs_convert_barb_units(): return pc.figure -def 
test_attribute_error_time(ccrs): +def test_attribute_error_time(ccrs, pandas_sfc): """Make sure we get a useful error when the time variable is not found.""" - data = pd.read_csv(get_test_data('SFC_obs.csv', as_file_obj=False), - infer_datetime_format=True, parse_dates=['valid']) - data.rename(columns={'valid': 'vtime'}, inplace=True) + pandas_sfc.rename(columns={'valid': 'vtime'}, inplace=True) obs = PlotObs() - obs.data = data + obs.data = pandas_sfc obs.time = datetime(1993, 3, 12, 12) obs.level = None obs.fields = ['tmpf'] @@ -1634,15 +1628,15 @@ def test_attribute_error_time(ccrs): with pytest.raises(AttributeError): pc.draw() + plt.close(pc.figure) + -def test_attribute_error_station(ccrs): +def test_attribute_error_station(ccrs, pandas_sfc): """Make sure we get a useful error when the station variable is not found.""" - data = pd.read_csv(get_test_data('SFC_obs.csv', as_file_obj=False), - infer_datetime_format=True, parse_dates=['valid']) - data.rename(columns={'station': 'location'}, inplace=True) + pandas_sfc.rename(columns={'station': 'location'}, inplace=True) obs = PlotObs() - obs.data = data + obs.data = pandas_sfc obs.time = datetime(1993, 3, 12, 12) obs.level = None obs.fields = ['tmpf'] @@ -1665,6 +1659,8 @@ def test_attribute_error_station(ccrs): with pytest.raises(AttributeError): pc.draw() + plt.close(pc.figure) + @pytest.mark.mpl_image_compare(remove_text=True, tolerance=0.024) def test_declarative_sfc_obs_change_units(ccrs): @@ -1983,6 +1979,7 @@ def test_save(): pc = PanelContainer() fobj = BytesIO() pc.save(fobj, format='png') + plt.close(pc.figure) fobj.seek(0) @@ -1990,14 +1987,14 @@ def test_save(): assert fobj.read() -def test_show(set_agg_backend): +def test_show(): """Test that show works properly.""" pc = PanelContainer() - - # Matplotlib warns when using show with Agg - with warnings.catch_warnings(): - warnings.simplefilter('ignore', UserWarning) + with patch.object(plt, 'show', new_callable=PropertyMock) as show: pc.show() + 
show.assert_called() + + plt.close(pc.figure) @needs_cartopy @@ -2039,6 +2036,7 @@ def test_copy(): copied_obj = obj.copy() assert obj is not copied_obj assert obj.size == copied_obj.size + plt.close(obj.figure) # Copies of plots in MapPanels should not point to same location in memory obj = MapPanel() diff --git a/tests/plots/test_mpl.py b/tests/plots/test_mpl.py index 3dfd603be8b..4da999f8cc9 100644 --- a/tests/plots/test_mpl.py +++ b/tests/plots/test_mpl.py @@ -6,11 +6,11 @@ from tempfile import TemporaryFile import matplotlib.patheffects as mpatheffects -import matplotlib.pyplot as plt import numpy as np # Needed to trigger scattertext monkey-patching import metpy.plots # noqa: F401, I202 +from metpy.testing import autoclose_figure # Avoiding an image-based test here since that would involve text, which can be tricky @@ -19,11 +19,11 @@ def test_scattertext_patheffect_empty(): """Test scattertext with empty strings and PathEffects (Issue #245).""" strings = ['abc', '', 'def'] x, y = np.arange(6).reshape(2, 3) - fig = plt.figure() - ax = fig.add_subplot(1, 1, 1) - ax.scattertext(x, y, strings, color='white', - path_effects=[mpatheffects.withStroke(linewidth=1, foreground='black')]) + with autoclose_figure() as fig: + ax = fig.add_subplot(1, 1, 1) + ax.scattertext(x, y, strings, color='white', + path_effects=[mpatheffects.withStroke(linewidth=1, foreground='black')]) - # Need to trigger a render - with TemporaryFile('wb') as fobj: - fig.savefig(fobj) + # Need to trigger a render + with TemporaryFile('wb') as fobj: + fig.savefig(fobj) diff --git a/tests/plots/test_skewt.py b/tests/plots/test_skewt.py index 433fa095491..2a66389a3f7 100644 --- a/tests/plots/test_skewt.py +++ b/tests/plots/test_skewt.py @@ -11,7 +11,7 @@ import pytest from metpy.plots import Hodograph, SkewT -from metpy.testing import version_check +from metpy.testing import autoclose_figure, version_check from metpy.units import units @@ -126,7 +126,8 @@ def test_skewt_with_grid_enabled(): """Test 
using SkewT when gridlines are already enabled (#271).""" with plt.rc_context(rc={'axes.grid': True}): # Also tests when we don't pass in Figure - SkewT(aspect='auto') + s = SkewT(aspect='auto') + plt.close(s.ax.figure) @pytest.mark.mpl_image_compare(tolerance=0., remove_text=True, style='default') @@ -139,8 +140,8 @@ def test_skewt_arbitrary_rect(): def test_skewt_subplot_rect_conflict(): """Test the subplot/rect conflict failure.""" - with pytest.raises(ValueError): - SkewT(rect=(0.15, 0.35, 0.8, 0.3), subplot=(1, 1, 1)) + with pytest.raises(ValueError), autoclose_figure(figsize=(7, 7)) as fig: + SkewT(fig, rect=(0.15, 0.35, 0.8, 0.3), subplot=(1, 1, 1)) @pytest.mark.mpl_image_compare(tolerance=0.0198, remove_text=True, style='default') @@ -262,12 +263,12 @@ def test_skewt_shade_area(test_profile): def test_skewt_shade_area_invalid(test_profile): """Test shading areas on a SkewT plot.""" p, t, _, tp = test_profile - fig = plt.figure(figsize=(9, 9)) - skew = SkewT(fig, aspect='auto') - skew.plot(p, t, 'r') - skew.plot(p, tp, 'k') - with pytest.raises(ValueError): - skew.shade_area(p, t, tp, which='positve') + with autoclose_figure(figsize=(9, 9)) as fig: + skew = SkewT(fig, aspect='auto') + skew.plot(p, t, 'r') + skew.plot(p, tp, 'k') + with pytest.raises(ValueError): + skew.shade_area(p, t, tp, which='positve') @pytest.mark.mpl_image_compare(tolerance=0.033, remove_text=True, style='default') @@ -356,7 +357,8 @@ def test_hodograph_masked_array(): def test_hodograph_alone(): """Test to create Hodograph without specifying axes.""" - Hodograph() + h = Hodograph() + plt.close(h.ax.figure) @pytest.mark.mpl_image_compare(tolerance=0, remove_text=True) @@ -430,10 +432,10 @@ def test_skewt_barb_unit_conversion_exception(u, v): """Test that an error is raised if unit conversion is requested on plain arrays.""" p_wind = np.array([500]) * units.hPa - fig = plt.figure(figsize=(9, 9)) - skew = SkewT(fig, aspect='auto') - with pytest.raises(ValueError): - 
skew.plot_barbs(p_wind, u, v, plot_units='knots') + with autoclose_figure(figsize=(9, 9)) as fig: + skew = SkewT(fig, aspect='auto') + with pytest.raises(ValueError): + skew.plot_barbs(p_wind, u, v, plot_units='knots') @pytest.mark.mpl_image_compare(tolerance=0, remove_text=True) @@ -571,6 +573,6 @@ def test_hodograph_wind_vectors(): def test_hodograph_range_with_units(): """Tests making a hodograph with a range with units.""" - fig = plt.figure(figsize=(6, 6)) - ax = fig.add_subplot(1, 1, 1) - Hodograph(ax, component_range=60. * units.knots) + with autoclose_figure(figsize=(6, 6)) as fig: + ax = fig.add_subplot(1, 1, 1) + Hodograph(ax, component_range=60. * units.knots) diff --git a/tests/plots/test_station_plot.py b/tests/plots/test_station_plot.py index 80012bc9ab0..54dde8dc52d 100644 --- a/tests/plots/test_station_plot.py +++ b/tests/plots/test_station_plot.py @@ -11,6 +11,7 @@ from metpy.plots import (current_weather, high_clouds, nws_layout, simple_layout, sky_cover, StationPlot, StationPlotLayout) +from metpy.testing import autoclose_figure from metpy.units import units @@ -145,8 +146,6 @@ def test_stationlayout_api(): def test_station_layout_odd_data(): """Test more corner cases with data passed in.""" - fig = plt.figure(figsize=(9, 9)) - # Set up test layout layout = StationPlotLayout() layout.add_barb('u', 'v') @@ -156,9 +155,9 @@ def test_station_layout_odd_data(): data = {'temperature': [25.]} # Make the plot - sp = StationPlot(fig.add_subplot(1, 1, 1), [1], [2], fontsize=12) - layout.plot(sp, data) - assert True + with autoclose_figure(figsize=(9, 9)) as fig: + sp = StationPlot(fig.add_subplot(1, 1, 1), [1], [2], fontsize=12) + layout.plot(sp, data) def test_station_layout_replace(): @@ -288,7 +287,7 @@ def wind_plot(): return u, v, x, y -@pytest.mark.mpl_image_compare(remove_text=True, tolerance=0.499) +@pytest.mark.mpl_image_compare(remove_text=True, tolerance=0.5) def test_barb_projection(wind_plot, ccrs): """Test that barbs are properly projected 
(#598).""" u, v, x, y = wind_plot @@ -333,22 +332,22 @@ def test_barb_projection_list(wind_projection_list): """Test that barbs will be projected when lat/lon lists are provided.""" lat, lon, u, v = wind_projection_list - fig = plt.figure() - ax = fig.add_subplot(1, 1, 1) - stnplot = StationPlot(ax, lon, lat) - stnplot.plot_barb(u, v) - assert stnplot.barbs + with autoclose_figure() as fig: + ax = fig.add_subplot(1, 1, 1) + stnplot = StationPlot(ax, lon, lat) + stnplot.plot_barb(u, v) + assert stnplot.barbs def test_arrow_projection_list(wind_projection_list): """Test that arrows will be projected when lat/lon lists are provided.""" lat, lon, u, v = wind_projection_list - fig = plt.figure() - ax = fig.add_subplot(1, 1, 1) - stnplot = StationPlot(ax, lon, lat) - stnplot.plot_arrow(u, v) - assert stnplot.arrows + with autoclose_figure() as fig: + ax = fig.add_subplot(1, 1, 1) + stnplot = StationPlot(ax, lon, lat) + stnplot.plot_arrow(u, v) + assert stnplot.arrows @pytest.fixture @@ -416,11 +415,11 @@ def test_barb_unit_conversion_exception(u, v): x_pos = np.array([0]) y_pos = np.array([0]) - fig = plt.figure() - ax = fig.add_subplot(1, 1, 1) - stnplot = StationPlot(ax, x_pos, y_pos) - with pytest.raises(ValueError): - stnplot.plot_barb(u, v, plot_units='knots') + with autoclose_figure() as fig: + ax = fig.add_subplot(1, 1, 1) + stnplot = StationPlot(ax, x_pos, y_pos) + with pytest.raises(ValueError): + stnplot.plot_barb(u, v, plot_units='knots') @pytest.mark.mpl_image_compare(tolerance=0.021, savefig_kwargs={'dpi': 300}, remove_text=True) @@ -467,8 +466,8 @@ def test_scalar_unit_conversion_exception(): x_pos = np.array([0]) y_pos = np.array([0]) - fig = plt.figure() - ax = fig.add_subplot(1, 1, 1) - stnplot = StationPlot(ax, x_pos, y_pos) - with pytest.raises(ValueError): - stnplot.plot_parameter('C', 50, plot_units='degC') + with autoclose_figure() as fig: + ax = fig.add_subplot(1, 1, 1) + stnplot = StationPlot(ax, x_pos, y_pos) + with pytest.raises(ValueError): + 
stnplot.plot_parameter('C', 50, plot_units='degC') diff --git a/tests/plots/test_util.py b/tests/plots/test_util.py index 6d77ae33ef9..96b366cd878 100644 --- a/tests/plots/test_util.py +++ b/tests/plots/test_util.py @@ -11,7 +11,7 @@ import xarray as xr from metpy.plots import add_metpy_logo, add_timestamp, add_unidata_logo, convert_gempak_color -from metpy.testing import get_test_data, version_check +from metpy.testing import autoclose_figure, get_test_data, version_check @pytest.mark.mpl_image_compare(tolerance=2.638, remove_text=True) @@ -52,12 +52,12 @@ def test_add_timestamp_high_contrast(): def test_add_timestamp_xarray(): """Test that add_timestamp can work with xarray datetime accessor.""" - fig = plt.figure() - ax = fig.add_subplot(1, 1, 1) - ds = xr.open_dataset(get_test_data('AK-REGIONAL_8km_3.9_20160408_1445.gini'), - engine='gini') - txt = add_timestamp(ax, ds.time.dt, pretext='') - assert txt.get_text() == '2016-04-08T14:45:20Z' + with autoclose_figure() as fig: + ax = fig.add_subplot(1, 1, 1) + ds = xr.open_dataset(get_test_data('AK-REGIONAL_8km_3.9_20160408_1445.gini'), + engine='gini') + txt = add_timestamp(ax, ds.time.dt, pretext='') + assert txt.get_text() == '2016-04-08T14:45:20Z' @pytest.mark.mpl_image_compare(tolerance=0.004, remove_text=True) @@ -86,8 +86,7 @@ def test_add_unidata_logo(): def test_add_logo_invalid_size(): """Test adding a logo to a figure with an invalid size specification.""" - fig = plt.figure(figsize=(9, 9)) - with pytest.raises(ValueError): + with pytest.raises(ValueError), autoclose_figure(figsize=(9, 9)) as fig: add_metpy_logo(fig, size='jumbo') diff --git a/tests/test_xarray.py b/tests/test_xarray.py index ef767f56972..212c07a6c13 100644 --- a/tests/test_xarray.py +++ b/tests/test_xarray.py @@ -193,7 +193,7 @@ def test_quantify(test_ds_generic): assert is_quantity(result.data) assert result.data.units == units.kelvin assert 'units' not in result.attrs - np.testing.assert_array_almost_equal(result.data, 
units.Quantity(original)) + assert_array_almost_equal(result.data, units.Quantity(original, 'K')) def test_dequantify(): @@ -213,9 +213,9 @@ def test_dataset_quantify(test_ds_generic): assert is_quantity(result['test'].data) assert result['test'].data.units == units.kelvin assert 'units' not in result['test'].attrs - np.testing.assert_array_almost_equal( + assert_array_almost_equal( result['test'].data, - units.Quantity(test_ds_generic['test'].data) + units.Quantity(test_ds_generic['test'].data, 'K') ) assert result.attrs == test_ds_generic.attrs @@ -1354,7 +1354,7 @@ def test_preprocess_and_wrap_with_to_magnitude(): def func(a, b): return a * b - np.testing.assert_array_equal(func(data, data2), np.array([0, 0, 1])) + assert_array_equal(func(data, data2), np.array([0, 0, 1])) def test_preprocess_and_wrap_with_variable(): @@ -1377,9 +1377,9 @@ def func(a, b): result_21 = func(data2, data1) assert isinstance(result_12, xr.DataArray) - xr.testing.assert_identical(func(data1, data2), expected_12) + xr.testing.assert_identical(result_12, expected_12) assert is_quantity(result_21) - assert_array_equal(func(data2, data1), expected_21) + assert_array_equal(result_21, expected_21) def test_grid_deltas_from_dataarray_lonlat(test_da_lonlat):