From 1c390d93379e01b7cd9a68c760e7b70923e3c221 Mon Sep 17 00:00:00 2001 From: Maximilian Roos Date: Sat, 21 Aug 2021 12:51:30 -0700 Subject: [PATCH 1/5] Type annotate lots of tests --- properties/test_encode_decode.py | 4 +- xarray/core/common.py | 8 +- xarray/core/computation.py | 2 +- xarray/core/coordinates.py | 2 +- xarray/core/dataarray.py | 35 ++-- xarray/core/dataset.py | 34 ++-- xarray/core/indexes.py | 8 +- xarray/core/merge.py | 17 +- xarray/core/rolling_exp.py | 4 +- xarray/core/utils.py | 6 +- xarray/core/variable.py | 17 +- xarray/tests/test_accessor_dt.py | 62 ++++--- xarray/tests/test_accessor_str.py | 200 +++++++++++---------- xarray/tests/test_backends_api.py | 4 +- xarray/tests/test_backends_common.py | 2 +- xarray/tests/test_backends_file_manager.py | 35 ++-- xarray/tests/test_backends_locks.py | 2 +- xarray/tests/test_backends_lru_cache.py | 30 ++-- xarray/tests/test_cftimeindex_resample.py | 6 +- xarray/tests/test_coarsen.py | 18 +- xarray/tests/test_coding.py | 18 +- xarray/tests/test_coding_strings.py | 32 ++-- xarray/tests/test_coding_times.py | 119 ++++++------ xarray/tests/test_computation.py | 118 ++++++------ xarray/tests/test_conventions.py | 52 +++--- xarray/tests/test_cupy.py | 6 +- xarray/tests/test_distributed.py | 14 +- xarray/tests/test_extensions.py | 17 +- xarray/tests/test_formatting.py | 44 ++--- xarray/tests/test_formatting_html.py | 41 +++-- xarray/tests/test_groupby.py | 82 ++++----- xarray/tests/test_indexes.py | 30 ++-- xarray/tests/test_indexing.py | 74 ++++---- xarray/tests/test_nputils.py | 4 +- xarray/tests/test_options.py | 34 ++-- xarray/tests/test_plugins.py | 32 ++-- xarray/tests/test_print_versions.py | 2 +- xarray/tests/test_testing.py | 10 +- xarray/tests/test_tutorial.py | 6 +- 39 files changed, 627 insertions(+), 604 deletions(-) diff --git a/properties/test_encode_decode.py b/properties/test_encode_decode.py index 4b0643cb2fe..3ba037e28b0 100644 --- a/properties/test_encode_decode.py +++ b/properties/test_encode_decode.py @@ -25,7 +25,7 @@ @pytest.mark.slow @given(st.data(), an_array) -def test_CFMask_coder_roundtrip(data, arr): +def test_CFMask_coder_roundtrip(data, arr) -> None: names = data.draw( st.lists(st.text(), min_size=arr.ndim, max_size=arr.ndim, unique=True).map( tuple @@ -39,7 +39,7 @@ def test_CFMask_coder_roundtrip(data, arr): @pytest.mark.slow @given(st.data(), an_array) -def test_CFScaleOffset_coder_roundtrip(data, arr): +def test_CFScaleOffset_coder_roundtrip(data, arr) -> None: names = data.draw( st.lists(st.text(), min_size=arr.ndim, max_size=arr.ndim, unique=True).map( tuple diff --git a/xarray/core/common.py b/xarray/core/common.py index ab822f576d3..55c4e3376ee 100644 --- a/xarray/core/common.py +++ b/xarray/core/common.py @@ -409,7 +409,7 @@ def get_index(self, key: Hashable) -> pd.Index: return pd.Index(range(self.sizes[key]), name=key) def _calc_assign_results( - self: C, kwargs: Mapping[Hashable, Union[T, Callable[[C], T]]] + self: C, kwargs: Mapping[Any, Union[T, Callable[[C], T]]] ) -> Dict[Hashable, T]: return {k: v(self) if callable(v) else v for k, v in kwargs.items()} @@ -820,7 +820,7 @@ def rolling( self, dim: Mapping[Hashable, int] = None, min_periods: int = None, - center: Union[bool, Mapping[Hashable, bool]] = False, + center: Union[bool, Mapping[Any, bool]] = False, **window_kwargs: int, ): """ @@ -935,7 +935,7 @@ def coarsen( self, dim: Mapping[Hashable, int] = None, boundary: str = "exact", - side: Union[str, Mapping[Hashable, str]] = "left", + side: Union[str, Mapping[Any, str]] = "left", 
coord_func: str = "mean", **window_kwargs: int, ): @@ -1520,7 +1520,7 @@ def __getitem__(self, value): def full_like( other: "Dataset", fill_value, - dtype: Union[DTypeLike, Mapping[Hashable, DTypeLike]] = None, + dtype: Union[DTypeLike, Mapping[Any, DTypeLike]] = None, ) -> "Dataset": ... diff --git a/xarray/core/computation.py b/xarray/core/computation.py index cd9e22d90db..1906f149ae7 100644 --- a/xarray/core/computation.py +++ b/xarray/core/computation.py @@ -370,7 +370,7 @@ def _as_variables_or_variable(arg): def _unpack_dict_tuples( - result_vars: Mapping[Hashable, Tuple[Variable, ...]], num_outputs: int + result_vars: Mapping[Any, Tuple[Variable, ...]], num_outputs: int ) -> Tuple[Dict[Hashable, Variable], ...]: out: Tuple[Dict[Hashable, Variable], ...] = tuple({} for _ in range(num_outputs)) for name, values in result_vars.items(): diff --git a/xarray/core/coordinates.py b/xarray/core/coordinates.py index 767b76d0d12..3f47eaa4554 100644 --- a/xarray/core/coordinates.py +++ b/xarray/core/coordinates.py @@ -31,7 +31,7 @@ _THIS_ARRAY = ReprObject("") -class Coordinates(Mapping[Hashable, "DataArray"]): +class Coordinates(Mapping[Any, "DataArray"]): __slots__ = () def __getitem__(self, key: Hashable) -> "DataArray": diff --git a/xarray/core/dataarray.py b/xarray/core/dataarray.py index 900af885319..673decd8fd8 100644 --- a/xarray/core/dataarray.py +++ b/xarray/core/dataarray.py @@ -365,7 +365,7 @@ class DataArray(AbstractArray, DataWithCoords, DataArrayArithmetic): def __init__( self, data: Any = dtypes.NA, - coords: Union[Sequence[Tuple], Mapping[Hashable, Any], None] = None, + coords: Union[Sequence[Tuple], Mapping[Any, Any], None] = None, dims: Union[Hashable, Sequence[Hashable], None] = None, name: Hashable = None, attrs: Mapping = None, @@ -787,7 +787,8 @@ def loc(self) -> _LocIndexer: return _LocIndexer(self) @property - def attrs(self) -> Dict[Hashable, Any]: + # Key type needs to be `Any` because of mypy#4167 + def attrs(self) -> Dict[Any, Any]: """Dictionary storing arbitrary metadata with this array.""" return self.variable.attrs @@ -1067,7 +1068,7 @@ def chunk( int, Tuple[int, ...], Tuple[Tuple[int, ...], ...], - Mapping[Hashable, Union[None, int, Tuple[int, ...]]], + Mapping[Any, Union[None, int, Tuple[int, ...]]], ] = {}, # {} even though it's technically unsafe, is being used intentionally here (#4667) name_prefix: str = "xarray-", token: str = None, @@ -1311,7 +1312,7 @@ def sel( def head( self, - indexers: Union[Mapping[Hashable, int], int] = None, + indexers: Union[Mapping[Any, int], int] = None, **indexers_kwargs: Any, ) -> "DataArray": """Return a new DataArray whose data is given by the the first `n` @@ -1328,7 +1329,7 @@ def head( def tail( self, - indexers: Union[Mapping[Hashable, int], int] = None, + indexers: Union[Mapping[Any, int], int] = None, **indexers_kwargs: Any, ) -> "DataArray": """Return a new DataArray whose data is given by the the last `n` @@ -1345,7 +1346,7 @@ def tail( def thin( self, - indexers: Union[Mapping[Hashable, int], int] = None, + indexers: Union[Mapping[Any, int], int] = None, **indexers_kwargs: Any, ) -> "DataArray": """Return a new DataArray whose data is given by each `n` value @@ -1777,7 +1778,7 @@ def interp_like( def rename( self, - new_name_or_name_dict: Union[Hashable, Mapping[Hashable, Hashable]] = None, + new_name_or_name_dict: Union[Hashable, Mapping[Any, Hashable]] = None, **names: Hashable, ) -> "DataArray": """Returns a new DataArray with renamed coordinates or a new name. 
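A note on the blanket `-> None` additions across the test files: under mypy's default settings (no --check-untyped-defs), the body of a completely unannotated function is skipped, so annotating each test's return type is the cheapest way to opt its body into type checking. A minimal sketch, with hypothetical test names:

    def test_unannotated():
        x: int = "oops"  # mypy is silent here: unannotated bodies are skipped

    def test_annotated() -> None:
        x: int = "oops"  # error: Incompatible types in assignment (str vs int)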
@@ -1873,7 +1874,7 @@ def swap_dims( def expand_dims( self, - dim: Union[None, Hashable, Sequence[Hashable], Mapping[Hashable, Any]] = None, + dim: Union[None, Hashable, Sequence[Hashable], Mapping[Any, Any]] = None, axis=None, **dim_kwargs: Any, ) -> "DataArray": @@ -1925,7 +1926,7 @@ def expand_dims( def set_index( self, - indexes: Mapping[Hashable, Union[Hashable, Sequence[Hashable]]] = None, + indexes: Mapping[Any, Union[Hashable, Sequence[Hashable]]] = None, append: bool = False, **indexes_kwargs: Union[Hashable, Sequence[Hashable]], ) -> "DataArray": @@ -2013,7 +2014,7 @@ def reset_index( def reorder_levels( self, - dim_order: Mapping[Hashable, Sequence[int]] = None, + dim_order: Mapping[Any, Sequence[int]] = None, **dim_order_kwargs: Sequence[int], ) -> "DataArray": """Rearrange index levels using input order. @@ -2048,7 +2049,7 @@ def reorder_levels( def stack( self, - dimensions: Mapping[Hashable, Sequence[Hashable]] = None, + dimensions: Mapping[Any, Sequence[Hashable]] = None, **dimensions_kwargs: Sequence[Hashable], ) -> "DataArray": """ @@ -3867,17 +3868,13 @@ def polyfit( def pad( self, - pad_width: Mapping[Hashable, Union[int, Tuple[int, int]]] = None, + pad_width: Mapping[Any, Union[int, Tuple[int, int]]] = None, mode: str = "constant", - stat_length: Union[ - int, Tuple[int, int], Mapping[Hashable, Tuple[int, int]] - ] = None, + stat_length: Union[int, Tuple[int, int], Mapping[Any, Tuple[int, int]]] = None, constant_values: Union[ - int, Tuple[int, int], Mapping[Hashable, Tuple[int, int]] - ] = None, - end_values: Union[ - int, Tuple[int, int], Mapping[Hashable, Tuple[int, int]] + int, Tuple[int, int], Mapping[Any, Tuple[int, int]] ] = None, + end_values: Union[int, Tuple[int, int], Mapping[Any, Tuple[int, int]]] = None, reflect_type: str = None, **pad_width_kwargs: Any, ) -> "DataArray": diff --git a/xarray/core/dataset.py b/xarray/core/dataset.py index 4bfc1ccbdf1..848367d100b 100644 --- a/xarray/core/dataset.py +++ b/xarray/core/dataset.py @@ -212,8 +212,8 @@ def calculate_dimensions(variables: Mapping[Hashable, Variable]) -> Dict[Hashabl def merge_indexes( - indexes: Mapping[Hashable, Union[Hashable, Sequence[Hashable]]], - variables: Mapping[Hashable, Variable], + indexes: Mapping[Any, Union[Hashable, Sequence[Hashable]]], + variables: Mapping[Any, Variable], coord_names: Set[Hashable], append: bool = False, ) -> Tuple[Dict[Hashable, Variable], Set[Hashable]]: @@ -512,7 +512,7 @@ def _initialize_feasible(lb, ub): return param_defaults, bounds_defaults -class DataVariables(Mapping[Hashable, "DataArray"]): +class DataVariables(Mapping[Any, "DataArray"]): __slots__ = ("_dataset",) def __init__(self, dataset: "Dataset"): @@ -2107,7 +2107,7 @@ def chunk( chunks: Union[ int, str, - Mapping[Hashable, Union[None, int, str, Tuple[int, ...]]], + Mapping[Any, Union[None, int, str, Tuple[int, ...]]], ] = {}, # {} even though it's technically unsafe, is being used intentionally here (#4667) name_prefix: str = "xarray-", token: str = None, @@ -2482,7 +2482,7 @@ def sel( def head( self, - indexers: Union[Mapping[Hashable, int], int] = None, + indexers: Union[Mapping[Any, int], int] = None, **indexers_kwargs: Any, ) -> "Dataset": """Returns a new dataset with the first `n` values of each array @@ -2528,7 +2528,7 @@ def head( def tail( self, - indexers: Union[Mapping[Hashable, int], int] = None, + indexers: Union[Mapping[Any, int], int] = None, **indexers_kwargs: Any, ) -> "Dataset": """Returns a new dataset with the last `n` values of each array @@ -2577,7 +2577,7 @@ def tail( def 
thin( self, - indexers: Union[Mapping[Hashable, int], int] = None, + indexers: Union[Mapping[Any, int], int] = None, **indexers_kwargs: Any, ) -> "Dataset": """Returns a new dataset with each array indexed along every `n`-th @@ -3556,7 +3556,7 @@ def swap_dims( def expand_dims( self, - dim: Union[None, Hashable, Sequence[Hashable], Mapping[Hashable, Any]] = None, + dim: Union[None, Hashable, Sequence[Hashable], Mapping[Any, Any]] = None, axis: Union[None, int, Sequence[int]] = None, **dim_kwargs: Any, ) -> "Dataset": @@ -3688,7 +3688,7 @@ def expand_dims( def set_index( self, - indexes: Mapping[Hashable, Union[Hashable, Sequence[Hashable]]] = None, + indexes: Mapping[Any, Union[Hashable, Sequence[Hashable]]] = None, append: bool = False, **indexes_kwargs: Union[Hashable, Sequence[Hashable]], ) -> "Dataset": @@ -3786,7 +3786,7 @@ def reset_index( def reorder_levels( self, - dim_order: Mapping[Hashable, Sequence[int]] = None, + dim_order: Mapping[Any, Sequence[int]] = None, **dim_order_kwargs: Sequence[int], ) -> "Dataset": """Rearrange index levels using input order. @@ -3855,7 +3855,7 @@ def _stack_once(self, dims, new_dim): def stack( self, - dimensions: Mapping[Hashable, Sequence[Hashable]] = None, + dimensions: Mapping[Any, Sequence[Hashable]] = None, **dimensions_kwargs: Sequence[Hashable], ) -> "Dataset": """ @@ -6926,17 +6926,13 @@ def polyfit( def pad( self, - pad_width: Mapping[Hashable, Union[int, Tuple[int, int]]] = None, + pad_width: Mapping[Any, Union[int, Tuple[int, int]]] = None, mode: str = "constant", - stat_length: Union[ - int, Tuple[int, int], Mapping[Hashable, Tuple[int, int]] - ] = None, + stat_length: Union[int, Tuple[int, int], Mapping[Any, Tuple[int, int]]] = None, constant_values: Union[ - int, Tuple[int, int], Mapping[Hashable, Tuple[int, int]] - ] = None, - end_values: Union[ - int, Tuple[int, int], Mapping[Hashable, Tuple[int, int]] + int, Tuple[int, int], Mapping[Any, Tuple[int, int]] ] = None, + end_values: Union[int, Tuple[int, int], Mapping[Any, Tuple[int, int]]] = None, reflect_type: str = None, **pad_width_kwargs: Any, ) -> "Dataset": diff --git a/xarray/core/indexes.py b/xarray/core/indexes.py index 429c37af588..95b6ccaad30 100644 --- a/xarray/core/indexes.py +++ b/xarray/core/indexes.py @@ -34,7 +34,7 @@ class Index: @classmethod def from_variables( - cls, variables: Mapping[Hashable, "Variable"] + cls, variables: Mapping[Any, "Variable"] ) -> Tuple["Index", Optional[IndexVars]]: # pragma: no cover raise NotImplementedError() @@ -153,7 +153,7 @@ def __init__(self, array: Any, dim: Hashable): self.dim = dim @classmethod - def from_variables(cls, variables: Mapping[Hashable, "Variable"]): + def from_variables(cls, variables: Mapping[Any, "Variable"]): from .variable import IndexVariable if len(variables) != 1: @@ -291,7 +291,7 @@ def _create_variables_from_multiindex(index, dim, level_meta=None): class PandasMultiIndex(PandasIndex): @classmethod - def from_variables(cls, variables: Mapping[Hashable, "Variable"]): + def from_variables(cls, variables: Mapping[Any, "Variable"]): if any([var.ndim != 1 for var in variables.values()]): raise ValueError("PandasMultiIndex only accepts 1-dimensional variables") @@ -499,7 +499,7 @@ def isel_variable_and_index( name: Hashable, variable: "Variable", index: Index, - indexers: Mapping[Hashable, Union[int, slice, np.ndarray, "Variable"]], + indexers: Mapping[Any, Union[int, slice, np.ndarray, "Variable"]], ) -> Tuple["Variable", Optional[Index]]: """Index a Variable and an Index together. 
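The many `Mapping[Hashable, ...]` -> `Mapping[Any, ...]` substitutions (and the mypy#4167 comment on `attrs` above) all work around the same limitation: a `Mapping`'s key type parameter is invariant, so the common `Dict[str, ...]` argument is rejected where `Mapping[Hashable, ...]` is expected, even though `str` is hashable. A minimal sketch of the failure mode, with hypothetical function names:

    from typing import Any, Dict, Hashable, Mapping

    def head_strict(indexers: Mapping[Hashable, int]) -> None: ...
    def head_loose(indexers: Mapping[Any, int]) -> None: ...

    indexers: Dict[str, int] = {"x": 5}
    head_strict(indexers)  # error: "Dict[str, int]" is not "Mapping[Hashable, int]"
    head_loose(indexers)   # OK: Any is compatible with str keys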
diff --git a/xarray/core/merge.py b/xarray/core/merge.py index b8b32bdaa01..50b555594de 100644 --- a/xarray/core/merge.py +++ b/xarray/core/merge.py @@ -1,3 +1,4 @@ +from __future__ import annotations from typing import ( TYPE_CHECKING, AbstractSet, @@ -38,9 +39,9 @@ Tuple[DimsLike, ArrayLike, Mapping, Mapping], ] XarrayValue = Union[DataArray, Variable, VariableLike] - DatasetLike = Union[Dataset, Mapping[Hashable, XarrayValue]] + DatasetLike = Union[Dataset, Mapping[Any, XarrayValue]] CoercibleValue = Union[XarrayValue, pd.Series, pd.DataFrame] - CoercibleMapping = Union[Dataset, Mapping[Hashable, CoercibleValue]] + CoercibleMapping = Union[Dataset, Mapping[Any, CoercibleValue]] PANDAS_TYPES = (pd.Series, pd.DataFrame, pdcompat.Panel) @@ -253,7 +254,7 @@ def merge_collected( def collect_variables_and_indexes( - list_of_mappings: "List[DatasetLike]", + list_of_mappings: List[DatasetLike], ) -> Dict[Hashable, List[MergeElement]]: """Collect variables and indexes from list of mappings of xarray objects. @@ -292,12 +293,14 @@ def append_all(variables, indexes): append_all(coords, indexes) variable = as_variable(variable, name=name) + if variable.dims == (name,): - variable = variable.to_index_variable() + idx_variable = variable.to_index_variable() index = variable._to_xindex() + append(name, idx_variable, index) else: index = None - append(name, variable, index) + append(name, variable, index) return grouped @@ -455,7 +458,7 @@ def merge_coords( compat: str = "minimal", join: str = "outer", priority_arg: Optional[int] = None, - indexes: Optional[Mapping[Hashable, Index]] = None, + indexes: Optional[Mapping[Any, Index]] = None, fill_value: object = dtypes.NA, ) -> Tuple[Dict[Hashable, Variable], Dict[Hashable, Index]]: """Merge coordinate variables. @@ -578,7 +581,7 @@ def merge_core( combine_attrs: Optional[str] = "override", priority_arg: Optional[int] = None, explicit_coords: Optional[Sequence] = None, - indexes: Optional[Mapping[Hashable, Any]] = None, + indexes: Optional[Mapping[Any, Any]] = None, fill_value: object = dtypes.NA, ) -> _MergeResult: """Core logic for merging labeled objects. 
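merge.py here (and variable.py below) also gains `from __future__ import annotations`, which is why a quoted annotation such as `"List[DatasetLike]"` can lose its quotes: under PEP 563 every annotation is stored as a string and only evaluated on demand, so forward references need no manual quoting. A minimal standalone sketch:

    from __future__ import annotations  # PEP 563: annotations become lazy strings

    # Variable is referenced before it is defined; without the __future__
    # import this annotation would have to be written as "Variable".
    def as_variable(obj: object) -> Variable:
        return Variable(obj)

    class Variable:
        def __init__(self, data: object) -> None:
            self.data = data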
diff --git a/xarray/core/rolling_exp.py b/xarray/core/rolling_exp.py index e0fe57a9fb0..4c556e8ba93 100644 --- a/xarray/core/rolling_exp.py +++ b/xarray/core/rolling_exp.py @@ -1,5 +1,5 @@ from distutils.version import LooseVersion -from typing import TYPE_CHECKING, Generic, Hashable, Mapping, TypeVar, Union +from typing import TYPE_CHECKING, Generic, Mapping, TypeVar, Union, Any import numpy as np @@ -104,7 +104,7 @@ class RollingExp(Generic[T_DSorDA]): def __init__( self, obj: T_DSorDA, - windows: Mapping[Hashable, Union[int, float]], + windows: Mapping[Any, Union[int, float]], window_type: str = "span", ): self.obj: T_DSorDA = obj diff --git a/xarray/core/utils.py b/xarray/core/utils.py index a139d2ef10a..77d973f613f 100644 --- a/xarray/core/utils.py +++ b/xarray/core/utils.py @@ -274,7 +274,7 @@ def is_duck_array(value: Any) -> bool: def either_dict_or_kwargs( - pos_kwargs: Optional[Mapping[Hashable, T]], + pos_kwargs: Optional[Mapping[Any, T]], kw_kwargs: Mapping[str, T], func_name: str, ) -> Mapping[Hashable, T]: @@ -816,8 +816,8 @@ def get_temp_dimname(dims: Container[Hashable], new_dim: Hashable) -> Hashable: def drop_dims_from_indexers( - indexers: Mapping[Hashable, Any], - dims: Union[list, Mapping[Hashable, int]], + indexers: Mapping[Any, Any], + dims: Union[list, Mapping[Any, int]], missing_dims: str, ) -> Mapping[Hashable, Any]: """Depending on the setting of missing_dims, drop any dimensions from indexers that diff --git a/xarray/core/variable.py b/xarray/core/variable.py index 6b971389de7..a432cba35f1 100644 --- a/xarray/core/variable.py +++ b/xarray/core/variable.py @@ -1,3 +1,4 @@ +from __future__ import annotations import copy import itertools import numbers @@ -86,7 +87,7 @@ class MissingDimensionsError(ValueError): # TODO: move this to an xarray.exceptions module? -def as_variable(obj, name=None) -> "Union[Variable, IndexVariable]": +def as_variable(obj, name=None) -> Union[Variable, IndexVariable]: """Convert an object into a Variable. 
Parameters @@ -1258,7 +1259,7 @@ def shift(self, shifts=None, fill_value=dtypes.NA, **shifts_kwargs): def _pad_options_dim_to_index( self, - pad_option: Mapping[Hashable, Union[int, Tuple[int, int]]], + pad_option: Mapping[Any, Union[int, Tuple[int, int]]], fill_with_shape=False, ): if fill_with_shape: @@ -1270,17 +1271,13 @@ def _pad_options_dim_to_index( def pad( self, - pad_width: Mapping[Hashable, Union[int, Tuple[int, int]]] = None, + pad_width: Mapping[Any, Union[int, Tuple[int, int]]] = None, mode: str = "constant", - stat_length: Union[ - int, Tuple[int, int], Mapping[Hashable, Tuple[int, int]] - ] = None, + stat_length: Union[int, Tuple[int, int], Mapping[Any, Tuple[int, int]]] = None, constant_values: Union[ - int, Tuple[int, int], Mapping[Hashable, Tuple[int, int]] - ] = None, - end_values: Union[ - int, Tuple[int, int], Mapping[Hashable, Tuple[int, int]] + int, Tuple[int, int], Mapping[Any, Tuple[int, int]] ] = None, + end_values: Union[int, Tuple[int, int], Mapping[Any, Tuple[int, int]]] = None, reflect_type: str = None, **pad_width_kwargs: Any, ): diff --git a/xarray/tests/test_accessor_dt.py b/xarray/tests/test_accessor_dt.py index 62da3bab2cd..473f2f32507 100644 --- a/xarray/tests/test_accessor_dt.py +++ b/xarray/tests/test_accessor_dt.py @@ -69,7 +69,7 @@ def setup(self): "is_leap_year", ], ) - def test_field_access(self, field): + def test_field_access(self, field) -> None: if LooseVersion(pd.__version__) >= "1.1.0" and field in ["week", "weekofyear"]: data = self.times.isocalendar()["week"] @@ -96,7 +96,7 @@ def test_field_access(self, field): ("weekday", "day"), ], ) - def test_isocalendar(self, field, pandas_field): + def test_isocalendar(self, field, pandas_field) -> None: if LooseVersion(pd.__version__) < "1.1.0": with pytest.raises( @@ -114,12 +114,12 @@ def test_isocalendar(self, field, pandas_field): actual = self.data.time.dt.isocalendar()[field] assert_equal(expected, actual) - def test_strftime(self): + def test_strftime(self) -> None: assert ( "2000-01-01 01:00:00" == self.data.time.dt.strftime("%Y-%m-%d %H:%M:%S")[1] ) - def test_not_datetime_type(self): + def test_not_datetime_type(self) -> None: nontime_data = self.data.copy() int_data = np.arange(len(self.data.time)).astype("int8") nontime_data = nontime_data.assign_coords(time=int_data) @@ -156,7 +156,7 @@ def test_not_datetime_type(self): "is_leap_year", ], ) - def test_dask_field_access(self, field): + def test_dask_field_access(self, field) -> None: import dask.array as da expected = getattr(self.times_data.dt, field) @@ -182,7 +182,7 @@ def test_dask_field_access(self, field): "weekday", ], ) - def test_isocalendar_dask(self, field): + def test_isocalendar_dask(self, field) -> None: import dask.array as da if LooseVersion(pd.__version__) < "1.1.0": @@ -216,7 +216,7 @@ def test_isocalendar_dask(self, field): ("strftime", "%Y-%m-%d %H:%M:%S"), ], ) - def test_dask_accessor_method(self, method, parameters): + def test_dask_accessor_method(self, method, parameters) -> None: import dask.array as da expected = getattr(self.times_data.dt, method)(parameters) @@ -232,10 +232,10 @@ def test_dask_accessor_method(self, method, parameters): assert_chunks_equal(actual, dask_times_2d) assert_equal(actual.compute(), expected.compute()) - def test_seasons(self): + def test_seasons(self) -> None: dates = pd.date_range(start="2000/01/01", freq="M", periods=12) dates = xr.DataArray(dates) - seasons = [ + season_list = [ "DJF", "DJF", "MAM", @@ -249,14 +249,14 @@ def test_seasons(self): "SON", "DJF", ] - seasons = 
xr.DataArray(seasons) + seasons = xr.DataArray(season_list) assert_array_equal(seasons.values, dates.dt.season.values) @pytest.mark.parametrize( "method, parameters", [("floor", "D"), ("ceil", "D"), ("round", "D")] ) - def test_accessor_method(self, method, parameters): + def test_accessor_method(self, method, parameters) -> None: dates = pd.date_range("2014-01-01", "2014-05-01", freq="H") xdates = xr.DataArray(dates, dims=["time"]) expected = getattr(dates, method)(parameters) @@ -288,7 +288,7 @@ def setup(self): name="data", ) - def test_not_datetime_type(self): + def test_not_datetime_type(self) -> None: nontime_data = self.data.copy() int_data = np.arange(len(self.data.time)).astype("int8") nontime_data = nontime_data.assign_coords(time=int_data) @@ -298,7 +298,7 @@ def test_not_datetime_type(self): @pytest.mark.parametrize( "field", ["days", "seconds", "microseconds", "nanoseconds"] ) - def test_field_access(self, field): + def test_field_access(self, field) -> None: expected = xr.DataArray( getattr(self.times, field), name=field, coords=[self.times], dims=["time"] ) @@ -308,7 +308,7 @@ def test_field_access(self, field): @pytest.mark.parametrize( "method, parameters", [("floor", "D"), ("ceil", "D"), ("round", "D")] ) - def test_accessor_methods(self, method, parameters): + def test_accessor_methods(self, method, parameters) -> None: dates = pd.timedelta_range(start="1 day", end="30 days", freq="6H") xdates = xr.DataArray(dates, dims=["time"]) expected = getattr(dates, method)(parameters) @@ -319,7 +319,7 @@ def test_accessor_methods(self, method, parameters): @pytest.mark.parametrize( "field", ["days", "seconds", "microseconds", "nanoseconds"] ) - def test_dask_field_access(self, field): + def test_dask_field_access(self, field) -> None: import dask.array as da expected = getattr(self.times_data.dt, field) @@ -340,7 +340,7 @@ def test_dask_field_access(self, field): @pytest.mark.parametrize( "method, parameters", [("floor", "D"), ("ceil", "D"), ("round", "D")] ) - def test_dask_accessor_method(self, method, parameters): + def test_dask_accessor_method(self, method, parameters) -> None: import dask.array as da expected = getattr(self.times_data.dt, method)(parameters) @@ -410,7 +410,7 @@ def times_3d(times): @pytest.mark.parametrize( "field", ["year", "month", "day", "hour", "dayofyear", "dayofweek"] ) -def test_field_access(data, field): +def test_field_access(data, field) -> None: if field == "dayofyear" or field == "dayofweek": pytest.importorskip("cftime", minversion="1.0.2.1") result = getattr(data.time.dt, field) @@ -425,7 +425,7 @@ def test_field_access(data, field): @requires_cftime -def test_isocalendar_cftime(data): +def test_isocalendar_cftime(data) -> None: with pytest.raises( AttributeError, match=r"'CFTimeIndex' object has no attribute 'isocalendar'" @@ -434,7 +434,7 @@ def test_isocalendar_cftime(data): @requires_cftime -def test_date_cftime(data): +def test_date_cftime(data) -> None: with pytest.raises( AttributeError, @@ -445,7 +445,7 @@ def test_date_cftime(data): @requires_cftime @pytest.mark.filterwarnings("ignore::RuntimeWarning") -def test_cftime_strftime_access(data): +def test_cftime_strftime_access(data) -> None: """compare cftime formatting against datetime formatting""" date_format = "%Y%m%d%H" result = data.time.dt.strftime(date_format) @@ -464,7 +464,7 @@ def test_cftime_strftime_access(data): @pytest.mark.parametrize( "field", ["year", "month", "day", "hour", "dayofyear", "dayofweek"] ) -def test_dask_field_access_1d(data, field): +def 
test_dask_field_access_1d(data, field) -> None: import dask.array as da if field == "dayofyear" or field == "dayofweek": @@ -486,7 +486,7 @@ def test_dask_field_access_1d(data, field): @pytest.mark.parametrize( "field", ["year", "month", "day", "hour", "dayofyear", "dayofweek"] ) -def test_dask_field_access(times_3d, data, field): +def test_dask_field_access(times_3d, data, field) -> None: import dask.array as da if field == "dayofyear" or field == "dayofweek": @@ -514,10 +514,10 @@ def cftime_date_type(calendar): @requires_cftime -def test_seasons(cftime_date_type): +def test_seasons(cftime_date_type) -> None: dates = np.array([cftime_date_type(2000, month, 15) for month in range(1, 13)]) dates = xr.DataArray(dates) - seasons = [ + season_list = [ "DJF", "DJF", "MAM", @@ -531,7 +531,7 @@ def test_seasons(cftime_date_type): "SON", "DJF", ] - seasons = xr.DataArray(seasons) + seasons = xr.DataArray(season_list) assert_array_equal(seasons.values, dates.dt.season.values) @@ -549,7 +549,9 @@ def cftime_rounding_dataarray(cftime_date_type): @requires_cftime @requires_dask @pytest.mark.parametrize("use_dask", [False, True]) -def test_cftime_floor_accessor(cftime_rounding_dataarray, cftime_date_type, use_dask): +def test_cftime_floor_accessor( + cftime_rounding_dataarray, cftime_date_type, use_dask +) -> None: import dask.array as da freq = "D" @@ -580,7 +582,9 @@ def test_cftime_floor_accessor(cftime_rounding_dataarray, cftime_date_type, use_ @requires_cftime @requires_dask @pytest.mark.parametrize("use_dask", [False, True]) -def test_cftime_ceil_accessor(cftime_rounding_dataarray, cftime_date_type, use_dask): +def test_cftime_ceil_accessor( + cftime_rounding_dataarray, cftime_date_type, use_dask +) -> None: import dask.array as da freq = "D" @@ -611,7 +615,9 @@ def test_cftime_ceil_accessor(cftime_rounding_dataarray, cftime_date_type, use_d @requires_cftime @requires_dask @pytest.mark.parametrize("use_dask", [False, True]) -def test_cftime_round_accessor(cftime_rounding_dataarray, cftime_date_type, use_dask): +def test_cftime_round_accessor( + cftime_rounding_dataarray, cftime_date_type, use_dask +) -> None: import dask.array as da freq = "D" diff --git a/xarray/tests/test_accessor_str.py b/xarray/tests/test_accessor_str.py index 519ca762c41..e3c45d732e4 100644 --- a/xarray/tests/test_accessor_str.py +++ b/xarray/tests/test_accessor_str.py @@ -37,6 +37,8 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
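The `seasons` -> `season_list` renames above, like `idx_variable` in the merge.py hunk earlier, avoid rebinding one name to a value of a different type; mypy flags such redefinitions once the enclosing scope is checked. A minimal sketch:

    import xarray as xr

    seasons = ["DJF", "MAM", "JJA", "SON"]
    seasons = xr.DataArray(seasons)  # error: Incompatible types in assignment
                                     # (rebinds a List[str] name to a DataArray)

    season_list = ["DJF", "MAM", "JJA", "SON"]
    seasons_da = xr.DataArray(season_list)  # OK: each name keeps a single type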
+# type: ignore[assignment] + import re import numpy as np @@ -53,7 +55,7 @@ def dtype(request): @requires_dask -def test_dask(): +def test_dask() -> None: import dask.array as da arr = da.from_array(["a", "b", "c"], chunks=-1) @@ -65,7 +67,7 @@ def test_dask(): assert_equal(result, expected) -def test_count(dtype): +def test_count(dtype) -> None: values = xr.DataArray(["foo", "foofoo", "foooofooofommmfoo"]).astype(dtype) pat_str = dtype(r"f[o]+") pat_re = re.compile(pat_str) @@ -81,7 +83,7 @@ def test_count(dtype): assert_equal(result_re, expected) -def test_count_broadcast(dtype): +def test_count_broadcast(dtype) -> None: values = xr.DataArray(["foo", "foofoo", "foooofooofommmfoo"]).astype(dtype) pat_str = np.array([r"f[o]+", r"o", r"m"]).astype(dtype) pat_re = np.array([re.compile(x) for x in pat_str]) @@ -97,7 +99,7 @@ def test_count_broadcast(dtype): assert_equal(result_re, expected) -def test_contains(dtype): +def test_contains(dtype) -> None: values = xr.DataArray(["Foo", "xYz", "fOOomMm__fOo", "MMM_"]).astype(dtype) # case insensitive using regex @@ -141,7 +143,7 @@ def test_contains(dtype): values.str.contains(pat_re, regex=False) -def test_contains_broadcast(dtype): +def test_contains_broadcast(dtype) -> None: values = xr.DataArray(["Foo", "xYz", "fOOomMm__fOo", "MMM_"], dims="X").astype( dtype ) @@ -208,7 +210,7 @@ def test_contains_broadcast(dtype): assert_equal(result, expected) -def test_starts_ends_with(dtype): +def test_starts_ends_with(dtype) -> None: values = xr.DataArray(["om", "foo_nom", "nom", "bar_foo", "foo"]).astype(dtype) result = values.str.startswith("foo") @@ -222,7 +224,7 @@ def test_starts_ends_with(dtype): assert_equal(result, expected) -def test_starts_ends_with_broadcast(dtype): +def test_starts_ends_with_broadcast(dtype) -> None: values = xr.DataArray( ["om", "foo_nom", "nom", "bar_foo", "foo_bar"], dims="X" ).astype(dtype) @@ -245,7 +247,7 @@ def test_starts_ends_with_broadcast(dtype): assert_equal(result, expected) -def test_case_bytes(): +def test_case_bytes() -> None: value = xr.DataArray(["SOme wOrd"]).astype(np.bytes_) exp_capitalized = xr.DataArray(["Some word"]).astype(np.bytes_) @@ -273,7 +275,7 @@ def test_case_bytes(): assert_equal(res_uppered, exp_uppered) -def test_case_str(): +def test_case_str() -> None: # This string includes some unicode characters # that are common case management corner cases value = xr.DataArray(["SOme wOrd DŽ ß ᾛ ΣΣ ffi⁵Å Ç Ⅰ"]).astype(np.unicode_) @@ -331,7 +333,7 @@ def test_case_str(): assert_equal(res_norm_nfkd, exp_norm_nfkd) -def test_replace(dtype): +def test_replace(dtype) -> None: values = xr.DataArray(["fooBAD__barBAD"], dims=["x"]).astype(dtype) result = values.str.replace("BAD[_]*", "") expected = xr.DataArray(["foobar"], dims=["x"]).astype(dtype) @@ -385,7 +387,7 @@ def test_replace(dtype): assert_equal(result, expected) -def test_replace_callable(): +def test_replace_callable() -> None: values = xr.DataArray(["fooBAD__barBAD"]) # test with callable @@ -421,7 +423,7 @@ def test_replace_callable(): assert_equal(result, exp) -def test_replace_unicode(): +def test_replace_unicode() -> None: # flags + unicode values = xr.DataArray([b"abcd,\xc3\xa0".decode("utf-8")]) expected = xr.DataArray([b"abcd, \xc3\xa0".decode("utf-8")]) @@ -445,7 +447,7 @@ def test_replace_unicode(): assert_equal(result, expected) -def test_replace_compiled_regex(dtype): +def test_replace_compiled_regex(dtype) -> None: values = xr.DataArray(["fooBAD__barBAD"], dims=["x"]).astype(dtype) # test with compiled regex @@ -507,7 +509,7 @@ def 
test_replace_compiled_regex(dtype): assert_equal(result, expected) -def test_replace_literal(dtype): +def test_replace_literal(dtype) -> None: # GH16808 literal replace (regex=False vs regex=True) values = xr.DataArray(["f.o", "foo"], dims=["X"]).astype(dtype) expected = xr.DataArray(["bao", "bao"], dims=["X"]).astype(dtype) @@ -550,7 +552,7 @@ def test_replace_literal(dtype): values.str.replace(compiled_pat, "", regex=False) -def test_extract_extractall_findall_empty_raises(dtype): +def test_extract_extractall_findall_empty_raises(dtype) -> None: pat_str = dtype(r".*") pat_re = re.compile(pat_str) @@ -575,7 +577,7 @@ def test_extract_extractall_findall_empty_raises(dtype): value.str.findall(pat=pat_re) -def test_extract_multi_None_raises(dtype): +def test_extract_multi_None_raises(dtype) -> None: pat_str = r"(\w+)_(\d+)" pat_re = re.compile(pat_str) @@ -594,7 +596,7 @@ def test_extract_multi_None_raises(dtype): value.str.extract(pat=pat_re, dim=None) -def test_extract_extractall_findall_case_re_raises(dtype): +def test_extract_extractall_findall_case_re_raises(dtype) -> None: pat_str = r".*" pat_re = re.compile(pat_str) @@ -631,7 +633,7 @@ def test_extract_extractall_findall_case_re_raises(dtype): value.str.findall(pat=pat_re, case=False) -def test_extract_extractall_name_collision_raises(dtype): +def test_extract_extractall_name_collision_raises(dtype) -> None: pat_str = r"(\w+)" pat_re = re.compile(pat_str) @@ -674,7 +676,7 @@ def test_extract_extractall_name_collision_raises(dtype): value.str.extractall(pat=pat_re, group_dim="ZZ", match_dim="ZZ") -def test_extract_single_case(dtype): +def test_extract_single_case(dtype) -> None: pat_str = r"(\w+)_Xy_\d*" pat_re = pat_str if dtype == np.unicode_ else bytes(pat_str, encoding="UTF-8") pat_re = re.compile(pat_re) @@ -720,7 +722,7 @@ def test_extract_single_case(dtype): assert_equal(res_re_dim, targ_dim) -def test_extract_single_nocase(dtype): +def test_extract_single_nocase(dtype) -> None: pat_str = r"(\w+)?_Xy_\d*" pat_re = pat_str if dtype == np.unicode_ else bytes(pat_str, encoding="UTF-8") pat_re = re.compile(pat_re, flags=re.IGNORECASE) @@ -760,7 +762,7 @@ def test_extract_single_nocase(dtype): assert_equal(res_re_dim, targ_dim) -def test_extract_multi_case(dtype): +def test_extract_multi_case(dtype) -> None: pat_str = r"(\w+)_Xy_(\d*)" pat_re = pat_str if dtype == np.unicode_ else bytes(pat_str, encoding="UTF-8") pat_re = re.compile(pat_re) @@ -798,7 +800,7 @@ def test_extract_multi_case(dtype): assert_equal(res_str_case, expected) -def test_extract_multi_nocase(dtype): +def test_extract_multi_nocase(dtype) -> None: pat_str = r"(\w+)_Xy_(\d*)" pat_re = pat_str if dtype == np.unicode_ else bytes(pat_str, encoding="UTF-8") pat_re = re.compile(pat_re, flags=re.IGNORECASE) @@ -833,7 +835,7 @@ def test_extract_multi_nocase(dtype): assert_equal(res_re, expected) -def test_extract_broadcast(dtype): +def test_extract_broadcast(dtype) -> None: value = xr.DataArray( ["a_Xy_0", "ab_xY_10", "abc_Xy_01"], dims=["X"], @@ -862,7 +864,7 @@ def test_extract_broadcast(dtype): assert_equal(res_re, expected) -def test_extractall_single_single_case(dtype): +def test_extractall_single_single_case(dtype) -> None: pat_str = r"(\w+)_Xy_\d*" pat_re = pat_str if dtype == np.unicode_ else bytes(pat_str, encoding="UTF-8") pat_re = re.compile(pat_re) @@ -892,7 +894,7 @@ def test_extractall_single_single_case(dtype): assert_equal(res_str_case, expected) -def test_extractall_single_single_nocase(dtype): +def test_extractall_single_single_nocase(dtype) -> None: 
pat_str = r"(\w+)_Xy_\d*" pat_re = pat_str if dtype == np.unicode_ else bytes(pat_str, encoding="UTF-8") pat_re = re.compile(pat_re, flags=re.I) @@ -919,7 +921,7 @@ def test_extractall_single_single_nocase(dtype): assert_equal(res_re, expected) -def test_extractall_single_multi_case(dtype): +def test_extractall_single_multi_case(dtype) -> None: pat_str = r"(\w+)_Xy_\d*" pat_re = pat_str if dtype == np.unicode_ else bytes(pat_str, encoding="UTF-8") pat_re = re.compile(pat_re) @@ -963,7 +965,7 @@ def test_extractall_single_multi_case(dtype): assert_equal(res_str_case, expected) -def test_extractall_single_multi_nocase(dtype): +def test_extractall_single_multi_nocase(dtype) -> None: pat_str = r"(\w+)_Xy_\d*" pat_re = pat_str if dtype == np.unicode_ else bytes(pat_str, encoding="UTF-8") pat_re = re.compile(pat_re, flags=re.I) @@ -1008,7 +1010,7 @@ def test_extractall_single_multi_nocase(dtype): assert_equal(res_re, expected) -def test_extractall_multi_single_case(dtype): +def test_extractall_multi_single_case(dtype) -> None: pat_str = r"(\w+)_Xy_(\d*)" pat_re = pat_str if dtype == np.unicode_ else bytes(pat_str, encoding="UTF-8") pat_re = re.compile(pat_re) @@ -1041,7 +1043,7 @@ def test_extractall_multi_single_case(dtype): assert_equal(res_str_case, expected) -def test_extractall_multi_single_nocase(dtype): +def test_extractall_multi_single_nocase(dtype) -> None: pat_str = r"(\w+)_Xy_(\d*)" pat_re = pat_str if dtype == np.unicode_ else bytes(pat_str, encoding="UTF-8") pat_re = re.compile(pat_re, flags=re.I) @@ -1071,7 +1073,7 @@ def test_extractall_multi_single_nocase(dtype): assert_equal(res_re, expected) -def test_extractall_multi_multi_case(dtype): +def test_extractall_multi_multi_case(dtype) -> None: pat_str = r"(\w+)_Xy_(\d*)" pat_re = pat_str if dtype == np.unicode_ else bytes(pat_str, encoding="UTF-8") pat_re = re.compile(pat_re) @@ -1119,7 +1121,7 @@ def test_extractall_multi_multi_case(dtype): assert_equal(res_str_case, expected) -def test_extractall_multi_multi_nocase(dtype): +def test_extractall_multi_multi_nocase(dtype) -> None: pat_str = r"(\w+)_Xy_(\d*)" pat_re = pat_str if dtype == np.unicode_ else bytes(pat_str, encoding="UTF-8") pat_re = re.compile(pat_re, flags=re.I) @@ -1164,7 +1166,7 @@ def test_extractall_multi_multi_nocase(dtype): assert_equal(res_re, expected) -def test_extractall_broadcast(dtype): +def test_extractall_broadcast(dtype) -> None: value = xr.DataArray( ["a_Xy_0", "ab_xY_10", "abc_Xy_01"], dims=["X"], @@ -1193,7 +1195,7 @@ def test_extractall_broadcast(dtype): assert_equal(res_re, expected) -def test_findall_single_single_case(dtype): +def test_findall_single_single_case(dtype) -> None: pat_str = r"(\w+)_Xy_\d*" pat_re = re.compile(dtype(pat_str)) @@ -1220,7 +1222,7 @@ def test_findall_single_single_case(dtype): assert_equal(res_str_case, expected) -def test_findall_single_single_nocase(dtype): +def test_findall_single_single_nocase(dtype) -> None: pat_str = r"(\w+)_Xy_\d*" pat_re = re.compile(dtype(pat_str), flags=re.I) @@ -1244,7 +1246,7 @@ def test_findall_single_single_nocase(dtype): assert_equal(res_re, expected) -def test_findall_single_multi_case(dtype): +def test_findall_single_multi_case(dtype) -> None: pat_str = r"(\w+)_Xy_\d*" pat_re = re.compile(dtype(pat_str)) @@ -1285,7 +1287,7 @@ def test_findall_single_multi_case(dtype): assert_equal(res_str_case, expected) -def test_findall_single_multi_nocase(dtype): +def test_findall_single_multi_nocase(dtype) -> None: pat_str = r"(\w+)_Xy_\d*" pat_re = re.compile(dtype(pat_str), flags=re.I) @@ -1327,7 
+1329,7 @@ def test_findall_single_multi_nocase(dtype): assert_equal(res_re, expected) -def test_findall_multi_single_case(dtype): +def test_findall_multi_single_case(dtype) -> None: pat_str = r"(\w+)_Xy_(\d*)" pat_re = re.compile(dtype(pat_str)) @@ -1357,7 +1359,7 @@ def test_findall_multi_single_case(dtype): assert_equal(res_str_case, expected) -def test_findall_multi_single_nocase(dtype): +def test_findall_multi_single_nocase(dtype) -> None: pat_str = r"(\w+)_Xy_(\d*)" pat_re = re.compile(dtype(pat_str), flags=re.I) @@ -1384,7 +1386,7 @@ def test_findall_multi_single_nocase(dtype): assert_equal(res_re, expected) -def test_findall_multi_multi_case(dtype): +def test_findall_multi_multi_case(dtype) -> None: pat_str = r"(\w+)_Xy_(\d*)" pat_re = re.compile(dtype(pat_str)) @@ -1429,7 +1431,7 @@ def test_findall_multi_multi_case(dtype): assert_equal(res_str_case, expected) -def test_findall_multi_multi_nocase(dtype): +def test_findall_multi_multi_nocase(dtype) -> None: pat_str = r"(\w+)_Xy_(\d*)" pat_re = re.compile(dtype(pat_str), flags=re.I) @@ -1471,7 +1473,7 @@ def test_findall_multi_multi_nocase(dtype): assert_equal(res_re, expected) -def test_findall_broadcast(dtype): +def test_findall_broadcast(dtype) -> None: value = xr.DataArray( ["a_Xy_0", "ab_xY_10", "abc_Xy_01"], dims=["X"], @@ -1498,7 +1500,7 @@ def test_findall_broadcast(dtype): assert_equal(res_re, expected) -def test_repeat(dtype): +def test_repeat(dtype) -> None: values = xr.DataArray(["a", "b", "c", "d"]).astype(dtype) result = values.str.repeat(3) @@ -1513,7 +1515,7 @@ def test_repeat(dtype): assert_equal(result, expected) -def test_repeat_broadcast(dtype): +def test_repeat_broadcast(dtype) -> None: values = xr.DataArray(["a", "b", "c", "d"], dims=["X"]).astype(dtype) reps = xr.DataArray([3, 4], dims=["Y"]) @@ -1532,7 +1534,7 @@ def test_repeat_broadcast(dtype): assert_equal(result, expected) -def test_match(dtype): +def test_match(dtype) -> None: values = xr.DataArray(["fooBAD__barBAD", "foo"]).astype(dtype) # New match behavior introduced in 0.13 @@ -1566,7 +1568,7 @@ def test_match(dtype): assert_equal(result, expected) -def test_empty_str_methods(): +def test_empty_str_methods() -> None: empty = xr.DataArray(np.empty(shape=(0,), dtype="U")) empty_str = empty empty_int = xr.DataArray(np.empty(shape=(0,), dtype=int)) @@ -1652,7 +1654,7 @@ def test_empty_str_methods(): assert_equal(empty_str, empty.str.translate(table)) -def test_ismethods(dtype): +def test_ismethods(dtype) -> None: values = ["A", "b", "Xy", "4", "3A", "", "TT", "55", "-", " "] exp_alnum = [True, True, True, True, True, False, True, True, False, False] @@ -1698,7 +1700,7 @@ def test_ismethods(dtype): assert_equal(res_upper, exp_upper) -def test_isnumeric(): +def test_isnumeric() -> None: # 0x00bc: ¼ VULGAR FRACTION ONE QUARTER # 0x2605: ★ not number # 0x1378: ፸ ETHIOPIC NUMBER SEVENTY @@ -1721,7 +1723,7 @@ def test_isnumeric(): assert_equal(res_decimal, exp_decimal) -def test_len(dtype): +def test_len(dtype) -> None: values = ["foo", "fooo", "fooooo", "fooooooo"] result = xr.DataArray(values).astype(dtype).str.len() expected = xr.DataArray([len(x) for x in values]) @@ -1729,7 +1731,7 @@ def test_len(dtype): assert_equal(result, expected) -def test_find(dtype): +def test_find(dtype) -> None: values = xr.DataArray(["ABCDEFG", "BCDEFEF", "DEFGHIJEF", "EFGHEF", "XXX"]) values = values.astype(dtype) @@ -1812,7 +1814,7 @@ def test_find(dtype): assert_equal(result_1, expected_1) -def test_find_broadcast(dtype): +def test_find_broadcast(dtype) -> None: values = 
xr.DataArray( ["ABCDEFG", "BCDEFEF", "DEFGHIJEF", "EFGHEF", "XXX"], dims=["X"] ) @@ -1858,7 +1860,7 @@ def test_find_broadcast(dtype): assert_equal(result_1, expected) -def test_index(dtype): +def test_index(dtype) -> None: s = xr.DataArray(["ABCDEFG", "BCDEFEF", "DEFGHIJEF", "EFGHEF"]).astype(dtype) result_0 = s.str.index("EF") @@ -1914,7 +1916,7 @@ def test_index(dtype): s.str.index("DE") -def test_index_broadcast(dtype): +def test_index_broadcast(dtype) -> None: values = xr.DataArray( ["ABCDEFGEFDBCA", "BCDEFEFEFDBC", "DEFBCGHIEFBC", "EFGHBCEFBCBCBCEF"], dims=["X"], @@ -1949,7 +1951,7 @@ def test_index_broadcast(dtype): assert_equal(result_1, expected) -def test_translate(): +def test_translate() -> None: values = xr.DataArray(["abcdefg", "abcc", "cdddfg", "cdefggg"]) table = str.maketrans("abc", "cde") result = values.str.translate(table) @@ -1958,7 +1960,7 @@ def test_translate(): assert_equal(result, expected) -def test_pad_center_ljust_rjust(dtype): +def test_pad_center_ljust_rjust(dtype) -> None: values = xr.DataArray(["a", "b", "c", "eeeee"]).astype(dtype) result = values.str.center(5) @@ -1986,7 +1988,7 @@ def test_pad_center_ljust_rjust(dtype): assert_equal(result, expected) -def test_pad_center_ljust_rjust_fillchar(dtype): +def test_pad_center_ljust_rjust_fillchar(dtype) -> None: values = xr.DataArray(["a", "bb", "cccc", "ddddd", "eeeeee"]).astype(dtype) result = values.str.center(5, fillchar="X") @@ -2037,7 +2039,7 @@ def test_pad_center_ljust_rjust_fillchar(dtype): values.str.pad(5, fillchar="XY") -def test_pad_center_ljust_rjust_broadcast(dtype): +def test_pad_center_ljust_rjust_broadcast(dtype) -> None: values = xr.DataArray(["a", "bb", "cccc", "ddddd", "eeeeee"], dims="X").astype( dtype ) @@ -2096,7 +2098,7 @@ def test_pad_center_ljust_rjust_broadcast(dtype): assert_equal(result, expected) -def test_zfill(dtype): +def test_zfill(dtype) -> None: values = xr.DataArray(["1", "22", "aaa", "333", "45678"]).astype(dtype) result = values.str.zfill(5) @@ -2110,7 +2112,7 @@ def test_zfill(dtype): assert_equal(result, expected) -def test_zfill_broadcast(dtype): +def test_zfill_broadcast(dtype) -> None: values = xr.DataArray(["1", "22", "aaa", "333", "45678"]).astype(dtype) width = np.array([4, 5, 0, 3, 8]) @@ -2120,7 +2122,7 @@ def test_zfill_broadcast(dtype): assert_equal(result, expected) -def test_slice(dtype): +def test_slice(dtype) -> None: arr = xr.DataArray(["aafootwo", "aabartwo", "aabazqux"]).astype(dtype) result = arr.str.slice(2, 5) @@ -2138,7 +2140,7 @@ def test_slice(dtype): raise -def test_slice_broadcast(dtype): +def test_slice_broadcast(dtype) -> None: arr = xr.DataArray(["aafootwo", "aabartwo", "aabazqux"]).astype(dtype) start = xr.DataArray([1, 2, 3]) stop = 5 @@ -2149,7 +2151,7 @@ def test_slice_broadcast(dtype): assert_equal(result, exp) -def test_slice_replace(dtype): +def test_slice_replace(dtype) -> None: da = lambda x: xr.DataArray(x).astype(dtype) values = da(["short", "a bit longer", "evenlongerthanthat", ""]) @@ -2194,7 +2196,7 @@ def test_slice_replace(dtype): assert_equal(result, expected) -def test_slice_replace_broadcast(dtype): +def test_slice_replace_broadcast(dtype) -> None: values = xr.DataArray(["short", "a bit longer", "evenlongerthanthat", ""]).astype( dtype ) @@ -2210,7 +2212,7 @@ def test_slice_replace_broadcast(dtype): assert_equal(result, expected) -def test_strip_lstrip_rstrip(dtype): +def test_strip_lstrip_rstrip(dtype) -> None: values = xr.DataArray([" aa ", " bb \n", "cc "]).astype(dtype) result = values.str.strip() @@ -2229,7 +2231,7 @@ 
def test_strip_lstrip_rstrip(dtype): assert_equal(result, expected) -def test_strip_lstrip_rstrip_args(dtype): +def test_strip_lstrip_rstrip_args(dtype) -> None: values = xr.DataArray(["xxABCxx", "xx BNSD", "LDFJH xx"]).astype(dtype) result = values.str.strip("x") @@ -2248,7 +2250,7 @@ def test_strip_lstrip_rstrip_args(dtype): assert_equal(result, expected) -def test_strip_lstrip_rstrip_broadcast(dtype): +def test_strip_lstrip_rstrip_broadcast(dtype) -> None: values = xr.DataArray(["xxABCxx", "yy BNSD", "LDFJH zz"]).astype(dtype) to_strip = xr.DataArray(["x", "y", "z"]).astype(dtype) @@ -2268,7 +2270,7 @@ def test_strip_lstrip_rstrip_broadcast(dtype): assert_equal(result, expected) -def test_wrap(): +def test_wrap() -> None: # test values are: two words less than width, two words equal to width, # two words greater than width, one word less than width, one word # equal to width, one word greater than width, multiple tokens with @@ -2315,7 +2317,7 @@ def test_wrap(): assert_equal(result, expected) -def test_wrap_kwargs_passed(): +def test_wrap_kwargs_passed() -> None: # GH4334 values = xr.DataArray(" hello world ") @@ -2331,7 +2333,7 @@ def test_wrap_kwargs_passed(): assert_equal(result, expected) -def test_get(dtype): +def test_get(dtype) -> None: values = xr.DataArray(["a_b_c", "c_d_e", "f_g_h"]).astype(dtype) result = values.str[2] @@ -2355,7 +2357,7 @@ def test_get(dtype): assert_equal(result, expected) -def test_get_default(dtype): +def test_get_default(dtype) -> None: # GH4334 values = xr.DataArray(["a_b", "c", ""]).astype(dtype) @@ -2365,7 +2367,7 @@ def test_get_default(dtype): assert_equal(result, expected) -def test_get_broadcast(dtype): +def test_get_broadcast(dtype) -> None: values = xr.DataArray(["a_b_c", "c_d_e", "f_g_h"], dims=["X"]).astype(dtype) inds = xr.DataArray([0, 2], dims=["Y"]) @@ -2377,7 +2379,7 @@ def test_get_broadcast(dtype): assert_equal(result, expected) -def test_encode_decode(): +def test_encode_decode() -> None: data = xr.DataArray(["a", "b", "a\xe4"]) encoded = data.str.encode("utf-8") decoded = encoded.str.decode("utf-8") @@ -2385,7 +2387,7 @@ def test_encode_decode(): assert_equal(data, decoded) -def test_encode_decode_errors(): +def test_encode_decode_errors() -> None: encodeBase = xr.DataArray(["a", "b", "a\x9d"]) msg = ( @@ -2419,7 +2421,7 @@ def test_encode_decode_errors(): assert_equal(result, expected) -def test_partition_whitespace(dtype): +def test_partition_whitespace(dtype) -> None: values = xr.DataArray( [ ["abc def", "spam eggs swallow", "red_blue"], @@ -2467,7 +2469,7 @@ def test_partition_whitespace(dtype): assert_equal(res_rpart_dim, exp_rpart_dim) -def test_partition_comma(dtype): +def test_partition_comma(dtype) -> None: values = xr.DataArray( [ ["abc, def", "spam, eggs, swallow", "red_blue"], @@ -2515,7 +2517,7 @@ def test_partition_comma(dtype): assert_equal(res_rpart_dim, exp_rpart_dim) -def test_partition_empty(dtype): +def test_partition_empty(dtype) -> None: values = xr.DataArray([], dims=["X"]).astype(dtype) expected = xr.DataArray(np.zeros((0, 0)), dims=["X", "ZZ"]).astype(dtype) @@ -2525,7 +2527,7 @@ def test_partition_empty(dtype): assert_equal(res, expected) -def test_split_whitespace(dtype): +def test_split_whitespace(dtype) -> None: values = xr.DataArray( [ ["abc def", "spam\t\teggs\tswallow", "red_blue"], @@ -2663,7 +2665,7 @@ def test_split_whitespace(dtype): assert_equal(res_rsplit_none_10, exp_rsplit_none_full) -def test_split_comma(dtype): +def test_split_comma(dtype) -> None: values = xr.DataArray( [ ["abc,def", 
"spam,,eggs,swallow", "red_blue"], @@ -2801,7 +2803,7 @@ def test_split_comma(dtype): assert_equal(res_rsplit_none_10, exp_rsplit_none_full) -def test_splitters_broadcast(dtype): +def test_splitters_broadcast(dtype) -> None: values = xr.DataArray( ["ab cd,de fg", "spam, ,eggs swallow", "red_blue"], dims=["X"], @@ -2865,7 +2867,7 @@ def test_splitters_broadcast(dtype): assert_equal(res_right, expected_right) -def test_split_empty(dtype): +def test_split_empty(dtype) -> None: values = xr.DataArray([], dims=["X"]).astype(dtype) expected = xr.DataArray(np.zeros((0, 0)), dims=["X", "ZZ"]).astype(dtype) @@ -2875,7 +2877,7 @@ def test_split_empty(dtype): assert_equal(res, expected) -def test_get_dummies(dtype): +def test_get_dummies(dtype) -> None: values_line = xr.DataArray( [["a|ab~abc|abc", "ab", "a||abc|abcd"], ["abcd|ab|a", "abc|ab~abc", "|a"]], dims=["X", "Y"], @@ -2919,7 +2921,7 @@ def test_get_dummies(dtype): assert_equal(res_comma, targ_comma) -def test_get_dummies_broadcast(dtype): +def test_get_dummies_broadcast(dtype) -> None: values = xr.DataArray( ["x~x|x~x", "x", "x|x~x", "x~x"], dims=["X"], @@ -2947,7 +2949,7 @@ def test_get_dummies_broadcast(dtype): assert_equal(res, expected) -def test_get_dummies_empty(dtype): +def test_get_dummies_empty(dtype) -> None: values = xr.DataArray([], dims=["X"]).astype(dtype) expected = xr.DataArray(np.zeros((0, 0)), dims=["X", "ZZ"]).astype(dtype) @@ -2957,7 +2959,7 @@ def test_get_dummies_empty(dtype): assert_equal(res, expected) -def test_splitters_empty_str(dtype): +def test_splitters_empty_str(dtype) -> None: values = xr.DataArray( [["", "", ""], ["", "", ""]], dims=["X", "Y"], @@ -3032,7 +3034,7 @@ def test_splitters_empty_str(dtype): assert_equal(res_dummies, targ_split_dim) -def test_cat_str(dtype): +def test_cat_str(dtype) -> None: values_1 = xr.DataArray( [["a", "bb", "cccc"], ["ddddd", "eeee", "fff"]], dims=["X", "Y"], @@ -3078,7 +3080,7 @@ def test_cat_str(dtype): assert_equal(res_comma, targ_comma) -def test_cat_uniform(dtype): +def test_cat_uniform(dtype) -> None: values_1 = xr.DataArray( [["a", "bb", "cccc"], ["ddddd", "eeee", "fff"]], dims=["X", "Y"], @@ -3127,7 +3129,7 @@ def test_cat_uniform(dtype): assert_equal(res_comma, targ_comma) -def test_cat_broadcast_right(dtype): +def test_cat_broadcast_right(dtype) -> None: values_1 = xr.DataArray( [["a", "bb", "cccc"], ["ddddd", "eeee", "fff"]], dims=["X", "Y"], @@ -3176,7 +3178,7 @@ def test_cat_broadcast_right(dtype): assert_equal(res_comma, targ_comma) -def test_cat_broadcast_left(dtype): +def test_cat_broadcast_left(dtype) -> None: values_1 = xr.DataArray( ["a", "bb", "cccc"], dims=["Y"], @@ -3241,7 +3243,7 @@ def test_cat_broadcast_left(dtype): assert_equal(res_comma, targ_comma) -def test_cat_broadcast_both(dtype): +def test_cat_broadcast_both(dtype) -> None: values_1 = xr.DataArray( ["a", "bb", "cccc"], dims=["Y"], @@ -3306,7 +3308,7 @@ def test_cat_broadcast_both(dtype): assert_equal(res_comma, targ_comma) -def test_cat_multi(): +def test_cat_multi() -> None: values_1 = xr.DataArray( ["11111", "4"], dims=["X"], @@ -3350,7 +3352,7 @@ def test_cat_multi(): assert_equal(res, expected) -def test_join_scalar(dtype): +def test_join_scalar(dtype) -> None: values = xr.DataArray("aaa").astype(dtype) targ = xr.DataArray("aaa").astype(dtype) @@ -3365,7 +3367,7 @@ def test_join_scalar(dtype): assert_identical(res_space, targ) -def test_join_vector(dtype): +def test_join_vector(dtype) -> None: values = xr.DataArray( ["a", "bb", "cccc"], dims=["Y"], @@ -3391,7 +3393,7 @@ def 
test_join_vector(dtype): assert_identical(res_space_y, targ_space) -def test_join_2d(dtype): +def test_join_2d(dtype) -> None: values = xr.DataArray( [["a", "bb", "cccc"], ["ddddd", "eeee", "fff"]], dims=["X", "Y"], @@ -3437,7 +3439,7 @@ def test_join_2d(dtype): values.str.join() -def test_join_broadcast(dtype): +def test_join_broadcast(dtype) -> None: values = xr.DataArray( ["a", "bb", "cccc"], dims=["X"], @@ -3459,7 +3461,7 @@ def test_join_broadcast(dtype): assert_identical(res, expected) -def test_format_scalar(): +def test_format_scalar() -> None: values = xr.DataArray( ["{}.{Y}.{ZZ}", "{},{},{X},{X}", "{X}-{Y}-{ZZ}"], dims=["X"], @@ -3484,7 +3486,7 @@ def test_format_scalar(): assert_equal(res, expected) -def test_format_broadcast(): +def test_format_broadcast() -> None: values = xr.DataArray( ["{}.{Y}.{ZZ}", "{},{},{X},{X}", "{X}-{Y}-{ZZ}"], dims=["X"], @@ -3518,7 +3520,7 @@ def test_format_broadcast(): assert_equal(res, expected) -def test_mod_scalar(): +def test_mod_scalar() -> None: values = xr.DataArray( ["%s.%s.%s", "%s,%s,%s", "%s-%s-%s"], dims=["X"], @@ -3539,7 +3541,7 @@ def test_mod_scalar(): assert_equal(res, expected) -def test_mod_dict(): +def test_mod_dict() -> None: values = xr.DataArray( ["%(a)s.%(a)s.%(b)s", "%(b)s,%(c)s,%(b)s", "%(c)s-%(b)s-%(a)s"], dims=["X"], @@ -3560,7 +3562,7 @@ def test_mod_dict(): assert_equal(res, expected) -def test_mod_broadcast_single(): +def test_mod_broadcast_single() -> None: values = xr.DataArray( ["%s_1", "%s_2", "%s_3"], dims=["X"], @@ -3582,7 +3584,7 @@ def test_mod_broadcast_single(): assert_equal(res, expected) -def test_mod_broadcast_multi(): +def test_mod_broadcast_multi() -> None: values = xr.DataArray( ["%s.%s.%s", "%s,%s,%s", "%s-%s-%s"], dims=["X"], diff --git a/xarray/tests/test_backends_api.py b/xarray/tests/test_backends_api.py index 4124d0d0b81..cd62ebd4239 100644 --- a/xarray/tests/test_backends_api.py +++ b/xarray/tests/test_backends_api.py @@ -8,7 +8,7 @@ @requires_netCDF4 @requires_scipy -def test__get_default_engine(): +def test__get_default_engine() -> None: engine_remote = _get_default_engine("http://example.org/test.nc", allow_remote=True) assert engine_remote == "netcdf4" @@ -19,7 +19,7 @@ def test__get_default_engine(): assert engine_default == "netcdf4" -def test_custom_engine(): +def test_custom_engine() -> None: expected = xr.Dataset( dict(a=2 * np.arange(5)), coords=dict(x=("x", np.arange(5), dict(units="s"))) ) diff --git a/xarray/tests/test_backends_common.py b/xarray/tests/test_backends_common.py index 7f91e644e2a..75729a8f046 100644 --- a/xarray/tests/test_backends_common.py +++ b/xarray/tests/test_backends_common.py @@ -18,7 +18,7 @@ def __getitem__(self, key): return "success" -def test_robust_getitem(): +def test_robust_getitem() -> None: array = DummyArray(failures=2) with pytest.raises(DummyFailure): array[...] 
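The file-manager and LRU-cache hunks that follow annotate otherwise-empty containers (`cache: Dict = {}`, `cache: LRUCache[Any, Any] = LRUCache(maxsize=2)`): mypy cannot infer item types from an empty literal or from a generic constructor that takes no typed arguments, so the variable itself has to say what it will hold. A minimal sketch:

    from typing import Any, Dict

    cache = {}                   # error: Need type annotation for "cache"
    cache2: Dict[Any, Any] = {}  # OK: the annotation supplies the item types
    cache2["key"] = object()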
diff --git a/xarray/tests/test_backends_file_manager.py b/xarray/tests/test_backends_file_manager.py index 16f059c7bad..73a6a426371 100644 --- a/xarray/tests/test_backends_file_manager.py +++ b/xarray/tests/test_backends_file_manager.py @@ -5,6 +5,7 @@ import pytest +from typing import Dict from xarray.backends.file_manager import CachingFileManager from xarray.backends.lru_cache import LRUCache from xarray.core.options import set_options @@ -19,7 +20,7 @@ def file_cache(request): yield LRUCache(maxsize) -def test_file_manager_mock_write(file_cache): +def test_file_manager_mock_write(file_cache) -> None: mock_file = mock.Mock() opener = mock.Mock(spec=open, return_value=mock_file) lock = mock.MagicMock(spec=threading.Lock()) @@ -37,10 +38,10 @@ def test_file_manager_mock_write(file_cache): @pytest.mark.parametrize("expected_warning", [None, RuntimeWarning]) -def test_file_manager_autoclose(expected_warning): +def test_file_manager_autoclose(expected_warning) -> None: mock_file = mock.Mock() opener = mock.Mock(return_value=mock_file) - cache = {} + cache: Dict = {} manager = CachingFileManager(opener, "filename", cache=cache) manager.acquire() @@ -55,10 +56,10 @@ def test_file_manager_autoclose(expected_warning): mock_file.close.assert_called_once_with() -def test_file_manager_autoclose_while_locked(): +def test_file_manager_autoclose_while_locked() -> None: opener = mock.Mock() lock = threading.Lock() - cache = {} + cache: Dict = {} manager = CachingFileManager(opener, "filename", lock=lock, cache=cache) manager.acquire() @@ -74,17 +75,17 @@ def test_file_manager_autoclose_while_locked(): assert cache -def test_file_manager_repr(): +def test_file_manager_repr() -> None: opener = mock.Mock() manager = CachingFileManager(opener, "my-file") assert "my-file" in repr(manager) -def test_file_manager_refcounts(): +def test_file_manager_refcounts() -> None: mock_file = mock.Mock() opener = mock.Mock(spec=open, return_value=mock_file) - cache = {} - ref_counts = {} + cache: Dict = {} + ref_counts: Dict = {} manager = CachingFileManager(opener, "filename", cache=cache, ref_counts=ref_counts) assert ref_counts[manager._key] == 1 @@ -114,10 +115,10 @@ def test_file_manager_refcounts(): assert not cache -def test_file_manager_replace_object(): +def test_file_manager_replace_object() -> None: opener = mock.Mock() - cache = {} - ref_counts = {} + cache: Dict = {} + ref_counts: Dict = {} manager = CachingFileManager(opener, "filename", cache=cache, ref_counts=ref_counts) manager.acquire() @@ -131,7 +132,7 @@ def test_file_manager_replace_object(): manager.close() -def test_file_manager_write_consecutive(tmpdir, file_cache): +def test_file_manager_write_consecutive(tmpdir, file_cache) -> None: path1 = str(tmpdir.join("testing1.txt")) path2 = str(tmpdir.join("testing2.txt")) manager1 = CachingFileManager(open, path1, mode="w", cache=file_cache) @@ -154,7 +155,7 @@ def test_file_manager_write_consecutive(tmpdir, file_cache): assert f.read() == "bar" -def test_file_manager_write_concurrent(tmpdir, file_cache): +def test_file_manager_write_concurrent(tmpdir, file_cache) -> None: path = str(tmpdir.join("testing.txt")) manager = CachingFileManager(open, path, mode="w", cache=file_cache) f1 = manager.acquire() @@ -174,7 +175,7 @@ def test_file_manager_write_concurrent(tmpdir, file_cache): assert f.read() == "foobarbaz" -def test_file_manager_write_pickle(tmpdir, file_cache): +def test_file_manager_write_pickle(tmpdir, file_cache) -> None: path = str(tmpdir.join("testing.txt")) manager = CachingFileManager(open, 
path, mode="w", cache=file_cache) f = manager.acquire() @@ -190,7 +191,7 @@ def test_file_manager_write_pickle(tmpdir, file_cache): assert f.read() == "foobar" -def test_file_manager_read(tmpdir, file_cache): +def test_file_manager_read(tmpdir, file_cache) -> None: path = str(tmpdir.join("testing.txt")) with open(path, "w") as f: @@ -202,7 +203,7 @@ def test_file_manager_read(tmpdir, file_cache): manager.close() -def test_file_manager_acquire_context(tmpdir, file_cache): +def test_file_manager_acquire_context(tmpdir, file_cache) -> None: path = str(tmpdir.join("testing.txt")) with open(path, "w") as f: diff --git a/xarray/tests/test_backends_locks.py b/xarray/tests/test_backends_locks.py index f7e48b65d46..0aa5f99f282 100644 --- a/xarray/tests/test_backends_locks.py +++ b/xarray/tests/test_backends_locks.py @@ -3,7 +3,7 @@ from xarray.backends import locks -def test_threaded_lock(): +def test_threaded_lock() -> None: lock1 = locks._get_threaded_lock("foo") assert isinstance(lock1, type(threading.Lock())) lock2 = locks._get_threaded_lock("foo") diff --git a/xarray/tests/test_backends_lru_cache.py b/xarray/tests/test_backends_lru_cache.py index 2aaa8c9e631..28924321f6e 100644 --- a/xarray/tests/test_backends_lru_cache.py +++ b/xarray/tests/test_backends_lru_cache.py @@ -4,9 +4,11 @@ from xarray.backends.lru_cache import LRUCache +from typing import Any -def test_simple(): - cache = LRUCache(maxsize=2) + +def test_simple() -> None: + cache: LRUCache[Any, Any] = LRUCache(maxsize=2) cache["x"] = 1 cache["y"] = 2 @@ -22,21 +24,21 @@ def test_simple(): assert list(cache.items()) == [("y", 2), ("z", 3)] -def test_trivial(): - cache = LRUCache(maxsize=0) +def test_trivial() -> None: + cache: LRUCache[Any, Any] = LRUCache(maxsize=0) cache["x"] = 1 assert len(cache) == 0 -def test_invalid(): +def test_invalid() -> None: with pytest.raises(TypeError): - LRUCache(maxsize=None) + LRUCache(maxsize=None) # type: ignore with pytest.raises(ValueError): LRUCache(maxsize=-1) -def test_update_priority(): - cache = LRUCache(maxsize=2) +def test_update_priority() -> None: + cache: LRUCache[Any, Any] = LRUCache(maxsize=2) cache["x"] = 1 cache["y"] = 2 assert list(cache) == ["x", "y"] @@ -48,15 +50,15 @@ def test_update_priority(): assert list(cache.items()) == [("y", 2), ("x", 3)] -def test_del(): - cache = LRUCache(maxsize=2) +def test_del() -> None: + cache: LRUCache[Any, Any] = LRUCache(maxsize=2) cache["x"] = 1 cache["y"] = 2 del cache["x"] assert dict(cache) == {"y": 2} -def test_on_evict(): +def test_on_evict() -> None: on_evict = mock.Mock() cache = LRUCache(maxsize=1, on_evict=on_evict) cache["x"] = 1 @@ -64,15 +66,15 @@ def test_on_evict(): on_evict.assert_called_once_with("x", 1) -def test_on_evict_trivial(): +def test_on_evict_trivial() -> None: on_evict = mock.Mock() cache = LRUCache(maxsize=0, on_evict=on_evict) cache["x"] = 1 on_evict.assert_called_once_with("x", 1) -def test_resize(): - cache = LRUCache(maxsize=2) +def test_resize() -> None: + cache: LRUCache[Any, Any] = LRUCache(maxsize=2) assert cache.maxsize == 2 cache["w"] = 0 cache["x"] = 1 diff --git a/xarray/tests/test_cftimeindex_resample.py b/xarray/tests/test_cftimeindex_resample.py index 526f3fc30c1..af15f997643 100644 --- a/xarray/tests/test_cftimeindex_resample.py +++ b/xarray/tests/test_cftimeindex_resample.py @@ -58,7 +58,7 @@ def da(index): @pytest.mark.parametrize("closed", [None, "left", "right"]) @pytest.mark.parametrize("label", [None, "left", "right"]) @pytest.mark.parametrize("base", [24, 31]) -def test_resample(freqs, closed, 
label, base): +def test_resample(freqs, closed, label, base) -> None: initial_freq, resample_freq = freqs start = "2000-01-01T12:07:01" index_kwargs = dict(start=start, periods=5, freq=initial_freq) @@ -121,7 +121,7 @@ def test_resample(freqs, closed, label, base): ("AS", "left"), ], ) -def test_closed_label_defaults(freq, expected): +def test_closed_label_defaults(freq, expected) -> None: assert CFTimeGrouper(freq=freq).closed == expected assert CFTimeGrouper(freq=freq).label == expected @@ -130,7 +130,7 @@ def test_closed_label_defaults(freq, expected): @pytest.mark.parametrize( "calendar", ["gregorian", "noleap", "all_leap", "360_day", "julian"] ) -def test_calendars(calendar): +def test_calendars(calendar) -> None: # Limited testing for non-standard calendars freq, closed, label, base = "8001T", None, None, 17 loffset = datetime.timedelta(hours=12) diff --git a/xarray/tests/test_coarsen.py b/xarray/tests/test_coarsen.py index 278a961166f..ef2eeac0e0b 100644 --- a/xarray/tests/test_coarsen.py +++ b/xarray/tests/test_coarsen.py @@ -17,14 +17,14 @@ from .test_dataset import ds -def test_coarsen_absent_dims_error(ds): +def test_coarsen_absent_dims_error(ds) -> None: with pytest.raises(ValueError, match=r"not found in Dataset."): ds.coarsen(foo=2) @pytest.mark.parametrize("dask", [True, False]) @pytest.mark.parametrize(("boundary", "side"), [("trim", "left"), ("pad", "right")]) -def test_coarsen_dataset(ds, dask, boundary, side): +def test_coarsen_dataset(ds, dask, boundary, side) -> None: if dask and has_dask: ds = ds.chunk({"x": 4}) @@ -39,7 +39,7 @@ def test_coarsen_dataset(ds, dask, boundary, side): @pytest.mark.parametrize("dask", [True, False]) -def test_coarsen_coords(ds, dask): +def test_coarsen_coords(ds, dask) -> None: if dask and has_dask: ds = ds.chunk({"x": 4}) @@ -64,7 +64,7 @@ def test_coarsen_coords(ds, dask): @requires_cftime -def test_coarsen_coords_cftime(): +def test_coarsen_coords_cftime() -> None: times = xr.cftime_range("2000", periods=6) da = xr.DataArray(range(6), [("time", times)]) actual = da.coarsen(time=3).mean() @@ -79,7 +79,7 @@ def test_coarsen_coords_cftime(): ("mean", ()), ], ) -def test_coarsen_keep_attrs(funcname, argument): +def test_coarsen_keep_attrs(funcname, argument) -> None: global_attrs = {"units": "test", "long_name": "testing"} da_attrs = {"da_attr": "test"} attrs_coords = {"attrs_coords": "test"} @@ -157,7 +157,7 @@ def test_coarsen_keep_attrs(funcname, argument): @pytest.mark.parametrize("ds", (1, 2), indirect=True) @pytest.mark.parametrize("window", (1, 2, 3, 4)) @pytest.mark.parametrize("name", ("sum", "mean", "std", "var", "min", "max", "median")) -def test_coarsen_reduce(ds, window, name): +def test_coarsen_reduce(ds, window, name) -> None: # Use boundary="trim" to accommodate all window sizes used in tests coarsen_obj = ds.coarsen(time=window, boundary="trim") @@ -181,7 +181,7 @@ def test_coarsen_reduce(ds, window, name): ("mean", ()), ], ) -def test_coarsen_da_keep_attrs(funcname, argument): +def test_coarsen_da_keep_attrs(funcname, argument) -> None: attrs_da = {"da_attr": "test"} attrs_coords = {"attrs_coords": "test"} @@ -237,7 +237,7 @@ def test_coarsen_da_keep_attrs(funcname, argument): @pytest.mark.parametrize("da", (1, 2), indirect=True) @pytest.mark.parametrize("window", (1, 2, 3, 4)) @pytest.mark.parametrize("name", ("sum", "mean", "std", "max")) -def test_coarsen_da_reduce(da, window, name): +def test_coarsen_da_reduce(da, window, name) -> None: if da.isnull().sum() > 1 and window == 1: pytest.skip("These parameters lead to 
all-NaN slices") @@ -251,7 +251,7 @@ def test_coarsen_da_reduce(da, window, name): @pytest.mark.parametrize("dask", [True, False]) -def test_coarsen_construct(dask): +def test_coarsen_construct(dask) -> None: ds = Dataset( { diff --git a/xarray/tests/test_coding.py b/xarray/tests/test_coding.py index 839f2fd1f2e..1c2e5aa505a 100644 --- a/xarray/tests/test_coding.py +++ b/xarray/tests/test_coding.py @@ -14,7 +14,7 @@ import dask.array as da -def test_CFMaskCoder_decode(): +def test_CFMaskCoder_decode() -> None: original = xr.Variable(("x",), [0, -1, 1], {"_FillValue": -1}) expected = xr.Variable(("x",), [0, np.nan, 1]) coder = variables.CFMaskCoder() @@ -43,7 +43,7 @@ def test_CFMaskCoder_decode(): CFMASKCODER_ENCODE_DTYPE_CONFLICT_TESTS.values(), ids=list(CFMASKCODER_ENCODE_DTYPE_CONFLICT_TESTS.keys()), ) -def test_CFMaskCoder_encode_missing_fill_values_conflict(data, encoding): +def test_CFMaskCoder_encode_missing_fill_values_conflict(data, encoding) -> None: original = xr.Variable(("x",), data, encoding=encoding) encoded = encode_cf_variable(original) @@ -55,7 +55,7 @@ def test_CFMaskCoder_encode_missing_fill_values_conflict(data, encoding): assert_identical(roundtripped, original) -def test_CFMaskCoder_missing_value(): +def test_CFMaskCoder_missing_value() -> None: expected = xr.DataArray( np.array([[26915, 27755, -9999, 27705], [25595, -9999, 28315, -9999]]), dims=["npts", "ntimes"], @@ -74,7 +74,7 @@ def test_CFMaskCoder_missing_value(): @requires_dask -def test_CFMaskCoder_decode_dask(): +def test_CFMaskCoder_decode_dask() -> None: original = xr.Variable(("x",), [0, -1, 1], {"_FillValue": -1}).chunk() expected = xr.Variable(("x",), [0, np.nan, 1]) coder = variables.CFMaskCoder() @@ -87,7 +87,7 @@ def test_CFMaskCoder_decode_dask(): # TODO(shoyer): parameterize when we have more coders -def test_coder_roundtrip(): +def test_coder_roundtrip() -> None: original = xr.Variable(("x",), [0.0, np.nan, 1.0]) coder = variables.CFMaskCoder() roundtripped = coder.decode(coder.encode(original)) @@ -95,7 +95,7 @@ def test_coder_roundtrip(): @pytest.mark.parametrize("dtype", "u1 u2 i1 i2 f2 f4".split()) -def test_scaling_converts_to_float32(dtype): +def test_scaling_converts_to_float32(dtype) -> None: original = xr.Variable( ("x",), np.arange(10, dtype=dtype), encoding=dict(scale_factor=10) ) @@ -109,7 +109,7 @@ def test_scaling_converts_to_float32(dtype): @pytest.mark.parametrize("scale_factor", (10, [10])) @pytest.mark.parametrize("add_offset", (0.1, [0.1])) -def test_scaling_offset_as_list(scale_factor, add_offset): +def test_scaling_offset_as_list(scale_factor, add_offset) -> None: # test for #4631 encoding = dict(scale_factor=scale_factor, add_offset=add_offset) original = xr.Variable(("x",), np.arange(10.0), encoding=encoding) @@ -120,7 +120,7 @@ def test_scaling_offset_as_list(scale_factor, add_offset): @pytest.mark.parametrize("bits", [1, 2, 4, 8]) -def test_decode_unsigned_from_signed(bits): +def test_decode_unsigned_from_signed(bits) -> None: unsigned_dtype = np.dtype(f"u{bits}") signed_dtype = np.dtype(f"i{bits}") original_values = np.array([np.iinfo(unsigned_dtype).max], dtype=unsigned_dtype) @@ -134,7 +134,7 @@ def test_decode_unsigned_from_signed(bits): @pytest.mark.parametrize("bits", [1, 2, 4, 8]) -def test_decode_signed_from_unsigned(bits): +def test_decode_signed_from_unsigned(bits) -> None: unsigned_dtype = np.dtype(f"u{bits}") signed_dtype = np.dtype(f"i{bits}") original_values = np.array([-1], dtype=signed_dtype) diff --git a/xarray/tests/test_coding_strings.py 
b/xarray/tests/test_coding_strings.py index 800e91d9473..e35e31b74ad 100644 --- a/xarray/tests/test_coding_strings.py +++ b/xarray/tests/test_coding_strings.py @@ -13,7 +13,7 @@ import dask.array as da -def test_vlen_dtype(): +def test_vlen_dtype() -> None: dtype = strings.create_vlen_dtype(str) assert dtype.metadata["element_type"] == str assert strings.is_unicode_dtype(dtype) @@ -29,7 +29,7 @@ def test_vlen_dtype(): assert strings.check_vlen_dtype(np.dtype(object)) is None -def test_EncodedStringCoder_decode(): +def test_EncodedStringCoder_decode() -> None: coder = strings.EncodedStringCoder() raw_data = np.array([b"abc", "ß∂µ∆".encode()]) @@ -43,7 +43,7 @@ def test_EncodedStringCoder_decode(): @requires_dask -def test_EncodedStringCoder_decode_dask(): +def test_EncodedStringCoder_decode_dask() -> None: coder = strings.EncodedStringCoder() raw_data = np.array([b"abc", "ß∂µ∆".encode()]) @@ -59,7 +59,7 @@ def test_EncodedStringCoder_decode_dask(): assert_identical(actual_indexed, expected[0]) -def test_EncodedStringCoder_encode(): +def test_EncodedStringCoder_encode() -> None: dtype = strings.create_vlen_dtype(str) raw_data = np.array(["abc", "ß∂µ∆"], dtype=dtype) expected_data = np.array([r.encode("utf-8") for r in raw_data], dtype=object) @@ -86,7 +86,7 @@ def test_EncodedStringCoder_encode(): Variable((), b"a"), ], ) -def test_CharacterArrayCoder_roundtrip(original): +def test_CharacterArrayCoder_roundtrip(original) -> None: coder = strings.CharacterArrayCoder() roundtripped = coder.decode(coder.encode(original)) assert_identical(original, roundtripped) @@ -99,7 +99,7 @@ def test_CharacterArrayCoder_roundtrip(original): np.array([b"a", b"bc"], dtype=strings.create_vlen_dtype(bytes)), ], ) -def test_CharacterArrayCoder_encode(data): +def test_CharacterArrayCoder_encode(data) -> None: coder = strings.CharacterArrayCoder() raw = Variable(("x",), data) actual = coder.encode(raw) @@ -114,7 +114,7 @@ def test_CharacterArrayCoder_encode(data): (Variable(("x",), [b"ab", b"cdef"], encoding={"char_dim_name": "foo"}), "foo"), ], ) -def test_CharacterArrayCoder_char_dim_name(original, expected_char_dim_name): +def test_CharacterArrayCoder_char_dim_name(original, expected_char_dim_name) -> None: coder = strings.CharacterArrayCoder() encoded = coder.encode(original) roundtripped = coder.decode(encoded) @@ -123,7 +123,7 @@ def test_CharacterArrayCoder_char_dim_name(original, expected_char_dim_name): assert roundtripped.dims[-1] == original.dims[-1] -def test_StackedBytesArray(): +def test_StackedBytesArray() -> None: array = np.array([[b"a", b"b", b"c"], [b"d", b"e", b"f"]], dtype="S") actual = strings.StackedBytesArray(array) expected = np.array([b"abc", b"def"], dtype="S") @@ -140,7 +140,7 @@ def test_StackedBytesArray(): actual[B[:, :2]] -def test_StackedBytesArray_scalar(): +def test_StackedBytesArray_scalar() -> None: array = np.array([b"a", b"b", b"c"], dtype="S") actual = strings.StackedBytesArray(array) @@ -158,7 +158,7 @@ def test_StackedBytesArray_scalar(): actual[B[:2]] -def test_StackedBytesArray_vectorized_indexing(): +def test_StackedBytesArray_vectorized_indexing() -> None: array = np.array([[b"a", b"b", b"c"], [b"d", b"e", b"f"]], dtype="S") stacked = strings.StackedBytesArray(array) expected = np.array([[b"abc", b"def"], [b"def", b"abc"]]) @@ -169,7 +169,7 @@ def test_StackedBytesArray_vectorized_indexing(): assert_array_equal(actual, expected) -def test_char_to_bytes(): +def test_char_to_bytes() -> None: array = np.array([[b"a", b"b", b"c"], [b"d", b"e", b"f"]]) expected = 
np.array([b"abc", b"def"]) actual = strings.char_to_bytes(array) @@ -180,13 +180,13 @@ def test_char_to_bytes(): assert_array_equal(actual, expected) -def test_char_to_bytes_ndim_zero(): +def test_char_to_bytes_ndim_zero() -> None: expected = np.array(b"a") actual = strings.char_to_bytes(expected) assert_array_equal(actual, expected) -def test_char_to_bytes_size_zero(): +def test_char_to_bytes_size_zero() -> None: array = np.zeros((3, 0), dtype="S1") expected = np.array([b"", b"", b""]) actual = strings.char_to_bytes(array) @@ -194,7 +194,7 @@ def test_char_to_bytes_size_zero(): @requires_dask -def test_char_to_bytes_dask(): +def test_char_to_bytes_dask() -> None: numpy_array = np.array([[b"a", b"b", b"c"], [b"d", b"e", b"f"]]) array = da.from_array(numpy_array, ((2,), (3,))) expected = np.array([b"abc", b"def"]) @@ -208,7 +208,7 @@ def test_char_to_bytes_dask(): strings.char_to_bytes(array.rechunk(1)) -def test_bytes_to_char(): +def test_bytes_to_char() -> None: array = np.array([[b"ab", b"cd"], [b"ef", b"gh"]]) expected = np.array([[[b"a", b"b"], [b"c", b"d"]], [[b"e", b"f"], [b"g", b"h"]]]) actual = strings.bytes_to_char(array) @@ -220,7 +220,7 @@ def test_bytes_to_char(): @requires_dask -def test_bytes_to_char_dask(): +def test_bytes_to_char_dask() -> None: numpy_array = np.array([b"ab", b"cd"]) array = da.from_array(numpy_array, ((1, 1),)) expected = np.array([[b"a", b"b"], [b"c", b"d"]]) diff --git a/xarray/tests/test_coding_times.py b/xarray/tests/test_coding_times.py index f0882afe367..4c40e7aff37 100644 --- a/xarray/tests/test_coding_times.py +++ b/xarray/tests/test_coding_times.py @@ -109,7 +109,7 @@ def _all_cftime_date_types(): @requires_cftime @pytest.mark.filterwarnings("ignore:Ambiguous reference date string") @pytest.mark.parametrize(["num_dates", "units", "calendar"], _CF_DATETIME_TESTS) -def test_cf_datetime(num_dates, units, calendar): +def test_cf_datetime(num_dates, units, calendar) -> None: import cftime expected = cftime.num2date( @@ -145,7 +145,7 @@ def test_cf_datetime(num_dates, units, calendar): @requires_cftime -def test_decode_cf_datetime_overflow(): +def test_decode_cf_datetime_overflow() -> None: # checks for # https://github.com/pydata/pandas/issues/14068 # https://github.com/pydata/xarray/issues/975 @@ -165,7 +165,7 @@ def test_decode_cf_datetime_overflow(): assert result == expected[i] -def test_decode_cf_datetime_non_standard_units(): +def test_decode_cf_datetime_non_standard_units() -> None: expected = pd.date_range(periods=100, start="1970-01-01", freq="h") # netCDFs from madis.noaa.gov use this format for their time units # they cannot be parsed by cftime, but pd.Timestamp works @@ -175,7 +175,7 @@ def test_decode_cf_datetime_non_standard_units(): @requires_cftime -def test_decode_cf_datetime_non_iso_strings(): +def test_decode_cf_datetime_non_iso_strings() -> None: # datetime strings that are _almost_ ISO compliant but not quite, # but which cftime.num2date can still parse correctly expected = pd.date_range(periods=100, start="2000-01-01", freq="h") @@ -195,7 +195,7 @@ def test_decode_cf_datetime_non_iso_strings(): @requires_cftime @pytest.mark.parametrize("calendar", _STANDARD_CALENDARS) -def test_decode_standard_calendar_inside_timestamp_range(calendar): +def test_decode_standard_calendar_inside_timestamp_range(calendar) -> None: import cftime units = "days since 0001-01-01" @@ -215,7 +215,7 @@ def test_decode_standard_calendar_inside_timestamp_range(calendar): @requires_cftime @pytest.mark.parametrize("calendar", _NON_STANDARD_CALENDARS) -def 
test_decode_non_standard_calendar_inside_timestamp_range(calendar): +def test_decode_non_standard_calendar_inside_timestamp_range(calendar) -> None: import cftime units = "days since 0001-01-01" @@ -240,7 +240,7 @@ def test_decode_non_standard_calendar_inside_timestamp_range(calendar): @requires_cftime @pytest.mark.parametrize("calendar", _ALL_CALENDARS) -def test_decode_dates_outside_timestamp_range(calendar): +def test_decode_dates_outside_timestamp_range(calendar) -> None: from datetime import datetime import cftime @@ -267,7 +267,9 @@ def test_decode_dates_outside_timestamp_range(calendar): @requires_cftime @pytest.mark.parametrize("calendar", _STANDARD_CALENDARS) -def test_decode_standard_calendar_single_element_inside_timestamp_range(calendar): +def test_decode_standard_calendar_single_element_inside_timestamp_range( + calendar, +) -> None: units = "days since 0001-01-01" for num_time in [735368, [735368], [[735368]]]: with warnings.catch_warnings(): @@ -278,7 +280,9 @@ def test_decode_standard_calendar_single_element_inside_timestamp_range(calendar @requires_cftime @pytest.mark.parametrize("calendar", _NON_STANDARD_CALENDARS) -def test_decode_non_standard_calendar_single_element_inside_timestamp_range(calendar): +def test_decode_non_standard_calendar_single_element_inside_timestamp_range( + calendar, +) -> None: units = "days since 0001-01-01" for num_time in [735368, [735368], [[735368]]]: with warnings.catch_warnings(): @@ -289,7 +293,7 @@ def test_decode_non_standard_calendar_single_element_inside_timestamp_range(cale @requires_cftime @pytest.mark.parametrize("calendar", _NON_STANDARD_CALENDARS) -def test_decode_single_element_outside_timestamp_range(calendar): +def test_decode_single_element_outside_timestamp_range(calendar) -> None: import cftime units = "days since 0001-01-01" @@ -309,7 +313,9 @@ def test_decode_single_element_outside_timestamp_range(calendar): @requires_cftime @pytest.mark.parametrize("calendar", _STANDARD_CALENDARS) -def test_decode_standard_calendar_multidim_time_inside_timestamp_range(calendar): +def test_decode_standard_calendar_multidim_time_inside_timestamp_range( + calendar, +) -> None: import cftime units = "days since 0001-01-01" @@ -338,7 +344,9 @@ def test_decode_standard_calendar_multidim_time_inside_timestamp_range(calendar) @requires_cftime @pytest.mark.parametrize("calendar", _NON_STANDARD_CALENDARS) -def test_decode_nonstandard_calendar_multidim_time_inside_timestamp_range(calendar): +def test_decode_nonstandard_calendar_multidim_time_inside_timestamp_range( + calendar, +) -> None: import cftime units = "days since 0001-01-01" @@ -377,7 +385,7 @@ def test_decode_nonstandard_calendar_multidim_time_inside_timestamp_range(calend @requires_cftime @pytest.mark.parametrize("calendar", _ALL_CALENDARS) -def test_decode_multidim_time_outside_timestamp_range(calendar): +def test_decode_multidim_time_outside_timestamp_range(calendar) -> None: from datetime import datetime import cftime @@ -414,7 +422,7 @@ def test_decode_multidim_time_outside_timestamp_range(calendar): ("calendar", "num_time"), [("360_day", 720058.0), ("all_leap", 732059.0), ("366_day", 732059.0)], ) -def test_decode_non_standard_calendar_single_element(calendar, num_time): +def test_decode_non_standard_calendar_single_element(calendar, num_time) -> None: import cftime units = "days since 0001-01-01" @@ -429,7 +437,7 @@ def test_decode_non_standard_calendar_single_element(calendar, num_time): @requires_cftime -def test_decode_360_day_calendar(): +def test_decode_360_day_calendar() -> 
None: import cftime calendar = "360_day" @@ -454,7 +462,7 @@ def test_decode_360_day_calendar(): @requires_cftime -def test_decode_abbreviation(): +def test_decode_abbreviation() -> None: """Test making sure we properly fall back to cftime on abbreviated units.""" import cftime @@ -479,7 +487,7 @@ def test_decode_abbreviation(): ), ], ) -def test_cf_datetime_nan(num_dates, units, expected_list): +def test_cf_datetime_nan(num_dates, units, expected_list) -> None: with warnings.catch_warnings(): warnings.filterwarnings("ignore", "All-NaN") actual = coding.times.decode_cf_datetime(num_dates, units) @@ -489,7 +497,7 @@ def test_cf_datetime_nan(num_dates, units, expected_list): @requires_cftime -def test_decoded_cf_datetime_array_2d(): +def test_decoded_cf_datetime_array_2d() -> None: # regression test for GH1229 variable = Variable( ("x", "y"), np.array([[0, 1], [2, 3]]), {"units": "days since 2000-01-01"} @@ -512,7 +520,7 @@ def test_decoded_cf_datetime_array_2d(): @pytest.mark.parametrize(("freq", "units"), FREQUENCIES_TO_ENCODING_UNITS.items()) -def test_infer_datetime_units(freq, units): +def test_infer_datetime_units(freq, units) -> None: dates = pd.date_range("2000", periods=2, freq=freq) expected = f"{units} since 2000-01-01 00:00:00" assert expected == coding.times.infer_datetime_units(dates) @@ -529,7 +537,7 @@ def test_infer_datetime_units(freq, units): (pd.to_datetime(["NaT"]), "days since 1970-01-01 00:00:00"), ], ) -def test_infer_datetime_units_with_NaT(dates, expected): +def test_infer_datetime_units_with_NaT(dates, expected) -> None: assert expected == coding.times.infer_datetime_units(dates) @@ -551,7 +559,7 @@ def test_infer_datetime_units_with_NaT(dates, expected): "calendar", _NON_STANDARD_CALENDARS + ["gregorian", "proleptic_gregorian"] ) @pytest.mark.parametrize(("date_args", "expected"), _CFTIME_DATETIME_UNITS_TESTS) -def test_infer_cftime_datetime_units(calendar, date_args, expected): +def test_infer_cftime_datetime_units(calendar, date_args, expected) -> None: date_type = _all_cftime_date_types()[calendar] dates = [date_type(*args) for args in date_args] assert expected == coding.times.infer_datetime_units(dates) @@ -572,7 +580,7 @@ def test_infer_cftime_datetime_units(calendar, date_args, expected): (["NaT", "NaT"], "days", [np.nan, np.nan]), ], ) -def test_cf_timedelta(timedeltas, units, numbers): +def test_cf_timedelta(timedeltas, units, numbers) -> None: if timedeltas == "NaT": timedeltas = np.timedelta64("NaT", "ns") else: @@ -595,7 +603,7 @@ def test_cf_timedelta(timedeltas, units, numbers): assert_array_equal(expected, actual) -def test_cf_timedelta_2d(): +def test_cf_timedelta_2d() -> None: timedeltas = ["1D", "2D", "3D"] units = "days" numbers = np.atleast_2d([1, 2, 3]) @@ -605,7 +613,7 @@ def test_cf_timedelta_2d(): actual = coding.times.decode_cf_timedelta(numbers, units) assert_array_equal(expected, actual) - assert expected.dtype == actual.dtype + assert expected.dtype == actual.dtype # type: ignore @pytest.mark.parametrize( @@ -617,7 +625,7 @@ def test_cf_timedelta_2d(): (pd.to_timedelta(["1m3s", "1m4s"]), "seconds"), ], ) -def test_infer_timedelta_units(deltas, expected): +def test_infer_timedelta_units(deltas, expected) -> None: assert expected == coding.times.infer_timedelta_units(deltas) @@ -631,7 +639,7 @@ def test_infer_timedelta_units(deltas, expected): ((1000, 2, 3, 4, 5, 6), "1000-02-03 04:05:06.000000"), ], ) -def test_format_cftime_datetime(date_args, expected): +def test_format_cftime_datetime(date_args, expected) -> None: date_types = 
_all_cftime_date_types() for date_type in date_types.values(): result = coding.times.format_cftime_datetime(date_type(*date_args)) @@ -639,9 +647,10 @@ def test_format_cftime_datetime(date_args, expected): @pytest.mark.parametrize("calendar", _ALL_CALENDARS) -def test_decode_cf(calendar): +def test_decode_cf(calendar) -> None: days = [1.0, 2.0, 3.0] - da = DataArray(days, coords=[days], dims=["time"], name="test") + # TODO: GH5690 — do we want to allow this type for `coords`? + da = DataArray(days, coords=[days], dims=["time"], name="test") # type: ignore ds = da.to_dataset() for v in ["test", "time"]: @@ -660,7 +669,7 @@ def test_decode_cf(calendar): assert ds.test.dtype == np.dtype("M8[ns]") -def test_decode_cf_time_bounds(): +def test_decode_cf_time_bounds() -> None: da = DataArray( np.arange(6, dtype="int64").reshape((3, 2)), @@ -703,7 +712,7 @@ def test_decode_cf_time_bounds(): @requires_cftime -def test_encode_time_bounds(): +def test_encode_time_bounds() -> None: time = pd.date_range("2000-01-16", periods=1) time_bounds = pd.date_range("2000-01-01", periods=2, freq="MS") @@ -779,41 +788,41 @@ def times_3d(times): @requires_cftime -def test_contains_cftime_datetimes_1d(data): +def test_contains_cftime_datetimes_1d(data) -> None: assert contains_cftime_datetimes(data.time) @requires_cftime @requires_dask -def test_contains_cftime_datetimes_dask_1d(data): +def test_contains_cftime_datetimes_dask_1d(data) -> None: assert contains_cftime_datetimes(data.time.chunk()) @requires_cftime -def test_contains_cftime_datetimes_3d(times_3d): +def test_contains_cftime_datetimes_3d(times_3d) -> None: assert contains_cftime_datetimes(times_3d) @requires_cftime @requires_dask -def test_contains_cftime_datetimes_dask_3d(times_3d): +def test_contains_cftime_datetimes_dask_3d(times_3d) -> None: assert contains_cftime_datetimes(times_3d.chunk()) @pytest.mark.parametrize("non_cftime_data", [DataArray([]), DataArray([1, 2])]) -def test_contains_cftime_datetimes_non_cftimes(non_cftime_data): +def test_contains_cftime_datetimes_non_cftimes(non_cftime_data) -> None: assert not contains_cftime_datetimes(non_cftime_data) @requires_dask @pytest.mark.parametrize("non_cftime_data", [DataArray([]), DataArray([1, 2])]) -def test_contains_cftime_datetimes_non_cftimes_dask(non_cftime_data): +def test_contains_cftime_datetimes_non_cftimes_dask(non_cftime_data) -> None: assert not contains_cftime_datetimes(non_cftime_data.chunk()) @requires_cftime @pytest.mark.parametrize("shape", [(24,), (8, 3), (2, 4, 3)]) -def test_encode_cf_datetime_overflow(shape): +def test_encode_cf_datetime_overflow(shape) -> None: # Test for fix to GH 2272 dates = pd.date_range("2100", periods=24).values.reshape(shape) units = "days since 1800-01-01" @@ -824,7 +833,7 @@ def test_encode_cf_datetime_overflow(shape): np.testing.assert_array_equal(dates, roundtrip) -def test_encode_expected_failures(): +def test_encode_expected_failures() -> None: dates = pd.date_range("2000", periods=3) with pytest.raises(ValueError, match="invalid time units"): @@ -833,7 +842,7 @@ def test_encode_expected_failures(): encode_cf_datetime(dates, units="days since NO_YEAR") -def test_encode_cf_datetime_pandas_min(): +def test_encode_cf_datetime_pandas_min() -> None: # GH 2623 dates = pd.date_range("2000", periods=3) num, units, calendar = encode_cf_datetime(dates) @@ -846,7 +855,7 @@ def test_encode_cf_datetime_pandas_min(): @requires_cftime -def test_time_units_with_timezone_roundtrip(calendar): +def test_time_units_with_timezone_roundtrip(calendar) -> None: # 
Regression test for GH 2649 expected_units = "days since 2000-01-01T00:00:00-05:00" expected_num_dates = np.array([1, 2, 3]) @@ -874,7 +883,7 @@ def test_time_units_with_timezone_roundtrip(calendar): @pytest.mark.parametrize("calendar", _STANDARD_CALENDARS) -def test_use_cftime_default_standard_calendar_in_range(calendar): +def test_use_cftime_default_standard_calendar_in_range(calendar) -> None: numerical_dates = [0, 1] units = "days since 2000-01-01" expected = pd.date_range("2000", periods=2) @@ -888,7 +897,9 @@ def test_use_cftime_default_standard_calendar_in_range(calendar): @requires_cftime @pytest.mark.parametrize("calendar", _STANDARD_CALENDARS) @pytest.mark.parametrize("units_year", [1500, 2500]) -def test_use_cftime_default_standard_calendar_out_of_range(calendar, units_year): +def test_use_cftime_default_standard_calendar_out_of_range( + calendar, units_year +) -> None: from cftime import num2date numerical_dates = [0, 1] @@ -905,7 +916,7 @@ def test_use_cftime_default_standard_calendar_out_of_range(calendar, units_year) @requires_cftime @pytest.mark.parametrize("calendar", _NON_STANDARD_CALENDARS) @pytest.mark.parametrize("units_year", [1500, 2000, 2500]) -def test_use_cftime_default_non_standard_calendar(calendar, units_year): +def test_use_cftime_default_non_standard_calendar(calendar, units_year) -> None: from cftime import num2date numerical_dates = [0, 1] @@ -923,7 +934,7 @@ def test_use_cftime_default_non_standard_calendar(calendar, units_year): @requires_cftime @pytest.mark.parametrize("calendar", _ALL_CALENDARS) @pytest.mark.parametrize("units_year", [1500, 2000, 2500]) -def test_use_cftime_true(calendar, units_year): +def test_use_cftime_true(calendar, units_year) -> None: from cftime import num2date numerical_dates = [0, 1] @@ -939,7 +950,7 @@ def test_use_cftime_true(calendar, units_year): @pytest.mark.parametrize("calendar", _STANDARD_CALENDARS) -def test_use_cftime_false_standard_calendar_in_range(calendar): +def test_use_cftime_false_standard_calendar_in_range(calendar) -> None: numerical_dates = [0, 1] units = "days since 2000-01-01" expected = pd.date_range("2000", periods=2) @@ -952,7 +963,7 @@ def test_use_cftime_false_standard_calendar_in_range(calendar): @pytest.mark.parametrize("calendar", _STANDARD_CALENDARS) @pytest.mark.parametrize("units_year", [1500, 2500]) -def test_use_cftime_false_standard_calendar_out_of_range(calendar, units_year): +def test_use_cftime_false_standard_calendar_out_of_range(calendar, units_year) -> None: numerical_dates = [0, 1] units = f"days since {units_year}-01-01" with pytest.raises(OutOfBoundsDatetime): @@ -961,7 +972,7 @@ def test_use_cftime_false_standard_calendar_out_of_range(calendar, units_year): @pytest.mark.parametrize("calendar", _NON_STANDARD_CALENDARS) @pytest.mark.parametrize("units_year", [1500, 2000, 2500]) -def test_use_cftime_false_non_standard_calendar(calendar, units_year): +def test_use_cftime_false_non_standard_calendar(calendar, units_year) -> None: numerical_dates = [0, 1] units = f"days since {units_year}-01-01" with pytest.raises(OutOfBoundsDatetime): @@ -970,7 +981,7 @@ def test_use_cftime_false_non_standard_calendar(calendar, units_year): @requires_cftime @pytest.mark.parametrize("calendar", _ALL_CALENDARS) -def test_decode_ambiguous_time_warns(calendar): +def test_decode_ambiguous_time_warns(calendar) -> None: # GH 4422, 4506 from cftime import num2date @@ -1003,7 +1014,9 @@ def test_decode_ambiguous_time_warns(calendar): @pytest.mark.parametrize("encoding_units", 
FREQUENCIES_TO_ENCODING_UNITS.values()) @pytest.mark.parametrize("freq", FREQUENCIES_TO_ENCODING_UNITS.keys()) @pytest.mark.parametrize("date_range", [pd.date_range, cftime_range]) -def test_encode_cf_datetime_defaults_to_correct_dtype(encoding_units, freq, date_range): +def test_encode_cf_datetime_defaults_to_correct_dtype( + encoding_units, freq, date_range +) -> None: if not has_cftime_1_4_1 and date_range == cftime_range: pytest.skip("Test requires cftime 1.4.1.") if (freq == "N" or encoding_units == "nanoseconds") and date_range == cftime_range: @@ -1021,7 +1034,7 @@ def test_encode_cf_datetime_defaults_to_correct_dtype(encoding_units, freq, date @pytest.mark.parametrize("freq", FREQUENCIES_TO_ENCODING_UNITS.keys()) -def test_encode_decode_roundtrip_datetime64(freq): +def test_encode_decode_roundtrip_datetime64(freq) -> None: # See GH 4045. Prior to GH 4684 this test would fail for frequencies of # "S", "L", "U", and "N". initial_time = pd.date_range("1678-01-01", periods=1) @@ -1034,7 +1047,7 @@ def test_encode_decode_roundtrip_datetime64(freq): @requires_cftime_1_4_1 @pytest.mark.parametrize("freq", ["U", "L", "S", "T", "H", "D"]) -def test_encode_decode_roundtrip_cftime(freq): +def test_encode_decode_roundtrip_cftime(freq) -> None: initial_time = cftime_range("0001", periods=1) times = initial_time.append( cftime_range("0001", periods=2, freq=freq) + timedelta(days=291000 * 365) @@ -1046,7 +1059,7 @@ def test_encode_decode_roundtrip_cftime(freq): @requires_cftime -def test__encode_datetime_with_cftime(): +def test__encode_datetime_with_cftime() -> None: # See GH 4870. cftime versions > 1.4.0 required us to adapt the # way _encode_datetime_with_cftime was written. import cftime @@ -1061,7 +1074,7 @@ def test__encode_datetime_with_cftime(): @pytest.mark.parametrize("calendar", ["gregorian", "Gregorian", "GREGORIAN"]) -def test_decode_encode_roundtrip_with_non_lowercase_letters(calendar): +def test_decode_encode_roundtrip_with_non_lowercase_letters(calendar) -> None: # See GH 5093. 
times = [0, 1] units = "days since 2000-01-01" diff --git a/xarray/tests/test_computation.py b/xarray/tests/test_computation.py index 2439ea30b4b..22a3efce999 100644 --- a/xarray/tests/test_computation.py +++ b/xarray/tests/test_computation.py @@ -38,7 +38,7 @@ def assert_identical(a, b): assert_array_equal(a, b) -def test_signature_properties(): +def test_signature_properties() -> None: sig = _UFuncSignature([["x"], ["x", "y"]], [["z"]]) assert sig.input_core_dims == (("x",), ("x", "y")) assert sig.output_core_dims == (("z",),) @@ -55,7 +55,7 @@ def test_signature_properties(): assert _UFuncSignature([["x"]]) != _UFuncSignature([["y"]]) -def test_result_name(): +def test_result_name() -> None: class Named: def __init__(self, name=None): self.name = name @@ -67,20 +67,20 @@ def __init__(self, name=None): assert result_name([Named("foo"), Named()]) is None -def test_ordered_set_union(): +def test_ordered_set_union() -> None: assert list(ordered_set_union([[1, 2]])) == [1, 2] assert list(ordered_set_union([[1, 2], [2, 1]])) == [1, 2] assert list(ordered_set_union([[0], [1, 2], [1, 3]])) == [0, 1, 2, 3] -def test_ordered_set_intersection(): +def test_ordered_set_intersection() -> None: assert list(ordered_set_intersection([[1, 2]])) == [1, 2] assert list(ordered_set_intersection([[1, 2], [2, 1]])) == [1, 2] assert list(ordered_set_intersection([[1, 2], [1, 3]])) == [1] assert list(ordered_set_intersection([[1, 2], [2]])) == [2] -def test_join_dict_keys(): +def test_join_dict_keys() -> None: dicts = [dict.fromkeys(keys) for keys in [["x", "y"], ["y", "z"]]] assert list(join_dict_keys(dicts, "left")) == ["x", "y"] assert list(join_dict_keys(dicts, "right")) == ["y", "z"] @@ -92,7 +92,7 @@ def test_join_dict_keys(): join_dict_keys(dicts, "foobar") -def test_collect_dict_values(): +def test_collect_dict_values() -> None: dicts = [{"x": 1, "y": 2, "z": 3}, {"z": 4}, 5] expected = [[1, 0, 5], [2, 0, 5], [3, 4, 5]] collected = collect_dict_values(dicts, ["x", "y", "z"], fill_value=0) @@ -103,7 +103,7 @@ def identity(x): return x -def test_apply_identity(): +def test_apply_identity() -> None: array = np.arange(10) variable = xr.Variable("x", array) data_array = xr.DataArray(variable, [("x", -array)]) @@ -123,7 +123,7 @@ def add(a, b): return apply_ufunc(operator.add, a, b) -def test_apply_two_inputs(): +def test_apply_two_inputs() -> None: array = np.array([1, 2, 3]) variable = xr.Variable("x", array) data_array = xr.DataArray(variable, [("x", -array)]) @@ -170,7 +170,7 @@ def test_apply_two_inputs(): assert_identical(dataset, add(zero_dataset, dataset.groupby("x"))) -def test_apply_1d_and_0d(): +def test_apply_1d_and_0d() -> None: array = np.array([1, 2, 3]) variable = xr.Variable("x", array) data_array = xr.DataArray(variable, [("x", -array)]) @@ -217,7 +217,7 @@ def test_apply_1d_and_0d(): assert_identical(dataset, add(zero_dataset, dataset.groupby("x"))) -def test_apply_two_outputs(): +def test_apply_two_outputs() -> None: array = np.arange(5) variable = xr.Variable("x", array) data_array = xr.DataArray(variable, [("x", -array)]) @@ -255,7 +255,7 @@ def func(x): @requires_dask -def test_apply_dask_parallelized_two_outputs(): +def test_apply_dask_parallelized_two_outputs() -> None: data_array = xr.DataArray([[0, 1, 2], [1, 2, 3]], dims=("x", "y")) def twice(obj): @@ -269,7 +269,7 @@ def func(x): assert_identical(data_array, out1) -def test_apply_input_core_dimension(): +def test_apply_input_core_dimension() -> None: def first_element(obj, dim): def func(x): return x[..., 0] @@ -329,7 +329,7 @@ def 
multiply(*args): assert_identical(expected, actual) -def test_apply_output_core_dimension(): +def test_apply_output_core_dimension() -> None: def stack_negative(obj): def func(x): return np.stack([x, -x], axis=-1) @@ -391,7 +391,7 @@ def func(x): assert_identical(stacked_dataset, out1) -def test_apply_exclude(): +def test_apply_exclude() -> None: def concatenate(objects, dim="x"): def func(*x): return np.concatenate(x, axis=-1) @@ -432,7 +432,7 @@ def func(*x): apply_ufunc(identity, variables[0], exclude_dims={"x"}) -def test_apply_groupby_add(): +def test_apply_groupby_add() -> None: array = np.arange(5) variable = xr.Variable("x", array) coords = {"x": -array, "y": ("x", [0, 0, 1, 1, 2])} @@ -469,7 +469,7 @@ def test_apply_groupby_add(): add(data_array.groupby("y"), data_array.groupby("x")) -def test_unified_dim_sizes(): +def test_unified_dim_sizes() -> None: assert unified_dim_sizes([xr.Variable((), 0)]) == {} assert unified_dim_sizes([xr.Variable("x", [1]), xr.Variable("x", [1])]) == {"x": 1} assert unified_dim_sizes([xr.Variable("x", [1]), xr.Variable("y", [1, 2])]) == { @@ -493,7 +493,7 @@ def test_unified_dim_sizes(): unified_dim_sizes([xr.Variable("x", [1]), xr.Variable("x", [1, 2])]) -def test_broadcast_compat_data_1d(): +def test_broadcast_compat_data_1d() -> None: data = np.arange(5) var = xr.Variable("x", data) @@ -509,7 +509,7 @@ def test_broadcast_compat_data_1d(): broadcast_compat_data(var, (), ()) -def test_broadcast_compat_data_2d(): +def test_broadcast_compat_data_2d() -> None: data = np.arange(12).reshape(3, 4) var = xr.Variable(["x", "y"], data) @@ -529,7 +529,7 @@ def test_broadcast_compat_data_2d(): ) -def test_keep_attrs(): +def test_keep_attrs() -> None: def add(a, b, keep_attrs): if keep_attrs: return apply_ufunc(operator.add, a, b, keep_attrs=keep_attrs) @@ -552,16 +552,16 @@ def add(a, b, keep_attrs): actual = add(a.variable, b.variable, keep_attrs=True) assert_identical(actual.attrs, a.attrs) - a = xr.Dataset({"x": [0, 1]}) - a.attrs["attr"] = "ds" - a.x.attrs["attr"] = "da" - b = xr.Dataset({"x": [0, 1]}) + ds_a = xr.Dataset({"x": [0, 1]}) + ds_a.attrs["attr"] = "ds" + ds_a.x.attrs["attr"] = "da" + ds_b = xr.Dataset({"x": [0, 1]}) - actual = add(a, b, keep_attrs=False) + actual = add(ds_a, ds_b, keep_attrs=False) assert not actual.attrs - actual = add(a, b, keep_attrs=True) - assert_identical(actual.attrs, a.attrs) - assert_identical(actual.x.attrs, a.x.attrs) + actual = add(ds_a, ds_b, keep_attrs=True) + assert_identical(actual.attrs, ds_a.attrs) + assert_identical(actual.x.attrs, ds_a.x.attrs) @pytest.mark.parametrize( @@ -618,7 +618,7 @@ def add(a, b, keep_attrs): ), ), ) -def test_keep_attrs_strategies_variable(strategy, attrs, expected, error): +def test_keep_attrs_strategies_variable(strategy, attrs, expected, error) -> None: a = xr.Variable("x", [0, 1], attrs=attrs[0]) b = xr.Variable("x", [0, 1], attrs=attrs[1]) c = xr.Variable("x", [0, 1], attrs=attrs[2]) @@ -687,7 +687,7 @@ def test_keep_attrs_strategies_variable(strategy, attrs, expected, error): ), ), ) -def test_keep_attrs_strategies_dataarray(strategy, attrs, expected, error): +def test_keep_attrs_strategies_dataarray(strategy, attrs, expected, error) -> None: a = xr.DataArray(dims="x", data=[0, 1], attrs=attrs[0]) b = xr.DataArray(dims="x", data=[0, 1], attrs=attrs[1]) c = xr.DataArray(dims="x", data=[0, 1], attrs=attrs[2]) @@ -852,7 +852,7 @@ def test_keep_attrs_strategies_dataarray_variables( ), ), ) -def test_keep_attrs_strategies_dataset(strategy, attrs, expected, error): +def 
test_keep_attrs_strategies_dataset(strategy, attrs, expected, error) -> None: a = xr.Dataset({"a": ("x", [0, 1])}, attrs=attrs[0]) b = xr.Dataset({"a": ("x", [0, 1])}, attrs=attrs[1]) c = xr.Dataset({"a": ("x", [0, 1])}, attrs=attrs[2]) @@ -959,7 +959,7 @@ def test_keep_attrs_strategies_dataset_variables( assert_identical(actual, expected) -def test_dataset_join(): +def test_dataset_join() -> None: ds0 = xr.Dataset({"a": ("x", [1, 2]), "x": [0, 1]}) ds1 = xr.Dataset({"a": ("x", [99, 3]), "x": [1, 2]}) @@ -1007,7 +1007,7 @@ def add(a, b, join, dataset_join): @requires_dask -def test_apply_dask(): +def test_apply_dask() -> None: import dask.array as da array = da.ones((2,), chunks=2) @@ -1049,7 +1049,7 @@ def dask_safe_identity(x): @requires_dask -def test_apply_dask_parallelized_one_arg(): +def test_apply_dask_parallelized_one_arg() -> None: import dask.array as da array = da.ones((2, 2), chunks=(1, 1)) @@ -1069,7 +1069,7 @@ def parallel_identity(x): @requires_dask -def test_apply_dask_parallelized_two_args(): +def test_apply_dask_parallelized_two_args() -> None: import dask.array as da array = da.ones((2, 2), chunks=(1, 1), dtype=np.int64) @@ -1097,7 +1097,7 @@ def check(x, y): @requires_dask -def test_apply_dask_parallelized_errors(): +def test_apply_dask_parallelized_errors() -> None: import dask.array as da array = da.ones((2, 2), chunks=(1, 1)) @@ -1123,7 +1123,7 @@ def test_apply_dask_parallelized_errors(): # https://github.com/dask/dask/issues/3245 @requires_dask @pytest.mark.filterwarnings("ignore:Mean of empty slice") -def test_apply_dask_multiple_inputs(): +def test_apply_dask_multiple_inputs() -> None: import dask.array as da def covariance(x, y): @@ -1166,7 +1166,7 @@ def covariance(x, y): @requires_dask -def test_apply_dask_new_output_dimension(): +def test_apply_dask_new_output_dimension() -> None: import dask.array as da array = da.ones((2, 2), chunks=(1, 1)) @@ -1195,7 +1195,7 @@ def func(x): @requires_dask -def test_apply_dask_new_output_sizes(): +def test_apply_dask_new_output_sizes() -> None: ds = xr.Dataset({"foo": (["lon", "lat"], np.arange(10 * 10).reshape((10, 10)))}) ds["bar"] = ds["foo"] newdims = {"lon_new": 3, "lat_new": 6} @@ -1224,7 +1224,7 @@ def pandas_median(x): return pd.Series(x).median() -def test_vectorize(): +def test_vectorize() -> None: data_array = xr.DataArray([[0, 1, 2], [1, 2, 3]], dims=("x", "y")) expected = xr.DataArray([1, 2], dims=["x"]) actual = apply_ufunc( @@ -1234,7 +1234,7 @@ def test_vectorize(): @requires_dask -def test_vectorize_dask(): +def test_vectorize_dask() -> None: # run vectorization in dask.array.gufunc by using `dask='parallelized'` data_array = xr.DataArray([[0, 1, 2], [1, 2, 3]], dims=("x", "y")) expected = xr.DataArray([1, 2], dims=["x"]) @@ -1250,7 +1250,7 @@ def test_vectorize_dask(): @requires_dask -def test_vectorize_dask_dtype(): +def test_vectorize_dask_dtype() -> None: # ensure output_dtypes is preserved with vectorize=True # GH4015 @@ -1290,7 +1290,7 @@ def test_vectorize_dask_dtype(): xr.DataArray([[0 + 0j, 1 + 2j, 2 + 1j]], dims=("x", "y")), ], ) -def test_vectorize_dask_dtype_without_output_dtypes(data_array): +def test_vectorize_dask_dtype_without_output_dtypes(data_array) -> None: # ensure output_dtypes is preserved with vectorize=True # GH4015 @@ -1311,7 +1311,7 @@ def test_vectorize_dask_dtype_without_output_dtypes(data_array): reason="dask/dask#7669: can no longer pass output_dtypes and meta", ) @requires_dask -def test_vectorize_dask_dtype_meta(): +def test_vectorize_dask_dtype_meta() -> None: # meta dtype 
takes precedence data_array = xr.DataArray([[0, 1, 2], [1, 2, 3]], dims=("x", "y")) expected = xr.DataArray([1, 2], dims=["x"]) @@ -1335,7 +1335,7 @@ def pandas_median_add(x, y): return pd.Series(x).median() + pd.Series(y).median() -def test_vectorize_exclude_dims(): +def test_vectorize_exclude_dims() -> None: # GH 3890 data_array_a = xr.DataArray([[0, 1, 2], [1, 2, 3]], dims=("x", "y")) data_array_b = xr.DataArray([[0, 1, 2, 3, 4], [1, 2, 3, 4, 5]], dims=("x", "y")) @@ -1353,7 +1353,7 @@ def test_vectorize_exclude_dims(): @requires_dask -def test_vectorize_exclude_dims_dask(): +def test_vectorize_exclude_dims_dask() -> None: # GH 3890 data_array_a = xr.DataArray([[0, 1, 2], [1, 2, 3]], dims=("x", "y")) data_array_b = xr.DataArray([[0, 1, 2, 3, 4], [1, 2, 3, 4, 5]], dims=("x", "y")) @@ -1372,7 +1372,7 @@ def test_vectorize_exclude_dims_dask(): assert_identical(expected, actual) -def test_corr_only_dataarray(): +def test_corr_only_dataarray() -> None: with pytest.raises(TypeError, match="Only xr.DataArray is supported"): xr.corr(xr.Dataset(), xr.Dataset()) @@ -1420,7 +1420,7 @@ def arrays_w_tuples(): ], ) @pytest.mark.parametrize("dim", [None, "x", "time"]) -def test_lazy_corrcov(da_a, da_b, dim, ddof): +def test_lazy_corrcov(da_a, da_b, dim, ddof) -> None: # GH 5284 from dask import is_dask_collection @@ -1438,7 +1438,7 @@ def test_lazy_corrcov(da_a, da_b, dim, ddof): [arrays_w_tuples()[1][0], arrays_w_tuples()[1][1], arrays_w_tuples()[1][2]], ) @pytest.mark.parametrize("dim", [None, "time"]) -def test_cov(da_a, da_b, dim, ddof): +def test_cov(da_a, da_b, dim, ddof) -> None: if dim is not None: def np_cov_ind(ts1, ts2, a, x): @@ -1490,7 +1490,7 @@ def np_cov(ts1, ts2): [arrays_w_tuples()[1][0], arrays_w_tuples()[1][1], arrays_w_tuples()[1][2]], ) @pytest.mark.parametrize("dim", [None, "time"]) -def test_corr(da_a, da_b, dim): +def test_corr(da_a, da_b, dim) -> None: if dim is not None: def np_corr_ind(ts1, ts2, a, x): @@ -1538,7 +1538,7 @@ def np_corr(ts1, ts2): arrays_w_tuples()[1], ) @pytest.mark.parametrize("dim", [None, "time", "x"]) -def test_covcorr_consistency(da_a, da_b, dim): +def test_covcorr_consistency(da_a, da_b, dim) -> None: # Testing that xr.corr and xr.cov are consistent with each other # 1. Broadcast the two arrays da_a, da_b = broadcast(da_a, da_b) @@ -1559,7 +1559,7 @@ def test_covcorr_consistency(da_a, da_b, dim): arrays_w_tuples()[0], ) @pytest.mark.parametrize("dim", [None, "time", "x", ["time", "x"]]) -def test_autocov(da_a, dim): +def test_autocov(da_a, dim) -> None: # Testing that the autocovariance*(N-1) is ~=~ to the variance matrix # 1. 
Ignore the nans valid_values = da_a.notnull() @@ -1571,7 +1571,7 @@ def test_autocov(da_a, dim): @requires_dask -def test_vectorize_dask_new_output_dims(): +def test_vectorize_dask_new_output_dims() -> None: # regression test for GH3574 # run vectorization in dask.array.gufunc by using `dask='parallelized'` data_array = xr.DataArray([[0, 1, 2], [1, 2, 3]], dims=("x", "y")) @@ -1614,7 +1614,7 @@ def test_vectorize_dask_new_output_dims(): ) -def test_output_wrong_number(): +def test_output_wrong_number() -> None: variable = xr.Variable("x", np.arange(10)) def identity(x): @@ -1630,7 +1630,7 @@ def tuple3x(x): apply_ufunc(tuple3x, variable, output_core_dims=[(), ()]) -def test_output_wrong_dims(): +def test_output_wrong_dims() -> None: variable = xr.Variable("x", np.arange(10)) def add_dim(x): @@ -1649,7 +1649,7 @@ def remove_dim(x): apply_ufunc(remove_dim, variable) -def test_output_wrong_dim_size(): +def test_output_wrong_dim_size() -> None: array = np.arange(10) variable = xr.Variable("x", array) data_array = xr.DataArray(variable, [("x", -array)]) @@ -1710,7 +1710,7 @@ def apply_truncate_x_x_valid(obj): @pytest.mark.parametrize("use_dask", [True, False]) -def test_dot(use_dask): +def test_dot(use_dask) -> None: if use_dask: if not has_dask: pytest.skip("test for dask.") @@ -1840,7 +1840,7 @@ def test_dot(use_dask): @pytest.mark.parametrize("use_dask", [True, False]) -def test_dot_align_coords(use_dask): +def test_dot_align_coords(use_dask) -> None: # GH 3694 if use_dask: @@ -1893,7 +1893,7 @@ def test_dot_align_coords(use_dask): xr.testing.assert_allclose(expected, actual) -def test_where(): +def test_where() -> None: cond = xr.DataArray([True, False], dims="x") actual = xr.where(cond, 1, 0) expected = xr.DataArray([1, 0], dims="x") @@ -1902,7 +1902,7 @@ def test_where(): @pytest.mark.parametrize("use_dask", [True, False]) @pytest.mark.parametrize("use_datetime", [True, False]) -def test_polyval(use_dask, use_datetime): +def test_polyval(use_dask, use_datetime) -> None: if use_dask and not has_dask: pytest.skip("requires dask") diff --git a/xarray/tests/test_conventions.py b/xarray/tests/test_conventions.py index ceea167719f..b364b405423 100644 --- a/xarray/tests/test_conventions.py +++ b/xarray/tests/test_conventions.py @@ -23,7 +23,7 @@ class TestBoolTypeArray: - def test_booltype_array(self): + def test_booltype_array(self) -> None: x = np.array([1, 0, 1, 1, 0], dtype="i1") bx = conventions.BoolTypeArray(x) assert bx.dtype == bool @@ -31,7 +31,7 @@ def test_booltype_array(self): class TestNativeEndiannessArray: - def test(self): + def test(self) -> None: x = np.arange(5, dtype=">i8") expected = np.arange(5, dtype="int64") a = conventions.NativeEndiannessArray(x) @@ -40,7 +40,7 @@ def test(self): assert_array_equal(a, expected) -def test_decode_cf_with_conflicting_fill_missing_value(): +def test_decode_cf_with_conflicting_fill_missing_value() -> None: expected = Variable(["t"], [np.nan, np.nan, 2], {"units": "foobar"}) var = Variable( ["t"], np.arange(3), {"units": "foobar", "missing_value": 0, "_FillValue": 1} @@ -75,7 +75,7 @@ def test_decode_cf_with_conflicting_fill_missing_value(): @requires_cftime class TestEncodeCFVariable: - def test_incompatible_attributes(self): + def test_incompatible_attributes(self) -> None: invalid_vars = [ Variable( ["t"], pd.date_range("2000-01-01", periods=3), {"units": "foobar"} @@ -88,13 +88,13 @@ def test_incompatible_attributes(self): with pytest.raises(ValueError): conventions.encode_cf_variable(var) - def test_missing_fillvalue(self): + def 
test_missing_fillvalue(self) -> None:
         v = Variable(["x"], np.array([np.nan, 1, 2, 3]))
         v.encoding = {"dtype": "int16"}
         with pytest.warns(Warning, match="floating point data as an integer"):
             conventions.encode_cf_variable(v)
 
-    def test_multidimensional_coordinates(self):
+    def test_multidimensional_coordinates(self) -> None:
         # regression test for GH1763
         # Set up test case with coordinates that have overlapping (but not
         # identical) dimensions.
@@ -128,7 +128,7 @@ def test_multidimensional_coordinates(self):
         # Should not have any global coordinates.
         assert "coordinates" not in attrs
 
-    def test_do_not_overwrite_user_coordinates(self):
+    def test_do_not_overwrite_user_coordinates(self) -> None:
         orig = Dataset(
             coords={"x": [0, 1, 2], "y": ("x", [5, 6, 7]), "z": ("x", [8, 9, 10])},
             data_vars={"a": ("x", [1, 2, 3]), "b": ("x", [3, 5, 6])},
@@ -142,7 +142,7 @@ def test_do_not_overwrite_user_coordinates(self):
         with pytest.raises(ValueError, match=r"'coordinates' found in both attrs"):
             conventions.encode_dataset_coordinates(orig)
 
-    def test_emit_coordinates_attribute_in_attrs(self):
+    def test_emit_coordinates_attribute_in_attrs(self) -> None:
         orig = Dataset(
             {"a": 1, "b": 1},
             coords={"t": np.array("2004-11-01T00:00:00", dtype=np.datetime64)},
@@ -159,7 +159,7 @@ def test_emit_coordinates_attribute_in_attrs(self):
         assert enc["b"].attrs.get("coordinates") == "t"
         assert "coordinates" not in enc["b"].encoding
 
-    def test_emit_coordinates_attribute_in_encoding(self):
+    def test_emit_coordinates_attribute_in_encoding(self) -> None:
         orig = Dataset(
             {"a": 1, "b": 1},
             coords={"t": np.array("2004-11-01T00:00:00", dtype=np.datetime64)},
@@ -177,7 +177,7 @@ def test_emit_coordinates_attribute_in_encoding(self):
         assert "coordinates" not in enc["b"].encoding
 
     @requires_dask
-    def test_string_object_warning(self):
+    def test_string_object_warning(self) -> None:
         original = Variable(("x",), np.array(["foo", "bar"], dtype=object)).chunk()
         with pytest.warns(SerializationWarning, match="dask array with dtype=object"):
             encoded = conventions.encode_cf_variable(original)
@@ -186,7 +186,7 @@
 
 @requires_cftime
 class TestDecodeCF:
-    def test_dataset(self):
+    def test_dataset(self) -> None:
         original = Dataset(
             {
                 "t": ("t", [0, 1, 2], {"units": "days since 2000-01-01"}),
@@ -204,13 +204,13 @@ def test_dataset(self):
         actual = conventions.decode_cf(original)
         assert_identical(expected, actual)
 
-    def test_invalid_coordinates(self):
+    def test_invalid_coordinates(self) -> None:
         # regression test for GH308
         original = Dataset({"foo": ("t", [1, 2], {"coordinates": "invalid"})})
         actual = conventions.decode_cf(original)
         assert_identical(original, actual)
 
-    def test_decode_coordinates(self):
+    def test_decode_coordinates(self) -> None:
         # regression test for GH610
         original = Dataset(
             {"foo": ("t", [1, 2], {"coordinates": "x"}), "x": ("t", [4, 5])}
@@ -218,13 +218,13 @@ def test_decode_coordinates(self):
         actual = conventions.decode_cf(original)
         assert actual.foo.encoding["coordinates"] == "x"
 
-    def test_0d_int32_encoding(self):
+    def test_0d_int32_encoding(self) -> None:
         original = Variable((), np.int32(0), encoding={"dtype": "int64"})
         expected = Variable((), np.int64(0))
         actual = conventions.maybe_encode_nonstring_dtype(original)
         assert_identical(expected, actual)
 
-    def test_decode_cf_with_multiple_missing_values(self):
+    def test_decode_cf_with_multiple_missing_values(self) -> None:
         original = Variable(["t"], [0, 1, 2], {"missing_value": np.array([0, 1])})
         expected = Variable(["t"], [np.nan, np.nan, 2], {})
         with warnings.catch_warnings(record=True) as w:
@@ -232,7 +232,7 @@ def test_decode_cf_with_multiple_missing_values(self):
             assert_identical(expected, actual)
             assert "has multiple fill" in str(w[0].message)
 
-    def test_decode_cf_with_drop_variables(self):
+    def test_decode_cf_with_drop_variables(self) -> None:
         original = Dataset(
             {
                 "t": ("t", [0, 1, 2], {"units": "days since 2000-01-01"}),
@@ -262,13 +262,13 @@ def test_decode_cf_with_drop_variables(self):
         assert_identical(expected, actual2)
 
     @pytest.mark.filterwarnings("ignore:Ambiguous reference date string")
-    def test_invalid_time_units_raises_eagerly(self):
+    def test_invalid_time_units_raises_eagerly(self) -> None:
         ds = Dataset({"time": ("time", [0, 1], {"units": "foobar since 123"})})
         with pytest.raises(ValueError, match=r"unable to decode time"):
             decode_cf(ds)
 
     @requires_cftime
-    def test_dataset_repr_with_netcdf4_datetimes(self):
+    def test_dataset_repr_with_netcdf4_datetimes(self) -> None:
         # regression test for #347
         attrs = {"units": "days since 0001-01-01", "calendar": "noleap"}
         with warnings.catch_warnings():
@@ -281,7 +281,7 @@ def test_dataset_repr_with_netcdf4_datetimes(self):
         assert "(time) datetime64[ns]" in repr(ds)
 
     @requires_cftime
-    def test_decode_cf_datetime_transition_to_invalid(self):
+    def test_decode_cf_datetime_transition_to_invalid(self) -> None:
         # manually create dataset with not-decoded date
         from datetime import datetime
 
@@ -297,7 +297,7 @@ def test_decode_cf_datetime_transition_to_invalid(self):
         assert_array_equal(ds_decoded.time.values, expected)
 
     @requires_dask
-    def test_decode_cf_with_dask(self):
+    def test_decode_cf_with_dask(self) -> None:
         import dask.array as da
 
         original = Dataset(
@@ -319,7 +319,7 @@ def test_decode_cf_with_dask(self):
         assert_identical(decoded, conventions.decode_cf(original).compute())
 
     @requires_dask
-    def test_decode_dask_times(self):
+    def test_decode_dask_times(self) -> None:
         original = Dataset.from_dict(
             {
                 "coords": {},
@@ -338,7 +338,7 @@ def test_decode_dask_times(self):
             conventions.decode_cf(original).chunk(),
         )
 
-    def test_decode_cf_time_kwargs(self):
+    def test_decode_cf_time_kwargs(self) -> None:
         ds = Dataset.from_dict(
             {
                 "coords": {
@@ -401,18 +401,18 @@ def roundtrip(
             yield open_dataset(store, **open_kwargs)
 
     @pytest.mark.skip("cannot roundtrip coordinates yet for CFEncodedInMemoryStore")
-    def test_roundtrip_coordinates(self):
+    def test_roundtrip_coordinates(self) -> None:
         pass
 
-    def test_invalid_dataarray_names_raise(self):
+    def test_invalid_dataarray_names_raise(self) -> None:
         # only relevant for on-disk file formats
         pass
 
-    def test_encoding_kwarg(self):
+    def test_encoding_kwarg(self) -> None:
         # we haven't bothered to raise errors yet for unexpected encodings in
         # this test dummy
         pass
 
-    def test_encoding_kwarg_fixed_width_string(self):
+    def test_encoding_kwarg_fixed_width_string(self) -> None:
         # CFEncodedInMemoryStore doesn't support explicit string encodings.
         pass
 
diff --git a/xarray/tests/test_cupy.py b/xarray/tests/test_cupy.py
index 69f43d99139..e8f35e12ac6 100644
--- a/xarray/tests/test_cupy.py
+++ b/xarray/tests/test_cupy.py
@@ -39,18 +39,18 @@ def toy_weather_data():
     return ds
 
 
-def test_cupy_import():
+def test_cupy_import() -> None:
     """Check the import worked."""
     assert cp
 
 
-def test_check_data_stays_on_gpu(toy_weather_data):
+def test_check_data_stays_on_gpu(toy_weather_data) -> None:
     """Perform some operations and check the data stays on the GPU."""
     freeze = (toy_weather_data["tmin"] <= 0).groupby("time.month").mean("time")
     assert isinstance(freeze.data, cp.ndarray)
 
 
-def test_where():
+def test_where() -> None:
     from xarray.core.duck_array_ops import where
 
     data = cp.zeros(10)
diff --git a/xarray/tests/test_distributed.py b/xarray/tests/test_distributed.py
index ab0d1d9f22c..ef1ce50d6ea 100644
--- a/xarray/tests/test_distributed.py
+++ b/xarray/tests/test_distributed.py
@@ -132,13 +132,13 @@ def test_dask_distributed_read_netcdf_integration_test(
 
 @requires_zarr
 @pytest.mark.parametrize("consolidated", [True, False])
 @pytest.mark.parametrize("compute", [True, False])
-def test_dask_distributed_zarr_integration_test(loop, consolidated, compute):
+def test_dask_distributed_zarr_integration_test(loop, consolidated, compute) -> None:
     if consolidated:
         pytest.importorskip("zarr", minversion="2.2.1.dev2")
         write_kwargs = {"consolidated": True}
         read_kwargs = {"backend_kwargs": {"consolidated": True}}
     else:
-        write_kwargs = read_kwargs = {}
+        write_kwargs = read_kwargs = {}  # type: ignore
     chunks = {"dim1": 4, "dim2": 3, "dim3": 5}
     with cluster() as (s, [a, b]):
         with Client(s["address"], loop=loop):
@@ -160,7 +160,7 @@ def test_dask_distributed_zarr_integration_test(loop, consolidated, compute):
 
 
 @requires_rasterio
-def test_dask_distributed_rasterio_integration_test(loop):
+def test_dask_distributed_rasterio_integration_test(loop) -> None:
     with create_tmp_geotiff() as (tmp_file, expected):
         with cluster() as (s, [a, b]):
             with Client(s["address"], loop=loop):
@@ -172,7 +172,7 @@ def test_dask_distributed_rasterio_integration_test(loop):
 
 @requires_cfgrib
 @pytest.mark.filterwarnings("ignore:deallocating CachingFileManager")
-def test_dask_distributed_cfgrib_integration_test(loop):
+def test_dask_distributed_cfgrib_integration_test(loop) -> None:
     with cluster() as (s, [a, b]):
         with Client(s["address"], loop=loop):
             with open_example_dataset(
@@ -185,7 +185,7 @@ def test_dask_distributed_cfgrib_integration_test(loop):
 
 
 @gen_cluster(client=True)
-async def test_async(c, s, a, b):
+async def test_async(c, s, a, b) -> None:
     x = create_test_data()
     assert not dask.is_dask_collection(x)
     y = x.chunk({"dim2": 4}) + 10
@@ -212,12 +212,12 @@ async def test_async(c, s, a, b):
     assert s.tasks
 
 
-def test_hdf5_lock():
+def test_hdf5_lock() -> None:
     assert isinstance(HDF5_LOCK, dask.utils.SerializableLock)
 
 
 @gen_cluster(client=True)
-async def test_serializable_locks(c, s, a, b):
+async def test_serializable_locks(c, s, a, b) -> None:
     def f(x, lock=None):
         with lock:
             return x + 1
diff --git a/xarray/tests/test_extensions.py b/xarray/tests/test_extensions.py
index 2d9fa11dda3..5ca3a35e238 100644
--- a/xarray/tests/test_extensions.py
+++ b/xarray/tests/test_extensions.py
@@ -17,7 +17,7 @@ def __init__(self, xarray_obj):
 
 
 class TestAccessor:
-    def test_register(self):
+    def test_register(self) -> None:
         @xr.register_dataset_accessor("demo")
         @xr.register_dataarray_accessor("demo")
         class DemoAccessor:
@@ -41,12 +41,13 @@ def foo(self):
 
         # check descriptor
         assert ds.demo.__doc__ == "Demo accessor."
-        assert xr.Dataset.demo.__doc__ == "Demo accessor."
-        assert isinstance(ds.demo, DemoAccessor)
-        assert xr.Dataset.demo is DemoAccessor
+        # TODO: typing doesn't seem to work with accessors
+        assert xr.Dataset.demo.__doc__ == "Demo accessor."  # type: ignore
+        assert isinstance(ds.demo, DemoAccessor)  # type: ignore
+        assert xr.Dataset.demo is DemoAccessor  # type: ignore
 
         # ensure we can remove it
-        del xr.Dataset.demo
+        del xr.Dataset.demo  # type: ignore
         assert not hasattr(xr.Dataset, "demo")
 
         with pytest.warns(Warning, match="overriding a preexisting attribute"):
@@ -58,7 +59,7 @@ class Foo:
         # it didn't get registered again
         assert not hasattr(xr.Dataset, "demo")
 
-    def test_pickle_dataset(self):
+    def test_pickle_dataset(self) -> None:
         ds = xr.Dataset()
         ds_restored = pickle.loads(pickle.dumps(ds))
         assert_identical(ds, ds_restored)
@@ -70,13 +71,13 @@ def test_pickle_dataset(self):
         assert_identical(ds, ds_restored)
         assert ds_restored.example_accessor.value == "foo"
 
-    def test_pickle_dataarray(self):
+    def test_pickle_dataarray(self) -> None:
         array = xr.Dataset()
         assert array.example_accessor is array.example_accessor
         array_restored = pickle.loads(pickle.dumps(array))
         assert_identical(array, array_restored)
 
-    def test_broken_accessor(self):
+    def test_broken_accessor(self) -> None:
         # regression test for GH933
 
         @xr.register_dataset_accessor("stupid_accessor")
diff --git a/xarray/tests/test_formatting.py b/xarray/tests/test_formatting.py
index b9ba57f99dc..594e1829b07 100644
--- a/xarray/tests/test_formatting.py
+++ b/xarray/tests/test_formatting.py
@@ -12,7 +12,7 @@
 
 
 class TestFormatting:
-    def test_get_indexer_at_least_n_items(self):
+    def test_get_indexer_at_least_n_items(self) -> None:
         cases = [
             ((20,), (slice(10),), (slice(-10, None),)),
             ((3, 20), (0, slice(10)), (-1, slice(-10, None))),
@@ -43,7 +43,7 @@ def test_get_indexer_at_least_n_items(self):
             actual = formatting._get_indexer_at_least_n_items(shape, 10, from_end=True)
             assert end_expected == actual
 
-    def test_first_n_items(self):
+    def test_first_n_items(self) -> None:
         array = np.arange(100).reshape(10, 5, 2)
         for n in [3, 10, 13, 100, 200]:
             actual = formatting.first_n_items(array, n)
@@ -53,7 +53,7 @@ def test_first_n_items(self):
         with pytest.raises(ValueError, match=r"at least one item"):
             formatting.first_n_items(array, 0)
 
-    def test_last_n_items(self):
+    def test_last_n_items(self) -> None:
         array = np.arange(100).reshape(10, 5, 2)
         for n in [3, 10, 13, 100, 200]:
             actual = formatting.last_n_items(array, n)
@@ -63,7 +63,7 @@ def test_last_n_items(self):
         with pytest.raises(ValueError, match=r"at least one item"):
             formatting.first_n_items(array, 0)
 
-    def test_last_item(self):
+    def test_last_item(self) -> None:
         array = np.arange(100)
 
         reshape = ((10, 10), (1, 100), (2, 2, 5, 5))
@@ -73,7 +73,7 @@ def test_last_item(self):
             result = formatting.last_item(array.reshape(r))
             assert result == expected
 
-    def test_format_item(self):
+    def test_format_item(self) -> None:
         cases = [
             (pd.Timestamp("2000-01-01T12"), "2000-01-01T12:00:00"),
             (pd.Timestamp("2000-01-01"), "2000-01-01"),
@@ -94,7 +94,7 @@ def test_format_item(self):
             actual = formatting.format_item(item)
             assert expected == actual
 
-    def test_format_items(self):
+    def test_format_items(self) -> None:
         cases = [
             (np.arange(4) * np.timedelta64(1, "D"), "0 days 1 days 2 days 3 days"),
             (
@@ -116,7 +116,7 @@ def test_format_items(self):
             actual = " ".join(formatting.format_items(item))
             assert expected == actual
 
-    def test_format_array_flat(self):
+    def test_format_array_flat(self) -> None:
         actual = formatting.format_array_flat(np.arange(100), 2)
         expected = "..."
         assert expected == actual
@@ -180,14 +180,14 @@ def test_format_array_flat(self):
         expected = "'hello world hello..."
         assert expected == actual
 
-    def test_pretty_print(self):
+    def test_pretty_print(self) -> None:
         assert formatting.pretty_print("abcdefghij", 8) == "abcde..."
         assert formatting.pretty_print("ß", 1) == "ß"
 
-    def test_maybe_truncate(self):
+    def test_maybe_truncate(self) -> None:
         assert formatting.maybe_truncate("ß", 10) == "ß"
 
-    def test_format_timestamp_out_of_bounds(self):
+    def test_format_timestamp_out_of_bounds(self) -> None:
         from datetime import datetime
 
         date = datetime(1300, 12, 1)
@@ -200,7 +200,7 @@ def test_format_timestamp_out_of_bounds(self):
         result = formatting.format_timestamp(date)
         assert result == expected
 
-    def test_attribute_repr(self):
+    def test_attribute_repr(self) -> None:
         short = formatting.summarize_attr("key", "Short string")
         long = formatting.summarize_attr("key", 100 * "Very long string ")
         newlines = formatting.summarize_attr("key", "\n\n\n")
@@ -211,7 +211,7 @@ def test_attribute_repr(self):
         assert "\n" not in newlines
         assert "\t" not in tabs
 
-    def test_diff_array_repr(self):
+    def test_diff_array_repr(self) -> None:
         da_a = xr.DataArray(
             np.array([[1, 2, 3], [4, 5, 6]], dtype="int64"),
             dims=("x", "y"),
@@ -291,7 +291,7 @@ def test_diff_array_repr(self):
             assert actual == expected.replace(", dtype=int64", "")
 
     @pytest.mark.filterwarnings("error")
-    def test_diff_attrs_repr_with_array(self):
+    def test_diff_attrs_repr_with_array(self) -> None:
         attrs_a = {"attr": np.array([0, 1])}
 
         attrs_b = {"attr": 1}
@@ -328,7 +328,7 @@ def test_diff_attrs_repr_with_array(self):
         actual = formatting.diff_attrs_repr(attrs_a, attrs_b, "equals")
         assert expected == actual
 
-    def test_diff_dataset_repr(self):
+    def test_diff_dataset_repr(self) -> None:
         ds_a = xr.Dataset(
             data_vars={
                 "var1": (("x", "y"), np.array([[1, 2, 3], [4, 5, 6]], dtype="int64")),
@@ -380,7 +380,7 @@ def test_diff_dataset_repr(self):
         actual = formatting.diff_dataset_repr(ds_a, ds_b, "identical")
         assert actual == expected
 
-    def test_array_repr(self):
+    def test_array_repr(self) -> None:
         ds = xr.Dataset(coords={"foo": [1, 2, 3], "bar": [1, 2, 3]})
         ds[(1, 2)] = xr.DataArray([0], dims="test")
         actual = formatting.array_repr(ds[(1, 2)])
@@ -404,7 +404,7 @@ def test_array_repr(self):
 
         assert actual == expected
 
-    def test_array_repr_variable(self):
+    def test_array_repr_variable(self) -> None:
         var = xr.Variable("x", [0, 1])
 
         formatting.array_repr(var)
@@ -413,7 +413,7 @@ def test_array_repr_variable(self):
             formatting.array_repr(var)
 
 
-def test_inline_variable_array_repr_custom_repr():
+def test_inline_variable_array_repr_custom_repr() -> None:
     class CustomArray:
         def __init__(self, value, attr):
             self.value = value
@@ -450,7 +450,7 @@ def ndim(self):
     assert actual == value._repr_inline_(max_width)
 
 
-def test_set_numpy_options():
+def test_set_numpy_options() -> None:
     original_options = np.get_printoptions()
     with formatting.set_numpy_options(threshold=10):
         assert len(repr(np.arange(500))) < 200
@@ -458,7 +458,7 @@ def test_set_numpy_options():
     assert np.get_printoptions() == original_options
 
 
-def test_short_numpy_repr():
+def test_short_numpy_repr() -> None:
     cases = [
         np.random.randn(500),
         np.random.randn(20, 20),
@@ -474,7 +474,7 @@ def test_short_numpy_repr():
         assert num_lines < 30
 
 
-def test_large_array_repr_length():
+def test_large_array_repr_length() -> None:
 
     da = xr.DataArray(np.random.randn(100, 5, 1))
 
@@ -483,7 +483,7 @@ def test_large_array_repr_length():
 
 
 @requires_netCDF4
-def test_repr_file_collapsed(tmp_path):
+def test_repr_file_collapsed(tmp_path) -> None:
     arr = xr.DataArray(np.arange(300), dims="test")
     arr.to_netcdf(tmp_path / "test.nc", engine="netcdf4")
 
@@ -505,7 +505,7 @@
     "display_max_rows, n_vars, n_attr",
     [(50, 40, 30), (35, 40, 30), (11, 40, 30), (1, 40, 30)],
 )
-def test__mapping_repr(display_max_rows, n_vars, n_attr):
+def test__mapping_repr(display_max_rows, n_vars, n_attr) -> None:
     long_name = "long_name"
     a = np.core.defchararray.add(long_name, np.arange(0, n_vars).astype(str))
     b = np.core.defchararray.add("attr_", np.arange(0, n_attr).astype(str))
diff --git a/xarray/tests/test_formatting_html.py b/xarray/tests/test_formatting_html.py
index 09c6fa0cf3c..51303a7c4d8 100644
--- a/xarray/tests/test_formatting_html.py
+++ b/xarray/tests/test_formatting_html.py
@@ -1,4 +1,5 @@
 import numpy as np
+from typing import Dict, List
 import pandas as pd
 import pytest
@@ -44,47 +45,49 @@ def dataset():
     )
 
 
-def test_short_data_repr_html(dataarray):
+def test_short_data_repr_html(dataarray) -> None:
     data_repr = fh.short_data_repr_html(dataarray)
     assert data_repr.startswith("<pre>array")
 
 
-def test_short_data_repr_html_non_str_keys(dataset):
+def test_short_data_repr_html_non_str_keys(dataset) -> None:
     ds = dataset.assign({2: lambda x: x["tmin"]})
     fh.dataset_repr(ds)
 
 
-def test_short_data_repr_html_dask(dask_dataarray):
+def test_short_data_repr_html_dask(dask_dataarray) -> None:
     assert hasattr(dask_dataarray.data, "_repr_html_")
     data_repr = fh.short_data_repr_html(dask_dataarray)
     assert data_repr == dask_dataarray.data._repr_html_()
 
 
-def test_format_dims_no_dims():
-    dims, coord_names = {}, []
+def test_format_dims_no_dims() -> None:
+    dims: Dict = {}
+    coord_names: List = []
     formatted = fh.format_dims(dims, coord_names)
     assert formatted == ""
 
 
-def test_format_dims_unsafe_dim_name():
-    dims, coord_names = {"": 3, "y": 2}, []
+def test_format_dims_unsafe_dim_name() -> None:
+    dims = {"": 3, "y": 2}
+    coord_names: List = []
     formatted = fh.format_dims(dims, coord_names)
     assert "<x>" in formatted
 
 
-def test_format_dims_non_index():
+def test_format_dims_non_index() -> None:
     dims, coord_names = {"x": 3, "y": 2}, ["time"]
     formatted = fh.format_dims(dims, coord_names)
     assert "class='xr-has-index'" not in formatted
 
 
-def test_format_dims_index():
+def test_format_dims_index() -> None:
     dims, coord_names = {"x": 3, "y": 2}, ["x"]
     formatted = fh.format_dims(dims, coord_names)
     assert "class='xr-has-index'" in formatted
 
 
-def test_summarize_attrs_with_unsafe_attr_name_and_value():
+def test_summarize_attrs_with_unsafe_attr_name_and_value() -> None:
     attrs = {"": 3, "y": ""}
     formatted = fh.summarize_attrs(attrs)
     assert "
<x> :
" in formatted @@ -93,7 +96,7 @@ def test_summarize_attrs_with_unsafe_attr_name_and_value(): assert "
<pd.DataFrame>
" in formatted -def test_repr_of_dataarray(dataarray): +def test_repr_of_dataarray(dataarray) -> None: formatted = fh.array_repr(dataarray) assert "dim_0" in formatted # has an expanded data section @@ -115,7 +118,7 @@ def test_repr_of_dataarray(dataarray): ) -def test_summary_of_multiindex_coord(multiindex): +def test_summary_of_multiindex_coord(multiindex) -> None: idx = multiindex.x.variable.to_index_variable() formatted = fh._summarize_coord_multiindex("foo", idx) assert "(level_1, level_2)" in formatted @@ -123,12 +126,12 @@ def test_summary_of_multiindex_coord(multiindex): assert "foo" in formatted -def test_repr_of_multiindex(multiindex): +def test_repr_of_multiindex(multiindex) -> None: formatted = fh.dataset_repr(multiindex) assert "(x)" in formatted -def test_repr_of_dataset(dataset): +def test_repr_of_dataset(dataset) -> None: formatted = fh.dataset_repr(dataset) # coords, attrs, and data_vars are expanded assert ( @@ -152,14 +155,14 @@ def test_repr_of_dataset(dataset): assert "<IA>" in formatted -def test_repr_text_fallback(dataset): +def test_repr_text_fallback(dataset) -> None: formatted = fh.dataset_repr(dataset) # Just test that the "pre" block used for fallback to plain text is present. assert "
" in formatted
 
 
-def test_variable_repr_html():
+def test_variable_repr_html() -> None:
     v = xr.Variable(["time", "x"], [[1, 2, 3], [4, 5, 6]], {"foo": "bar"})
     assert hasattr(v, "_repr_html_")
     with xr.set_options(display_style="html"):
@@ -171,7 +174,7 @@ def test_variable_repr_html():
     assert "xarray.Variable" in html
 
 
-def test_repr_of_nonstr_dataset(dataset):
+def test_repr_of_nonstr_dataset(dataset) -> None:
     ds = dataset.copy()
     ds.attrs[1] = "Test value"
     ds[2] = ds["tmin"]
@@ -180,7 +183,7 @@ def test_repr_of_nonstr_dataset(dataset):
     assert "
2" in formatted -def test_repr_of_nonstr_dataarray(dataarray): +def test_repr_of_nonstr_dataarray(dataarray) -> None: da = dataarray.rename(dim_0=15) da.attrs[1] = "value" formatted = fh.array_repr(da) @@ -188,7 +191,7 @@ def test_repr_of_nonstr_dataarray(dataarray): assert "
  • 15: 4
  • " in formatted -def test_nonstr_variable_repr_html(): +def test_nonstr_variable_repr_html() -> None: v = xr.Variable(["time", 10], [[1, 2, 3], [4, 5, 6]], {22: "bar"}) assert hasattr(v, "_repr_html_") with xr.set_options(display_style="html"): diff --git a/xarray/tests/test_groupby.py b/xarray/tests/test_groupby.py index b2510141d78..1bae9b7429c 100644 --- a/xarray/tests/test_groupby.py +++ b/xarray/tests/test_groupby.py @@ -25,7 +25,7 @@ def array(dataset): return dataset["foo"] -def test_consolidate_slices(): +def test_consolidate_slices() -> None: assert _consolidate_slices([slice(3), slice(3, 5)]) == [slice(5)] assert _consolidate_slices([slice(2, 3), slice(3, 6)]) == [slice(2, 6)] @@ -38,7 +38,7 @@ def test_consolidate_slices(): _consolidate_slices([slice(3), 4]) -def test_groupby_dims_property(dataset): +def test_groupby_dims_property(dataset) -> None: assert dataset.groupby("x").dims == dataset.isel(x=1).dims assert dataset.groupby("y").dims == dataset.isel(y=1).dims @@ -46,7 +46,7 @@ def test_groupby_dims_property(dataset): assert stacked.groupby("xy").dims == stacked.isel(xy=0).dims -def test_multi_index_groupby_map(dataset): +def test_multi_index_groupby_map(dataset) -> None: # regression test for GH873 ds = dataset.isel(z=1, drop=True)[["foo"]] expected = 2 * ds @@ -59,7 +59,7 @@ def test_multi_index_groupby_map(dataset): assert_equal(expected, actual) -def test_multi_index_groupby_sum(): +def test_multi_index_groupby_sum() -> None: # regression test for GH873 ds = xr.Dataset( {"foo": (("x", "y", "z"), np.ones((3, 4, 2)))}, @@ -70,7 +70,7 @@ def test_multi_index_groupby_sum(): assert_equal(expected, actual) -def test_groupby_da_datetime(): +def test_groupby_da_datetime() -> None: # test groupby with a DataArray of dtype datetime for GH1132 # create test data times = pd.date_range("2000-01-01", periods=4) @@ -90,7 +90,7 @@ def test_groupby_da_datetime(): assert_equal(expected, actual) -def test_groupby_duplicate_coordinate_labels(): +def test_groupby_duplicate_coordinate_labels() -> None: # fix for http://stackoverflow.com/questions/38065129 array = xr.DataArray([1, 2, 3], [("x", [1, 1, 2])]) expected = xr.DataArray([3, 3], [("x", [1, 2])]) @@ -98,7 +98,7 @@ def test_groupby_duplicate_coordinate_labels(): assert_equal(expected, actual) -def test_groupby_input_mutation(): +def test_groupby_input_mutation() -> None: # regression test for GH2153 array = xr.DataArray([1, 2, 3], [("x", [2, 2, 1])]) array_copy = array.copy() @@ -115,7 +115,7 @@ def test_groupby_input_mutation(): xr.Dataset({"foo": ("x", [1, 2, 3, 4, 5, 6])}, {"x": [1, 1, 1, 2, 2, 2]}), ], ) -def test_groupby_map_shrink_groups(obj): +def test_groupby_map_shrink_groups(obj) -> None: expected = obj.isel(x=[0, 1, 3, 4]) actual = obj.groupby("x").map(lambda f: f.isel(x=[0, 1])) assert_identical(expected, actual) @@ -128,7 +128,7 @@ def test_groupby_map_shrink_groups(obj): xr.Dataset({"foo": ("x", [1, 2, 3])}, {"x": [1, 2, 2]}), ], ) -def test_groupby_map_change_group_size(obj): +def test_groupby_map_change_group_size(obj) -> None: def func(group): if group.sizes["x"] == 1: result = group.isel(x=[0, 0]) @@ -141,7 +141,7 @@ def func(group): assert_identical(expected, actual) -def test_da_groupby_map_func_args(): +def test_da_groupby_map_func_args() -> None: def func(arg1, arg2, arg3=0): return arg1 + arg2 + arg3 @@ -151,7 +151,7 @@ def func(arg1, arg2, arg3=0): assert_identical(expected, actual) -def test_ds_groupby_map_func_args(): +def test_ds_groupby_map_func_args() -> None: def func(arg1, arg2, arg3=0): return arg1 + 
arg2 + arg3 @@ -161,7 +161,7 @@ def func(arg1, arg2, arg3=0): assert_identical(expected, actual) -def test_da_groupby_empty(): +def test_da_groupby_empty() -> None: empty_array = xr.DataArray([], dims="dim") @@ -169,7 +169,7 @@ def test_da_groupby_empty(): empty_array.groupby("dim") -def test_da_groupby_quantile(): +def test_da_groupby_quantile() -> None: array = xr.DataArray( data=[1, 2, 3, 4, 5, 6], coords={"x": [1, 1, 1, 2, 2, 2]}, dims="x" @@ -265,7 +265,7 @@ def test_da_groupby_quantile(): assert_identical(expected, actual) -def test_ds_groupby_quantile(): +def test_ds_groupby_quantile() -> None: ds = xr.Dataset( data_vars={"a": ("x", [1, 2, 3, 4, 5, 6])}, coords={"x": [1, 1, 1, 2, 2, 2]} ) @@ -359,7 +359,7 @@ def test_ds_groupby_quantile(): assert_identical(expected, actual) -def test_da_groupby_assign_coords(): +def test_da_groupby_assign_coords() -> None: actual = xr.DataArray( [[3, 4, 5], [6, 7, 8]], dims=["y", "x"], coords={"y": range(2), "x": range(3)} ) @@ -386,7 +386,7 @@ def test_da_groupby_assign_coords(): @pytest.mark.parametrize("dim", ["x", "y", "z", "month"]) @pytest.mark.parametrize("obj", [repr_da, repr_da.to_dataset(name="a")]) -def test_groupby_repr(obj, dim): +def test_groupby_repr(obj, dim) -> None: actual = repr(obj.groupby(dim)) expected = f"{obj.__class__.__name__}GroupBy" expected += ", grouped over %r" % dim @@ -403,7 +403,7 @@ def test_groupby_repr(obj, dim): @pytest.mark.parametrize("obj", [repr_da, repr_da.to_dataset(name="a")]) -def test_groupby_repr_datetime(obj): +def test_groupby_repr_datetime(obj) -> None: actual = repr(obj.groupby("t.month")) expected = f"{obj.__class__.__name__}GroupBy" expected += ", grouped over 'month'" @@ -412,7 +412,7 @@ def test_groupby_repr_datetime(obj): assert actual == expected -def test_groupby_drops_nans(): +def test_groupby_drops_nans() -> None: # GH2383 # nan in 2D data variable (requires stacking) ds = xr.Dataset( @@ -454,9 +454,9 @@ def test_groupby_drops_nans(): # NaN in non-dimensional coordinate array = xr.DataArray([1, 2, 3], [("x", [1, 2, 3])]) array["x1"] = ("x", [1, 1, np.nan]) - expected = xr.DataArray(3, [("x1", [1])]) + expected_da = xr.DataArray(3, [("x1", [1])]) actual = array.groupby("x1").sum() - assert_equal(expected, actual) + assert_equal(expected_da, actual) # NaT in non-dimensional coordinate array["t"] = ( @@ -467,18 +467,18 @@ def test_groupby_drops_nans(): np.datetime64("NaT"), ], ) - expected = xr.DataArray(3, [("t", [np.datetime64("2001-01-01")])]) + expected_da = xr.DataArray(3, [("t", [np.datetime64("2001-01-01")])]) actual = array.groupby("t").sum() - assert_equal(expected, actual) + assert_equal(expected_da, actual) # test for repeated coordinate labels array = xr.DataArray([0, 1, 2, 4, 3, 4], [("x", [np.nan, 1, 1, np.nan, 2, np.nan])]) - expected = xr.DataArray([3, 3], [("x", [1, 2])]) + expected_da = xr.DataArray([3, 3], [("x", [1, 2])]) actual = array.groupby("x").sum() - assert_equal(expected, actual) + assert_equal(expected_da, actual) -def test_groupby_grouping_errors(): +def test_groupby_grouping_errors() -> None: dataset = xr.Dataset({"foo": ("x", [1, 1, 1])}, {"x": [1, 2, 3]}) with pytest.raises( ValueError, match=r"None of the data falls within bins with edges" @@ -503,7 +503,7 @@ def test_groupby_grouping_errors(): dataset.to_array().groupby(dataset.foo * np.nan) -def test_groupby_reduce_dimension_error(array): +def test_groupby_reduce_dimension_error(array) -> None: grouped = array.groupby("y") with pytest.raises(ValueError, match=r"cannot reduce over dimensions"): 
         grouped.mean()
@@ -521,12 +521,12 @@ def test_groupby_reduce_dimension_error(array):
     assert_allclose(array.mean(["x", "z"]), grouped.reduce(np.mean, ["x", "z"]))
 
 
-def test_groupby_multiple_string_args(array):
+def test_groupby_multiple_string_args(array) -> None:
     with pytest.raises(TypeError):
         array.groupby("x", "y")
 
 
-def test_groupby_bins_timeseries():
+def test_groupby_bins_timeseries() -> None:
     ds = xr.Dataset()
     ds["time"] = xr.DataArray(
         pd.date_range("2010-08-01", "2010-08-15", freq="15min"), dims="time"
@@ -542,7 +542,7 @@ def test_groupby_bins_timeseries():
     assert_identical(actual, expected)
 
 
-def test_groupby_none_group_name():
+def test_groupby_none_group_name() -> None:
     # GH158
     # xarray should not fail if a DataArray's name attribute is None
 
@@ -554,7 +554,7 @@ def test_groupby_none_group_name():
     assert "group" in mean.dims
 
 
-def test_groupby_getitem(dataset):
+def test_groupby_getitem(dataset) -> None:
 
     assert_identical(dataset.sel(x="a"), dataset.groupby("x")["a"])
     assert_identical(dataset.sel(z=1), dataset.groupby("z")[1])
@@ -567,7 +567,7 @@ def test_groupby_getitem(dataset):
     assert_identical(expected, actual)
 
 
-def test_groupby_dataset():
+def test_groupby_dataset() -> None:
     data = Dataset(
         {"z": (["x", "y"], np.random.randn(3, 5))},
         {"x": ("x", list("abc")), "c": ("x", [0, 1, 0]), "y": range(5)},
@@ -593,7 +593,7 @@ def identity(x):
     assert_equal(data, actual)
 
 
-def test_groupby_dataset_returns_new_type():
+def test_groupby_dataset_returns_new_type() -> None:
     data = Dataset({"z": (["x", "y"], np.random.randn(3, 5))})
 
     actual = data.groupby("x").map(lambda ds: ds["z"])
@@ -601,11 +601,11 @@ def test_groupby_dataset_returns_new_type():
     assert_identical(expected, actual)
 
     actual = data["z"].groupby("x").map(lambda x: x.to_dataset())
-    expected = data
-    assert_identical(expected, actual)
+    expected_ds = data
+    assert_identical(expected_ds, actual)
 
 
-def test_groupby_dataset_iter():
+def test_groupby_dataset_iter() -> None:
     data = create_test_data()
     for n, (t, sub) in enumerate(list(data.groupby("dim1"))[:3]):
         assert data["dim1"][n] == t
@@ -614,7 +614,7 @@ def test_groupby_dataset_iter():
         assert_equal(data["var3"][:, n], sub["var3"])
 
 
-def test_groupby_dataset_errors():
+def test_groupby_dataset_errors() -> None:
     data = create_test_data()
     with pytest.raises(TypeError, match=r"`group` must be"):
         data.groupby(np.arange(10))
@@ -624,7 +624,7 @@ def test_groupby_dataset_errors():
         data.groupby(data.coords["dim1"].to_index())
 
 
-def test_groupby_dataset_reduce():
+def test_groupby_dataset_reduce() -> None:
     data = Dataset(
         {
             "xy": (["x", "y"], np.random.randn(3, 4)),
@@ -654,7 +654,7 @@ def test_groupby_dataset_reduce():
     assert_allclose(expected, actual)
 
 
-def test_groupby_dataset_math():
+def test_groupby_dataset_math() -> None:
     def reorder_dims(x):
         return x.transpose("dim1", "dim2", "dim3", "time")
 
@@ -710,7 +710,7 @@ def reorder_dims(x):
         ds + ds.groupby("time.month")
 
 
-def test_groupby_dataset_math_virtual():
+def test_groupby_dataset_math_virtual() -> None:
     ds = Dataset({"x": ("t", [1, 2, 3])}, {"t": pd.date_range("20100101", periods=3)})
     grouped = ds.groupby("t.day")
     actual = grouped - grouped.mean(...)
@@ -718,7 +718,7 @@ def test_groupby_dataset_math_virtual():
     assert_identical(actual, expected)
 
 
-def test_groupby_dataset_nan():
+def test_groupby_dataset_nan() -> None:
     # nan should be excluded from groupby
     ds = Dataset({"foo": ("x", [1, 2, 3, 4])}, {"bar": ("x", [1, 1, 2, np.nan])})
     actual = ds.groupby("bar").mean(...)
@@ -726,7 +726,7 @@ def test_groupby_dataset_nan():
     assert_identical(actual, expected)
 
 
-def test_groupby_dataset_order():
+def test_groupby_dataset_order() -> None:
     # groupby should preserve variables order
     ds = Dataset()
     for vn in ["a", "b", "c"]:
diff --git a/xarray/tests/test_indexes.py b/xarray/tests/test_indexes.py
index c8ba72a253f..18f76df765d 100644
--- a/xarray/tests/test_indexes.py
+++ b/xarray/tests/test_indexes.py
@@ -7,7 +7,7 @@
 from xarray.core.variable import IndexVariable
 
 
-def test_asarray_tuplesafe():
+def test_asarray_tuplesafe() -> None:
     res = _asarray_tuplesafe(("a", 1))
     assert isinstance(res, np.ndarray)
     assert res.ndim == 0
@@ -20,14 +20,14 @@
 
 
 class TestPandasIndex:
-    def test_constructor(self):
+    def test_constructor(self) -> None:
         pd_idx = pd.Index([1, 2, 3])
         index = PandasIndex(pd_idx, "x")
 
         assert index.index is pd_idx
         assert index.dim == "x"
 
-    def test_from_variables(self):
+    def test_from_variables(self) -> None:
         var = xr.Variable(
             "x", [1, 2, 3], attrs={"unit": "m"}, encoding={"dtype": np.int32}
         )
@@ -46,7 +46,7 @@ def test_from_variables(self):
         ):
             PandasIndex.from_variables({"foo": var2})
 
-    def test_from_pandas_index(self):
+    def test_from_pandas_index(self) -> None:
         pd_idx = pd.Index([1, 2, 3], name="foo")
 
         index, index_vars = PandasIndex.from_pandas_index(pd_idx, "x")
@@ -68,7 +68,7 @@ def to_pandas_index(self):
         index = PandasIndex(pd_idx, "x")
         assert index.to_pandas_index() is pd_idx
 
-    def test_query(self):
+    def test_query(self) -> None:
         # TODO: add tests that aren't just for edge cases
         index = PandasIndex(pd.Index([1, 2, 3]), "x")
         with pytest.raises(KeyError, match=r"not all values found"):
@@ -78,7 +78,7 @@ def test_query(self):
         with pytest.raises(ValueError, match=r"does not have a MultiIndex"):
             index.query({"x": {"one": 0}})
 
-    def test_query_datetime(self):
+    def test_query_datetime(self) -> None:
         index = PandasIndex(
             pd.to_datetime(["2000-01-01", "2001-01-01", "2002-01-01"]), "x"
         )
@@ -89,7 +89,7 @@ def test_query_datetime(self):
         actual = index.query({"x": index.to_pandas_index().to_numpy()[1]})
         assert actual == expected
 
-    def test_query_unsorted_datetime_index_raises(self):
+    def test_query_unsorted_datetime_index_raises(self) -> None:
         index = PandasIndex(pd.to_datetime(["2001", "2000", "2002"]), "x")
         with pytest.raises(KeyError):
             # pandas will try to convert this into an array indexer. We should
@@ -97,26 +97,26 @@ def test_query_unsorted_datetime_index_raises(self):
             # slice is always a view.
index.query({"x": slice("2001", "2002")}) - def test_equals(self): + def test_equals(self) -> None: index1 = PandasIndex([1, 2, 3], "x") index2 = PandasIndex([1, 2, 3], "x") assert index1.equals(index2) is True - def test_union(self): + def test_union(self) -> None: index1 = PandasIndex([1, 2, 3], "x") index2 = PandasIndex([4, 5, 6], "y") actual = index1.union(index2) assert actual.index.equals(pd.Index([1, 2, 3, 4, 5, 6])) assert actual.dim == "x" - def test_intersection(self): + def test_intersection(self) -> None: index1 = PandasIndex([1, 2, 3], "x") index2 = PandasIndex([2, 3, 4], "y") actual = index1.intersection(index2) assert actual.index.equals(pd.Index([2, 3])) assert actual.dim == "x" - def test_copy(self): + def test_copy(self) -> None: expected = PandasIndex([1, 2, 3], "x") actual = expected.copy() @@ -124,7 +124,7 @@ def test_copy(self): assert actual.index is not expected.index assert actual.dim == expected.dim - def test_getitem(self): + def test_getitem(self) -> None: pd_idx = pd.Index([1, 2, 3]) expected = PandasIndex(pd_idx, "x") actual = expected[1:] @@ -134,7 +134,7 @@ def test_getitem(self): class TestPandasMultiIndex: - def test_from_variables(self): + def test_from_variables(self) -> None: v_level1 = xr.Variable( "x", [1, 2, 3], attrs={"unit": "m"}, encoding={"dtype": np.int32} ) @@ -165,7 +165,7 @@ def test_from_variables(self): with pytest.raises(ValueError, match=r"unmatched dimensions for variables.*"): PandasMultiIndex.from_variables({"level1": v_level1, "level3": v_level3}) - def test_from_pandas_index(self): + def test_from_pandas_index(self) -> None: pd_idx = pd.MultiIndex.from_arrays([[1, 2, 3], [4, 5, 6]], names=("foo", "bar")) index, index_vars = PandasMultiIndex.from_pandas_index(pd_idx, "x") @@ -177,7 +177,7 @@ def test_from_pandas_index(self): xr.testing.assert_identical(index_vars["foo"], IndexVariable("x", [1, 2, 3])) xr.testing.assert_identical(index_vars["bar"], IndexVariable("x", [4, 5, 6])) - def test_query(self): + def test_query(self) -> None: index = PandasMultiIndex( pd.MultiIndex.from_product([["a", "b"], [1, 2]], names=("one", "two")), "x" ) diff --git a/xarray/tests/test_indexing.py b/xarray/tests/test_indexing.py index 6e4fd320029..48099e6c94f 100644 --- a/xarray/tests/test_indexing.py +++ b/xarray/tests/test_indexing.py @@ -18,7 +18,7 @@ def set_to_zero(self, x, i): x[i] = 0 return x - def test_expanded_indexer(self): + def test_expanded_indexer(self) -> None: x = np.random.randn(10, 11, 12, 13, 14) y = np.arange(5) arr = ReturnItem() @@ -40,7 +40,7 @@ def test_expanded_indexer(self): with pytest.raises(IndexError, match=r"too many indices"): indexing.expanded_indexer(arr[1, 2, 3], 2) - def test_stacked_multiindex_min_max(self): + def test_stacked_multiindex_min_max(self) -> None: data = np.random.randn(3, 23, 4) da = DataArray( data, @@ -55,7 +55,7 @@ def test_stacked_multiindex_min_max(self): assert_array_equal(da2.loc["a", s.max()], data[2, 22, 0]) assert_array_equal(da2.loc["b", s.min()], data[0, 0, 1]) - def test_group_indexers_by_index(self): + def test_group_indexers_by_index(self) -> None: mindex = pd.MultiIndex.from_product([["a", "b"], [1, 2]], names=("one", "two")) data = DataArray( np.zeros((4, 2, 2)), coords={"x": mindex, "y": [1, 2]}, dims=("x", "y", "z") @@ -79,8 +79,8 @@ def test_group_indexers_by_index(self): with pytest.raises(ValueError, match=r"cannot supply.*"): indexing.group_indexers_by_index(data, {"z": 1}, method="nearest") - def test_remap_label_indexers(self): - def test_indexer(data, x, expected_pos, 
expected_idx=None): + def test_remap_label_indexers(self) -> None: + def test_indexer(data, x, expected_pos, expected_idx=None) -> None: pos, new_idx_vars = indexing.remap_label_indexers(data, {"x": x}) idx, _ = new_idx_vars.get("x", (None, None)) if idx is not None: @@ -139,7 +139,7 @@ def test_indexer(data, x, expected_pos, expected_idx=None): pd.MultiIndex.from_product([[1, 2], [-1, -2]]), ) - def test_read_only_view(self): + def test_read_only_view(self) -> None: arr = DataArray( np.random.rand(3, 3), @@ -153,7 +153,7 @@ def test_read_only_view(self): class TestLazyArray: - def test_slice_slice(self): + def test_slice_slice(self) -> None: arr = ReturnItem() for size in [100, 99]: # We test even/odd size cases @@ -183,7 +183,7 @@ def test_slice_slice(self): actual = x[new_slice] assert_array_equal(expected, actual) - def test_lazily_indexed_array(self): + def test_lazily_indexed_array(self) -> None: original = np.random.rand(10, 20, 30) x = indexing.NumpyIndexingAdapter(original) v = Variable(["i", "j", "k"], original) @@ -250,7 +250,7 @@ def test_lazily_indexed_array(self): assert isinstance(actual._data, indexing.LazilyIndexedArray) assert isinstance(actual._data.array, indexing.NumpyIndexingAdapter) - def test_vectorized_lazily_indexed_array(self): + def test_vectorized_lazily_indexed_array(self) -> None: original = np.random.rand(10, 20, 30) x = indexing.NumpyIndexingAdapter(original) v_eager = Variable(["i", "j", "k"], x) @@ -300,14 +300,14 @@ def check_indexing(v_eager, v_lazy, indexers): class TestCopyOnWriteArray: - def test_setitem(self): + def test_setitem(self) -> None: original = np.arange(10) wrapped = indexing.CopyOnWriteArray(original) wrapped[B[:]] = 0 assert_array_equal(original, np.arange(10)) assert_array_equal(wrapped, np.zeros(10)) - def test_sub_array(self): + def test_sub_array(self) -> None: original = np.arange(10) wrapped = indexing.CopyOnWriteArray(original) child = wrapped[B[:5]] @@ -317,20 +317,20 @@ def test_sub_array(self): assert_array_equal(wrapped, np.arange(10)) assert_array_equal(child, np.zeros(5)) - def test_index_scalar(self): + def test_index_scalar(self) -> None: # regression test for GH1374 x = indexing.CopyOnWriteArray(np.array(["foo", "bar"])) assert np.array(x[B[0]][B[()]]) == "foo" class TestMemoryCachedArray: - def test_wrapper(self): + def test_wrapper(self) -> None: original = indexing.LazilyIndexedArray(np.arange(10)) wrapped = indexing.MemoryCachedArray(original) assert_array_equal(wrapped, np.arange(10)) assert isinstance(wrapped.array, indexing.NumpyIndexingAdapter) - def test_sub_array(self): + def test_sub_array(self) -> None: original = indexing.LazilyIndexedArray(np.arange(10)) wrapped = indexing.MemoryCachedArray(original) child = wrapped[B[:5]] @@ -339,19 +339,19 @@ def test_sub_array(self): assert isinstance(child.array, indexing.NumpyIndexingAdapter) assert isinstance(wrapped.array, indexing.LazilyIndexedArray) - def test_setitem(self): + def test_setitem(self) -> None: original = np.arange(10) wrapped = indexing.MemoryCachedArray(original) wrapped[B[:]] = 0 assert_array_equal(original, np.zeros(10)) - def test_index_scalar(self): + def test_index_scalar(self) -> None: # regression test for GH1374 x = indexing.MemoryCachedArray(np.array(["foo", "bar"])) assert np.array(x[B[0]][B[()]]) == "foo" -def test_base_explicit_indexer(): +def test_base_explicit_indexer() -> None: with pytest.raises(TypeError): indexing.ExplicitIndexer(()) @@ -367,7 +367,7 @@ class Subclass(indexing.ExplicitIndexer): "indexer_cls", 
     [indexing.BasicIndexer, indexing.OuterIndexer, indexing.VectorizedIndexer],
 )
-def test_invalid_for_all(indexer_cls):
+def test_invalid_for_all(indexer_cls) -> None:
     with pytest.raises(TypeError):
         indexer_cls(None)
     with pytest.raises(TypeError):
@@ -409,7 +409,7 @@ def check_array2d(indexer_cls):
     np.testing.assert_array_equal(value, array)
 
 
-def test_basic_indexer():
+def test_basic_indexer() -> None:
     check_integer(indexing.BasicIndexer)
     check_slice(indexing.BasicIndexer)
     with pytest.raises(TypeError):
@@ -418,7 +418,7 @@ def test_basic_indexer():
         check_array2d(indexing.BasicIndexer)
 
 
-def test_outer_indexer():
+def test_outer_indexer() -> None:
     check_integer(indexing.OuterIndexer)
     check_slice(indexing.OuterIndexer)
     check_array1d(indexing.OuterIndexer)
@@ -426,7 +426,7 @@ def test_outer_indexer():
         check_array2d(indexing.OuterIndexer)
 
 
-def test_vectorized_indexer():
+def test_vectorized_indexer() -> None:
     with pytest.raises(TypeError):
         check_integer(indexing.VectorizedIndexer)
     check_slice(indexing.VectorizedIndexer)
@@ -450,7 +450,7 @@ def setup(self):
             slice(None),
         ]
 
-    def test_arrayize_vectorized_indexer(self):
+    def test_arrayize_vectorized_indexer(self) -> None:
         for i, j, k in itertools.product(self.indexers, repeat=3):
             vindex = indexing.VectorizedIndexer((i, j, k))
             vindex_array = indexing._arrayize_vectorized_indexer(
@@ -530,7 +530,7 @@ def get_indexers(shape, mode):
 @pytest.mark.parametrize(
     "sl", [slice(1, -1, 1), slice(None, -1, 2), slice(-1, 1, -1), slice(-1, 1, -2)]
 )
-def test_decompose_slice(size, sl):
+def test_decompose_slice(size, sl) -> None:
     x = np.arange(size)
     slice1, slice2 = indexing._decompose_slice(sl, size)
     expected = x[sl]
@@ -562,7 +562,7 @@ def test_decompose_slice(size, sl):
         indexing.IndexingSupport.VECTORIZED,
     ],
 )
-def test_decompose_indexers(shape, indexer_mode, indexing_support):
+def test_decompose_indexers(shape, indexer_mode, indexing_support) -> None:
     data = np.random.randn(*shape)
     indexer = get_indexers(shape, indexer_mode)
 
@@ -580,7 +580,7 @@ def test_decompose_indexers(shape, indexer_mode, indexing_support):
     np.testing.assert_array_equal(expected, array)
 
 
-def test_implicit_indexing_adapter():
+def test_implicit_indexing_adapter() -> None:
     array = np.arange(10, dtype=np.int64)
     implicit = indexing.ImplicitToExplicitIndexingAdapter(
         indexing.NumpyIndexingAdapter(array), indexing.BasicIndexer
@@ -589,7 +589,7 @@ def test_implicit_indexing_adapter():
     np.testing.assert_array_equal(array, implicit[:])
 
 
-def test_implicit_indexing_adapter_copy_on_write():
+def test_implicit_indexing_adapter_copy_on_write() -> None:
     array = np.arange(10, dtype=np.int64)
     implicit = indexing.ImplicitToExplicitIndexingAdapter(
         indexing.CopyOnWriteArray(array)
@@ -597,7 +597,7 @@
     assert isinstance(implicit[:], indexing.ImplicitToExplicitIndexingAdapter)
 
 
-def test_outer_indexer_consistency_with_broadcast_indexes_vectorized():
+def test_outer_indexer_consistency_with_broadcast_indexes_vectorized() -> None:
     def nonzero(x):
         if isinstance(x, np.ndarray) and x.dtype.kind == "b":
             x = x.nonzero()[0]
@@ -635,7 +635,7 @@ def nonzero(x):
             np.testing.assert_array_equal(actual_data, expected_data)
 
 
-def test_create_mask_outer_indexer():
+def test_create_mask_outer_indexer() -> None:
     indexer = indexing.OuterIndexer((np.array([0, -1, 2]),))
     expected = np.array([False, True, False])
     actual = indexing.create_mask(indexer, (5,))
@@ -647,7 +647,7 @@ def test_create_mask_outer_indexer():
     np.testing.assert_array_equal(expected, actual)
 
 
-def test_create_mask_vectorized_indexer():
+def test_create_mask_vectorized_indexer() -> None:
     indexer = indexing.VectorizedIndexer((np.array([0, -1, 2]), np.array([0, 1, -1])))
     expected = np.array([False, True, True])
     actual = indexing.create_mask(indexer, (5,))
@@ -661,7 +661,7 @@ def test_create_mask_vectorized_indexer():
     np.testing.assert_array_equal(expected, actual)
 
 
-def test_create_mask_basic_indexer():
+def test_create_mask_basic_indexer() -> None:
     indexer = indexing.BasicIndexer((-1,))
     actual = indexing.create_mask(indexer, (3,))
     np.testing.assert_array_equal(True, actual)
@@ -671,7 +671,7 @@ def test_create_mask_basic_indexer():
     np.testing.assert_array_equal(False, actual)
 
 
-def test_create_mask_dask():
+def test_create_mask_dask() -> None:
     da = pytest.importorskip("dask.array")
 
     indexer = indexing.OuterIndexer((1, slice(2), np.array([0, -1, 2])))
@@ -682,21 +682,21 @@ def test_create_mask_dask():
     assert actual.chunks == ((1, 1), (2, 1))
     np.testing.assert_array_equal(expected, actual)
 
-    indexer = indexing.VectorizedIndexer(
+    indexer_vec = indexing.VectorizedIndexer(
         (np.array([0, -1, 2]), slice(None), np.array([0, 1, -1]))
     )
     expected = np.array([[False, True, True]] * 2).T
     actual = indexing.create_mask(
-        indexer, (5, 2), da.empty((3, 2), chunks=((3,), (2,)))
+        indexer_vec, (5, 2), da.empty((3, 2), chunks=((3,), (2,)))
     )
     assert isinstance(actual, da.Array)
     np.testing.assert_array_equal(expected, actual)
 
     with pytest.raises(ValueError):
-        indexing.create_mask(indexer, (5, 2), da.empty((5,), chunks=(1,)))
+        indexing.create_mask(indexer_vec, (5, 2), da.empty((5,), chunks=(1,)))
 
 
-def test_create_mask_error():
+def test_create_mask_error() -> None:
     with pytest.raises(TypeError, match=r"unexpected key type"):
         indexing.create_mask((1, 2), (3, 4))
 
@@ -713,12 +713,12 @@
         (np.array([0, -1, -1, -1, 1]), np.array([0, 0, 0, 0, 1])),
     ],
 )
-def test_posify_mask_subindexer(indices, expected):
+def test_posify_mask_subindexer(indices, expected) -> None:
     actual = indexing._posify_mask_subindexer(indices)
     np.testing.assert_array_equal(expected, actual)
 
 
-def test_indexing_1d_object_array():
+def test_indexing_1d_object_array() -> None:
     items = (np.arange(3), np.arange(6))
     arr = DataArray(np.array(items, dtype=object))
 
diff --git a/xarray/tests/test_nputils.py b/xarray/tests/test_nputils.py
index 3c9c92ae2ba..ba8e70ea514 100644
--- a/xarray/tests/test_nputils.py
+++ b/xarray/tests/test_nputils.py
@@ -4,13 +4,13 @@
 from xarray.core.nputils import NumpyVIndexAdapter, _is_contiguous
 
 
-def test_is_contiguous():
+def test_is_contiguous() -> None:
     assert _is_contiguous([1])
     assert _is_contiguous([1, 2, 3])
     assert not _is_contiguous([1, 3])
 
 
-def test_vindex():
+def test_vindex() -> None:
     x = np.arange(3 * 4 * 5).reshape((3, 4, 5))
     vindex = NumpyVIndexAdapter(x)
 
diff --git a/xarray/tests/test_options.py b/xarray/tests/test_options.py
index 19f74476ced..be71500dc0a 100644
--- a/xarray/tests/test_options.py
+++ b/xarray/tests/test_options.py
@@ -7,12 +7,12 @@
 from xarray.tests.test_dataset import create_test_data
 
 
-def test_invalid_option_raises():
+def test_invalid_option_raises() -> None:
     with pytest.raises(ValueError):
         xarray.set_options(not_a_valid_options=True)
 
 
-def test_display_width():
+def test_display_width() -> None:
     with pytest.raises(ValueError):
         xarray.set_options(display_width=0)
     with pytest.raises(ValueError):
@@ -21,14 +21,14 @@ def test_display_width():
         xarray.set_options(display_width=3.5)
 
 
-def test_arithmetic_join():
+def test_arithmetic_join() -> None:
     with pytest.raises(ValueError):
         xarray.set_options(arithmetic_join="invalid")
     with xarray.set_options(arithmetic_join="exact"):
         assert OPTIONS["arithmetic_join"] == "exact"
 
 
-def test_enable_cftimeindex():
+def test_enable_cftimeindex() -> None:
     with pytest.raises(ValueError):
         xarray.set_options(enable_cftimeindex=None)
     with pytest.warns(FutureWarning, match="no-op"):
@@ -36,7 +36,7 @@ def test_enable_cftimeindex():
         assert OPTIONS["enable_cftimeindex"]
 
 
-def test_file_cache_maxsize():
+def test_file_cache_maxsize() -> None:
     with pytest.raises(ValueError):
         xarray.set_options(file_cache_maxsize=0)
     original_size = FILE_CACHE.maxsize
@@ -45,7 +45,7 @@ def test_file_cache_maxsize():
     assert FILE_CACHE.maxsize == original_size
 
 
-def test_keep_attrs():
+def test_keep_attrs() -> None:
     with pytest.raises(ValueError):
         xarray.set_options(keep_attrs="invalid_str")
     with xarray.set_options(keep_attrs=True):
@@ -57,7 +57,7 @@ def test_keep_attrs():
         assert not _get_keep_attrs(default=False)
 
 
-def test_nested_options():
+def test_nested_options() -> None:
     original = OPTIONS["display_width"]
     with xarray.set_options(display_width=1):
         assert OPTIONS["display_width"] == 1
@@ -67,7 +67,7 @@ def test_nested_options():
     assert OPTIONS["display_width"] == original
 
 
-def test_display_style():
+def test_display_style() -> None:
     original = "html"
     assert OPTIONS["display_style"] == original
     with pytest.raises(ValueError):
@@ -90,7 +90,7 @@ def create_test_dataarray_attrs(seed=0, var="var1"):
 
 
 class TestAttrRetention:
-    def test_dataset_attr_retention(self):
+    def test_dataset_attr_retention(self) -> None:
         # Use .mean() for all tests: a typical reduction operation
         ds = create_test_dataset_attrs()
         original_attrs = ds.attrs
@@ -110,7 +110,7 @@ def test_dataset_attr_retention(self):
             result = ds.mean()
             assert result.attrs == {}
 
-    def test_dataarray_attr_retention(self):
+    def test_dataarray_attr_retention(self) -> None:
         # Use .mean() for all tests: a typical reduction operation
        da = create_test_dataarray_attrs()
        original_attrs = da.attrs
@@ -130,7 +130,7 @@ def test_dataarray_attr_retention(self):
             result = da.mean()
             assert result.attrs == {}
 
-    def test_groupby_attr_retention(self):
+    def test_groupby_attr_retention(self) -> None:
         da = xarray.DataArray([1, 2, 3], [("x", [1, 1, 2])])
         da.attrs = {"attr1": 5, "attr2": "history", "attr3": {"nested": "more_info"}}
         original_attrs = da.attrs
@@ -151,7 +151,7 @@ def test_groupby_attr_retention(self):
             result = da.groupby("x").sum()
             assert result.attrs == {}
 
-    def test_concat_attr_retention(self):
+    def test_concat_attr_retention(self) -> None:
         ds1 = create_test_dataset_attrs()
         ds2 = create_test_dataset_attrs()
         ds2.attrs = {"wrong": "attributes"}
@@ -164,7 +164,7 @@ def test_concat_attr_retention(self):
         assert result.attrs == original_attrs
 
     @pytest.mark.xfail
-    def test_merge_attr_retention(self):
+    def test_merge_attr_retention(self) -> None:
         da1 = create_test_dataarray_attrs(var="var1")
         da2 = create_test_dataarray_attrs(var="var2")
         da2.attrs = {"wrong": "attributes"}
@@ -175,7 +175,7 @@ def test_merge_attr_retention(self):
         result = merge([da1, da2])
         assert result.attrs == original_attrs
 
-    def test_display_style_text(self):
+    def test_display_style_text(self) -> None:
         ds = create_test_dataset_attrs()
         with xarray.set_options(display_style="text"):
             text = ds._repr_html_()
@@ -183,21 +183,21 @@ def test_display_style_text(self):
             assert "&#x27;nested&#x27;" in text
             assert "&lt;xarray.Dataset&gt;" in text
 
-    def test_display_style_html(self):
+    def test_display_style_html(self) -> None:
         ds = create_test_dataset_attrs()
         with xarray.set_options(display_style="html"):
             html = ds._repr_html_()
             assert html.startswith("<div>")
             assert "&#x27;nested&#x27;" in html
 
-    def test_display_dataarray_style_text(self):
+    def test_display_dataarray_style_text(self) -> None:
         da = create_test_dataarray_attrs()
         with xarray.set_options(display_style="text"):
             text = da._repr_html_()
             assert text.startswith("<pre>")
                 assert "<xarray.DataArray 'var1'" in text
     
    -    def test_display_dataarray_style_html(self):
    +    def test_display_dataarray_style_html(self) -> None:
             da = create_test_dataarray_attrs()
             with xarray.set_options(display_style="html"):
                 html = da._repr_html_()
    diff --git a/xarray/tests/test_plugins.py b/xarray/tests/test_plugins.py
    index b7a5f9405d1..7f77a677d6d 100644
    --- a/xarray/tests/test_plugins.py
    +++ b/xarray/tests/test_plugins.py
    @@ -39,13 +39,13 @@ def dummy_duplicated_entrypoints():
     
     
     @pytest.mark.filterwarnings("ignore:Found")
    -def test_remove_duplicates(dummy_duplicated_entrypoints):
    +def test_remove_duplicates(dummy_duplicated_entrypoints) -> None:
         with pytest.warns(RuntimeWarning):
             entrypoints = plugins.remove_duplicates(dummy_duplicated_entrypoints)
         assert len(entrypoints) == 2
     
     
    -def test_broken_plugin():
    +def test_broken_plugin() -> None:
         broken_backend = pkg_resources.EntryPoint.parse(
             "broken_backend = xarray.tests.test_plugins:backend_1"
         )
    @@ -56,7 +56,7 @@ def test_broken_plugin():
         assert "Engine 'broken_backend'" in message
     
     
    -def test_remove_duplicates_warnings(dummy_duplicated_entrypoints):
    +def test_remove_duplicates_warnings(dummy_duplicated_entrypoints) -> None:
     
         with pytest.warns(RuntimeWarning) as record:
             _ = plugins.remove_duplicates(dummy_duplicated_entrypoints)
    @@ -69,7 +69,7 @@ def test_remove_duplicates_warnings(dummy_duplicated_entrypoints):
     
     
     @mock.patch("pkg_resources.EntryPoint.load", mock.MagicMock(return_value=None))
    -def test_backends_dict_from_pkg():
    +def test_backends_dict_from_pkg() -> None:
         specs = [
             "engine1 = xarray.tests.test_plugins:backend_1",
             "engine2 = xarray.tests.test_plugins:backend_2",
    @@ -80,7 +80,7 @@ def test_backends_dict_from_pkg():
         assert engines.keys() == set(("engine1", "engine2"))
     
     
    -def test_set_missing_parameters():
    +def test_set_missing_parameters() -> None:
         backend_1 = DummyBackendEntrypoint1
         backend_2 = DummyBackendEntrypoint2
         backend_2.open_dataset_parameters = ("filename_or_obj",)
    @@ -96,28 +96,28 @@ def test_set_missing_parameters():
         plugins.set_missing_parameters({"engine": backend})
         assert backend.open_dataset_parameters == ("filename_or_obj", "decoder")
     
    -    backend = DummyBackendEntrypointArgs()
    -    backend.open_dataset_parameters = ("filename_or_obj", "decoder")
    -    plugins.set_missing_parameters({"engine": backend})
    -    assert backend.open_dataset_parameters == ("filename_or_obj", "decoder")
    +    backend_args = DummyBackendEntrypointArgs()
    +    backend_args.open_dataset_parameters = ("filename_or_obj", "decoder")
    +    plugins.set_missing_parameters({"engine": backend_args})
    +    assert backend_args.open_dataset_parameters == ("filename_or_obj", "decoder")
     
     
    -def test_set_missing_parameters_raise_error():
    +def test_set_missing_parameters_raise_error() -> None:
     
         backend = DummyBackendEntrypointKwargs()
         with pytest.raises(TypeError):
             plugins.set_missing_parameters({"engine": backend})
     
    -    backend = DummyBackendEntrypointArgs()
    +    backend_args = DummyBackendEntrypointArgs()
         with pytest.raises(TypeError):
    -        plugins.set_missing_parameters({"engine": backend})
    +        plugins.set_missing_parameters({"engine": backend_args})
     
     
     @mock.patch(
         "pkg_resources.EntryPoint.load",
         mock.MagicMock(return_value=DummyBackendEntrypoint1),
     )
    -def test_build_engines():
    +def test_build_engines() -> None:
         dummy_pkg_entrypoint = pkg_resources.EntryPoint.parse(
             "cfgrib = xarray.tests.test_plugins:backend_1"
         )
    @@ -134,7 +134,7 @@ def test_build_engines():
         "pkg_resources.EntryPoint.load",
         mock.MagicMock(return_value=DummyBackendEntrypoint1),
     )
    -def test_build_engines_sorted():
    +def test_build_engines_sorted() -> None:
         dummy_pkg_entrypoints = [
             pkg_resources.EntryPoint.parse(
                 "dummy2 = xarray.tests.test_plugins:backend_1",
    @@ -163,7 +163,7 @@ def test_build_engines_sorted():
         "xarray.backends.plugins.list_engines",
         mock.MagicMock(return_value={"dummy": DummyBackendEntrypointArgs()}),
     )
    -def test_no_matching_engine_found():
    +def test_no_matching_engine_found() -> None:
         with pytest.raises(ValueError, match=r"did not find a match in any"):
             plugins.guess_engine("not-valid")
     
    @@ -175,7 +175,7 @@ def test_no_matching_engine_found():
         "xarray.backends.plugins.list_engines",
         mock.MagicMock(return_value={}),
     )
    -def test_engines_not_installed():
    +def test_engines_not_installed() -> None:
         with pytest.raises(ValueError, match=r"xarray is unable to open"):
             plugins.guess_engine("not-valid")
     
    diff --git a/xarray/tests/test_print_versions.py b/xarray/tests/test_print_versions.py
    index 01c30e5e301..42ebe5b2ac2 100644
    --- a/xarray/tests/test_print_versions.py
    +++ b/xarray/tests/test_print_versions.py
    @@ -3,7 +3,7 @@
     import xarray
     
     
    -def test_show_versions():
    +def test_show_versions() -> None:
         f = io.StringIO()
         xarray.show_versions(file=f)
         assert "INSTALLED VERSIONS" in f.getvalue()
    diff --git a/xarray/tests/test_testing.py b/xarray/tests/test_testing.py
    index dc1db4dc8d7..2bde7529d1e 100644
    --- a/xarray/tests/test_testing.py
    +++ b/xarray/tests/test_testing.py
    @@ -29,7 +29,7 @@ def quantity(x):
         has_pint = False
     
     
    -def test_allclose_regression():
    +def test_allclose_regression() -> None:
         x = xr.DataArray(1.01)
         y = xr.DataArray(1.02)
         xr.testing.assert_allclose(x, y, atol=0.01)
    @@ -53,7 +53,7 @@ def test_allclose_regression():
             ),
         ),
     )
    -def test_assert_allclose(obj1, obj2):
    +def test_assert_allclose(obj1, obj2) -> None:
         with pytest.raises(AssertionError):
             xr.testing.assert_allclose(obj1, obj2)
     
    @@ -83,7 +83,7 @@ def test_assert_allclose(obj1, obj2):
             pytest.param(0.0, [1e-17, 2], id="first scalar"),
         ),
     )
    -def test_assert_duckarray_equal_failing(duckarray, obj1, obj2):
    +def test_assert_duckarray_equal_failing(duckarray, obj1, obj2) -> None:
         # TODO: actually check the repr
         a = duckarray(obj1)
         b = duckarray(obj2)
    @@ -119,7 +119,7 @@ def test_assert_duckarray_equal_failing(duckarray, obj1, obj2):
             pytest.param(0.0, [0, 0], id="first scalar"),
         ),
     )
    -def test_assert_duckarray_equal(duckarray, obj1, obj2):
    +def test_assert_duckarray_equal(duckarray, obj1, obj2) -> None:
         a = duckarray(obj1)
         b = duckarray(obj2)
     
    @@ -136,7 +136,7 @@ def test_assert_duckarray_equal(duckarray, obj1, obj2):
             "assert_duckarray_allclose",
         ],
     )
    -def test_ensure_warnings_not_elevated(func):
    +def test_ensure_warnings_not_elevated(func) -> None:
         # make sure warnings are not elevated to errors in the assertion functions
         # e.g. by @pytest.mark.filterwarnings("error")
         # see https://github.com/pydata/xarray/pull/4760#issuecomment-774101639
    diff --git a/xarray/tests/test_tutorial.py b/xarray/tests/test_tutorial.py
    index 225fda08f68..411ad52368d 100644
    --- a/xarray/tests/test_tutorial.py
    +++ b/xarray/tests/test_tutorial.py
    @@ -11,13 +11,15 @@ class TestLoadDataset:
         def setUp(self):
             self.testfile = "tiny"
     
    -    def test_download_from_github(self, tmp_path):
    +    def test_download_from_github(self, tmp_path) -> None:
             cache_dir = tmp_path / tutorial._default_cache_dir_name
             ds = tutorial.open_dataset(self.testfile, cache_dir=cache_dir).load()
             tiny = DataArray(range(5), name="tiny").to_dataset()
             assert_identical(ds, tiny)
     
    -    def test_download_from_github_load_without_cache(self, tmp_path, monkeypatch):
    +    def test_download_from_github_load_without_cache(
    +        self, tmp_path, monkeypatch
    +    ) -> None:
             cache_dir = tmp_path / tutorial._default_cache_dir_name
     
             ds_nocache = tutorial.open_dataset(
    
    From 6cf215cb37100df82130de3a4dae61ac6aeaa72b Mon Sep 17 00:00:00 2001
    From: Maximilian Roos 
    Date: Sat, 21 Aug 2021 12:57:27 -0700
    Subject: [PATCH 2/5]
    
    ---
     doc/whats-new.rst | 3 +++
     1 file changed, 3 insertions(+)
    
    diff --git a/doc/whats-new.rst b/doc/whats-new.rst
    index 4f79a37eb4b..f85393ec84d 100644
    --- a/doc/whats-new.rst
    +++ b/doc/whats-new.rst
    @@ -55,6 +55,9 @@ Internal Changes
  By `Benoit Bovy <https://github.com/benbovy>`_.
     - Fix ``Mapping`` argument typing to allow mypy to pass on ``str`` keys (:pull:`5690`).
  By `Maximilian Roos <https://github.com/max-sixty>`_.
    +- Annotate many of our tests, and fix some of the resulting typing errors. This will
    +  also mean our typing annotations are tested as part of CI. (:pull:`5728`).
+  By `Maximilian Roos <https://github.com/max-sixty>`_.
     - Improve the performance of reprs for large datasets or dataarrays. (:pull:`5661`)
  By `Jimmy Westling <https://github.com/illviljan>`_.
     - Use isort's `float_to_top` config. (:pull:`5695`).
    
    From 33262b53e2e369572173310a4a732209f0dec578 Mon Sep 17 00:00:00 2001
    From: Maximilian Roos 
    Date: Sat, 21 Aug 2021 16:24:13 -0700
    Subject: [PATCH 3/5] fixes for newer numpy version
    
    ---
     xarray/core/merge.py                       |  1 +
     xarray/core/rolling_exp.py                 |  2 +-
     xarray/core/variable.py                    |  1 +
     xarray/tests/test_accessor_dt.py           | 67 +++++++++++-----------
     xarray/tests/test_backends_file_manager.py |  2 +-
     xarray/tests/test_backends_lru_cache.py    |  3 +-
     xarray/tests/test_coding_times.py          |  3 +-
     xarray/tests/test_formatting.py            | 17 +++---
     xarray/tests/test_formatting_html.py       |  3 +-
     xarray/tests/test_groupby.py               |  2 +-
     xarray/tests/test_indexing.py              |  6 +-
     11 files changed, 57 insertions(+), 50 deletions(-)
    
    diff --git a/xarray/core/merge.py b/xarray/core/merge.py
    index 6a3e531174a..a89e767826d 100644
    --- a/xarray/core/merge.py
    +++ b/xarray/core/merge.py
    @@ -1,4 +1,5 @@
     from __future__ import annotations
    +
     from typing import (
         TYPE_CHECKING,
         AbstractSet,
    diff --git a/xarray/core/rolling_exp.py b/xarray/core/rolling_exp.py
    index 4c556e8ba93..0224bae288e 100644
    --- a/xarray/core/rolling_exp.py
    +++ b/xarray/core/rolling_exp.py
    @@ -1,5 +1,5 @@
     from distutils.version import LooseVersion
    -from typing import TYPE_CHECKING, Generic, Mapping, TypeVar, Union, Any
    +from typing import TYPE_CHECKING, Any, Generic, Mapping, TypeVar, Union
     
     import numpy as np
     
    diff --git a/xarray/core/variable.py b/xarray/core/variable.py
    index 9623c392819..9a784ad4d35 100644
    --- a/xarray/core/variable.py
    +++ b/xarray/core/variable.py
    @@ -1,4 +1,5 @@
     from __future__ import annotations
    +
     import copy
     import itertools
     import numbers
    diff --git a/xarray/tests/test_accessor_dt.py b/xarray/tests/test_accessor_dt.py
    index 473f2f32507..135aa058439 100644
    --- a/xarray/tests/test_accessor_dt.py
    +++ b/xarray/tests/test_accessor_dt.py
    @@ -235,21 +235,22 @@ def test_dask_accessor_method(self, method, parameters) -> None:
         def test_seasons(self) -> None:
             dates = pd.date_range(start="2000/01/01", freq="M", periods=12)
             dates = xr.DataArray(dates)
    -        season_list = [
    -            "DJF",
    -            "DJF",
    -            "MAM",
    -            "MAM",
    -            "MAM",
    -            "JJA",
    -            "JJA",
    -            "JJA",
    -            "SON",
    -            "SON",
    -            "SON",
    -            "DJF",
    -        ]
    -        seasons = xr.DataArray(season_list)
    +        seasons = xr.DataArray(
    +            [
    +                "DJF",
    +                "DJF",
    +                "MAM",
    +                "MAM",
    +                "MAM",
    +                "JJA",
    +                "JJA",
    +                "JJA",
    +                "SON",
    +                "SON",
    +                "SON",
    +                "DJF",
    +            ]
    +        )
     
             assert_array_equal(seasons.values, dates.dt.season.values)
     
    @@ -515,23 +516,25 @@ def cftime_date_type(calendar):
     
     @requires_cftime
     def test_seasons(cftime_date_type) -> None:
    -    dates = np.array([cftime_date_type(2000, month, 15) for month in range(1, 13)])
    -    dates = xr.DataArray(dates)
    -    season_list = [
    -        "DJF",
    -        "DJF",
    -        "MAM",
    -        "MAM",
    -        "MAM",
    -        "JJA",
    -        "JJA",
    -        "JJA",
    -        "SON",
    -        "SON",
    -        "SON",
    -        "DJF",
    -    ]
    -    seasons = xr.DataArray(season_list)
    +    dates = xr.DataArray(
    +        np.array([cftime_date_type(2000, month, 15) for month in range(1, 13)])
    +    )
    +    seasons = xr.DataArray(
    +        [
    +            "DJF",
    +            "DJF",
    +            "MAM",
    +            "MAM",
    +            "MAM",
    +            "JJA",
    +            "JJA",
    +            "JJA",
    +            "SON",
    +            "SON",
    +            "SON",
    +            "DJF",
    +        ]
    +    )
     
         assert_array_equal(seasons.values, dates.dt.season.values)
     
    diff --git a/xarray/tests/test_backends_file_manager.py b/xarray/tests/test_backends_file_manager.py
    index 73a6a426371..6b8c4da01de 100644
    --- a/xarray/tests/test_backends_file_manager.py
    +++ b/xarray/tests/test_backends_file_manager.py
    @@ -1,11 +1,11 @@
     import gc
     import pickle
     import threading
    +from typing import Dict
     from unittest import mock
     
     import pytest
     
    -from typing import Dict
     from xarray.backends.file_manager import CachingFileManager
     from xarray.backends.lru_cache import LRUCache
     from xarray.core.options import set_options
    diff --git a/xarray/tests/test_backends_lru_cache.py b/xarray/tests/test_backends_lru_cache.py
    index 28924321f6e..2b0c7742e5c 100644
    --- a/xarray/tests/test_backends_lru_cache.py
    +++ b/xarray/tests/test_backends_lru_cache.py
    @@ -1,11 +1,10 @@
    +from typing import Any
     from unittest import mock
     
     import pytest
     
     from xarray.backends.lru_cache import LRUCache
     
    -from typing import Any
    -
     
     def test_simple() -> None:
         cache: LRUCache[Any, Any] = LRUCache(maxsize=2)
    diff --git a/xarray/tests/test_coding_times.py b/xarray/tests/test_coding_times.py
    index 4c40e7aff37..aff2cb8cf3a 100644
    --- a/xarray/tests/test_coding_times.py
    +++ b/xarray/tests/test_coding_times.py
    @@ -604,11 +604,10 @@ def test_cf_timedelta(timedeltas, units, numbers) -> None:
     
     
     def test_cf_timedelta_2d() -> None:
    -    timedeltas = ["1D", "2D", "3D"]
         units = "days"
         numbers = np.atleast_2d([1, 2, 3])
     
    -    timedeltas = np.atleast_2d(to_timedelta_unboxed(timedeltas))
    +    timedeltas = np.atleast_2d(to_timedelta_unboxed(["1D", "2D", "3D"]))
         expected = timedeltas
     
         actual = coding.times.decode_cf_timedelta(numbers, units)
    diff --git a/xarray/tests/test_formatting.py b/xarray/tests/test_formatting.py
    index 7fa124ea7a3..bcb42543046 100644
    --- a/xarray/tests/test_formatting.py
    +++ b/xarray/tests/test_formatting.py
    @@ -305,7 +305,7 @@ def test_diff_attrs_repr_with_array(self) -> None:
             actual = formatting.diff_attrs_repr(attrs_a, attrs_b, "equals")
             assert expected == actual
     
    -        attrs_b = {"attr": np.array([-3, 5])}
    +        attrs_c = {"attr": np.array([-3, 5])}
             expected = dedent(
                 """\
                 Differing attributes:
    @@ -313,11 +313,11 @@ def test_diff_attrs_repr_with_array(self) -> None:
                 R   attr: [-3  5]
                 """
             ).strip()
    -        actual = formatting.diff_attrs_repr(attrs_a, attrs_b, "equals")
    +        actual = formatting.diff_attrs_repr(attrs_a, attrs_c, "equals")
             assert expected == actual
     
             # should not raise a warning
    -        attrs_b = {"attr": np.array([0, 1, 2])}
    +        attrs_c = {"attr": np.array([0, 1, 2])}
             expected = dedent(
                 """\
                 Differing attributes:
    @@ -325,7 +325,7 @@ def test_diff_attrs_repr_with_array(self) -> None:
                 R   attr: [0 1 2]
                 """
             ).strip()
    -        actual = formatting.diff_attrs_repr(attrs_a, attrs_b, "equals")
    +        actual = formatting.diff_attrs_repr(attrs_a, attrs_c, "equals")
             assert expected == actual
     
         def test_diff_dataset_repr(self) -> None:
    @@ -501,15 +501,18 @@ def test_repr_file_collapsed(tmp_path) -> None:
             assert actual == expected
     
     
    +from numpy.core import defchararray
    +
    +
     @pytest.mark.parametrize(
         "display_max_rows, n_vars, n_attr",
         [(50, 40, 30), (35, 40, 30), (11, 40, 30), (1, 40, 30)],
     )
     def test__mapping_repr(display_max_rows, n_vars, n_attr) -> None:
         long_name = "long_name"
    -    a = np.core.defchararray.add(long_name, np.arange(0, n_vars).astype(str))
    -    b = np.core.defchararray.add("attr_", np.arange(0, n_attr).astype(str))
    -    c = np.core.defchararray.add("coord", np.arange(0, n_vars).astype(str))
    +    a = defchararray.add(long_name, np.arange(0, n_vars).astype(str))
    +    b = defchararray.add("attr_", np.arange(0, n_attr).astype(str))
    +    c = defchararray.add("coord", np.arange(0, n_vars).astype(str))
         attrs = {k: 2 for k in b}
         coords = {_c: np.array([0, 1]) for _c in c}
         data_vars = dict()
    diff --git a/xarray/tests/test_formatting_html.py b/xarray/tests/test_formatting_html.py
    index 51303a7c4d8..4ee80f65027 100644
    --- a/xarray/tests/test_formatting_html.py
    +++ b/xarray/tests/test_formatting_html.py
    @@ -1,5 +1,6 @@
    -import numpy as np
     from typing import Dict, List
    +
    +import numpy as np
     import pandas as pd
     import pytest
     
    diff --git a/xarray/tests/test_groupby.py b/xarray/tests/test_groupby.py
    index 98109cce8c7..d48726e8304 100644
    --- a/xarray/tests/test_groupby.py
    +++ b/xarray/tests/test_groupby.py
    @@ -540,7 +540,7 @@ def test_groupby_bins_timeseries() -> None:
         ds["time"] = xr.DataArray(
             pd.date_range("2010-08-01", "2010-08-15", freq="15min"), dims="time"
         )
    -    ds["val"] = xr.DataArray(np.ones(*ds["time"].shape), dims="time")
    +    ds["val"] = xr.DataArray(np.ones(ds["time"].shape), dims="time")
         time_bins = pd.date_range(start="2010-08-01", end="2010-08-15", freq="24H")
         actual = ds.groupby_bins("time", time_bins).sum()
         expected = xr.DataArray(
    diff --git a/xarray/tests/test_indexing.py b/xarray/tests/test_indexing.py
    index 48099e6c94f..971fcbabd8d 100644
    --- a/xarray/tests/test_indexing.py
    +++ b/xarray/tests/test_indexing.py
    @@ -228,10 +228,10 @@ def test_lazily_indexed_array(self) -> None:
                 ([0, 3, 5], arr[:2]),
             ]
             for i, j in indexers:
    -            expected = v[i][j]
    +            expected_b = v[i][j]
                 actual = v_lazy[i][j]
    -            assert expected.shape == actual.shape
    -            assert_array_equal(expected, actual)
    +            assert expected_b.shape == actual.shape
    +            assert_array_equal(expected_b, actual)
     
                 # test transpose
                 if actual.ndim > 1:
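
The renames above (`attrs_b` -> `attrs_c`, `expected` -> `expected_b`, and building
the `seasons` DataArray directly rather than from an intermediate list) all work
around the same mypy rule: a name's type is inferred from its first assignment, and
re-binding it to a different type is an error unless `--allow-redefinition` is
enabled. A sketch of the pattern being avoided (hypothetical, not from the patch):

    import numpy as np
    import xarray as xr

    dates = np.array(["2000-01-15"], dtype="datetime64[ns]")
    # mypy infers `dates: np.ndarray` from the line above, so re-binding the
    # name to a DataArray is flagged: error: Incompatible types in assignment
    dates = xr.DataArray(dates)

    # giving each type its own name keeps the checker happy:
    dates_da = xr.DataArray(np.array(["2000-01-15"], dtype="datetime64[ns]"))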
    
    From 147b2602339a6f74e7b67aba640de6f0b71f9f37 Mon Sep 17 00:00:00 2001
    From: Maximilian Roos 
    Date: Sat, 21 Aug 2021 16:35:50 -0700
Subject: [PATCH 4/5] Move defchararray import to the top of test_formatting.py
    
    ---
     xarray/tests/test_formatting.py | 4 +---
     1 file changed, 1 insertion(+), 3 deletions(-)
    
    diff --git a/xarray/tests/test_formatting.py b/xarray/tests/test_formatting.py
    index bcb42543046..9e53cac3aa6 100644
    --- a/xarray/tests/test_formatting.py
    +++ b/xarray/tests/test_formatting.py
    @@ -4,6 +4,7 @@
     import numpy as np
     import pandas as pd
     import pytest
    +from numpy.core import defchararray
     
     import xarray as xr
     from xarray.core import formatting
    @@ -501,9 +502,6 @@ def test_repr_file_collapsed(tmp_path) -> None:
             assert actual == expected
     
     
    -from numpy.core import defchararray
    -
    -
     @pytest.mark.parametrize(
         "display_max_rows, n_vars, n_attr",
         [(50, 40, 30), (35, 40, 30), (11, 40, 30), (1, 40, 30)],
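
For context, `defchararray.add` (also exposed as `np.char.add`) does element-wise
string concatenation, which is how `test__mapping_repr` builds its variable and
coordinate names; this patch only moves the import to the top of the module where
it belongs. A quick illustration (not from the patch):

    import numpy as np
    from numpy.core import defchararray

    # concatenates the scalar prefix with each element of the string array
    names = defchararray.add("coord", np.arange(3).astype(str))
    print(names)  # ['coord0' 'coord1' 'coord2']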
    
    From 56b3cdede50f0a0197269daf556af1d817f28fa3 Mon Sep 17 00:00:00 2001
    From: Maximilian Roos 
    Date: Sat, 21 Aug 2021 17:04:53 -0700
Subject: [PATCH 5/5] Finish expected -> expected_b rename in test_indexing.py
    
    ---
     xarray/tests/test_indexing.py | 2 +-
     1 file changed, 1 insertion(+), 1 deletion(-)
    
    diff --git a/xarray/tests/test_indexing.py b/xarray/tests/test_indexing.py
    index 971fcbabd8d..533f4a0cd62 100644
    --- a/xarray/tests/test_indexing.py
    +++ b/xarray/tests/test_indexing.py
    @@ -238,7 +238,7 @@ def test_lazily_indexed_array(self) -> None:
                     order = np.random.choice(actual.ndim, actual.ndim)
                     order = np.array(actual.dims)
                     transposed = actual.transpose(*order)
    -                assert_array_equal(expected.transpose(*order), transposed)
    +                assert_array_equal(expected_b.transpose(*order), transposed)
                     assert isinstance(
                         actual._data,
                         (