diff --git a/.travis.yml b/.travis.yml index 259b3b548e2..068ea3cc788 100644 --- a/.travis.yml +++ b/.travis.yml @@ -93,7 +93,7 @@ install: - python xarray/util/print_versions.py script: - - git diff upstream/master xarray/**/*py | flake8 --diff --exit-zero || true + - flake8 -j auto xarray - python -OO -c "import xarray" - py.test xarray --cov=xarray --cov-config ci/.coveragerc --cov-report term-missing --verbose $EXTRA_FLAGS diff --git a/xarray/backends/__init__.py b/xarray/backends/__init__.py index a8a4afc359a..d85893afb0b 100644 --- a/xarray/backends/__init__.py +++ b/xarray/backends/__init__.py @@ -11,3 +11,14 @@ from .scipy_ import ScipyDataStore from .h5netcdf_ import H5NetCDFStore from .zarr import ZarrStore + +__all__ = [ + 'AbstractDataStore', + 'InMemoryDataStore', + 'NetCDF4DataStore', + 'PydapDataStore', + 'NioDataStore', + 'ScipyDataStore', + 'H5NetCDFStore', + 'ZarrStore', +] diff --git a/xarray/backends/api.py b/xarray/backends/api.py index cdeb8c0c0c2..a322139ff45 100644 --- a/xarray/backends/api.py +++ b/xarray/backends/api.py @@ -27,18 +27,18 @@ def _get_default_engine(path, allow_remote=False): engine = 'netcdf4' except ImportError: try: - import pydap + import pydap # flake8: noqa engine = 'pydap' except ImportError: raise ValueError('netCDF4 or pydap is required for accessing ' 'remote datasets via OPeNDAP') else: try: - import netCDF4 + import netCDF4 # flake8: noqa engine = 'netcdf4' except ImportError: # pragma: no cover try: - import scipy.io.netcdf + import scipy.io.netcdf # flake8: noqa engine = 'scipy' except ImportError: raise ValueError('cannot read or write netCDF files without ' diff --git a/xarray/backends/h5netcdf_.py b/xarray/backends/h5netcdf_.py index 82abaade06a..1cd7b65f80b 100644 --- a/xarray/backends/h5netcdf_.py +++ b/xarray/backends/h5netcdf_.py @@ -55,6 +55,7 @@ def _open_h5netcdf_group(filename, mode, group): class H5NetCDFStore(WritableCFDataStore, DataStorePickleMixin): """Store for reading and writing data via h5netcdf """ + def __init__(self, filename, mode='r', format=None, group=None, writer=None, autoclose=False): if format not in [None, 'NETCDF4']: diff --git a/xarray/backends/memory.py b/xarray/backends/memory.py index f79e92439fe..4cecf1e7771 100644 --- a/xarray/backends/memory.py +++ b/xarray/backends/memory.py @@ -18,6 +18,7 @@ class InMemoryDataStore(AbstractWritableDataStore): This store exists purely for internal testing purposes. """ + def __init__(self, variables=None, attributes=None, writer=None): self._variables = OrderedDict() if variables is None else variables self._attributes = OrderedDict() if attributes is None else attributes diff --git a/xarray/backends/netCDF4_.py b/xarray/backends/netCDF4_.py index d8aa33f35dc..f5185742eb3 100644 --- a/xarray/backends/netCDF4_.py +++ b/xarray/backends/netCDF4_.py @@ -218,6 +218,7 @@ class NetCDF4DataStore(WritableCFDataStore, DataStorePickleMixin): This store supports NetCDF3, NetCDF4 and OpenDAP datasets. """ + def __init__(self, netcdf4_dataset, mode='r', writer=None, opener=None, autoclose=False): diff --git a/xarray/backends/pydap_.py b/xarray/backends/pydap_.py index a324767ccf8..297d96e47f4 100644 --- a/xarray/backends/pydap_.py +++ b/xarray/backends/pydap_.py @@ -60,6 +60,7 @@ class PydapDataStore(AbstractDataStore): This store provides an alternative way to access OpenDAP datasets that may be useful if the netCDF4 library is not available. 
""" + def __init__(self, ds): """ Parameters diff --git a/xarray/backends/pynio_.py b/xarray/backends/pynio_.py index ffa936c0466..37f1db1f6a7 100644 --- a/xarray/backends/pynio_.py +++ b/xarray/backends/pynio_.py @@ -40,6 +40,7 @@ def __getitem__(self, key): class NioDataStore(AbstractDataStore, DataStorePickleMixin): """Store for accessing datasets via PyNIO """ + def __init__(self, filename, mode='r', autoclose=False): import Nio opener = functools.partial(Nio.open_file, filename, mode=mode) diff --git a/xarray/backends/rasterio_.py b/xarray/backends/rasterio_.py index 2661bff39ec..6f9f34427d9 100644 --- a/xarray/backends/rasterio_.py +++ b/xarray/backends/rasterio_.py @@ -20,6 +20,7 @@ class RasterioArrayWrapper(BackendArray): """A wrapper around rasterio dataset objects""" + def __init__(self, rasterio_ds): self.rasterio_ds = rasterio_ds self._shape = (rasterio_ds.count, rasterio_ds.height, @@ -63,9 +64,9 @@ def __getitem__(self, key): elif is_scalar(k): # windowed operations will always return an array # we will have to squeeze it later - squeeze_axis.append(i+1) + squeeze_axis.append(i + 1) start = k - stop = k+1 + stop = k + 1 else: k = np.asarray(k) start = k[0] @@ -165,10 +166,10 @@ def open_rasterio(filename, chunks=None, cache=None, lock=None): dx, dy = riods.res[0], -riods.res[1] x0 = riods.bounds.right if dx < 0 else riods.bounds.left y0 = riods.bounds.top if dy < 0 else riods.bounds.bottom - coords['y'] = np.linspace(start=y0 + dy/2, num=ny, - stop=(y0 + (ny - 1) * dy) + dy/2) - coords['x'] = np.linspace(start=x0 + dx/2, num=nx, - stop=(x0 + (nx - 1) * dx) + dx/2) + coords['y'] = np.linspace(start=y0 + dy / 2, num=ny, + stop=(y0 + (ny - 1) * dy) + dy / 2) + coords['x'] = np.linspace(start=x0 + dx / 2, num=nx, + stop=(x0 + (nx - 1) * dx) + dx / 2) # Attributes attrs = {} diff --git a/xarray/backends/zarr.py b/xarray/backends/zarr.py index b4645aa8071..53051218761 100644 --- a/xarray/backends/zarr.py +++ b/xarray/backends/zarr.py @@ -71,8 +71,8 @@ def _replace_slices_with_arrays(key, shape): slice_count += 1 else: assert isinstance(k, np.ndarray) - k = k[(slice(None),) * array_subspace_size - + (np.newaxis,) * num_slices] + k = k[(slice(None),) * array_subspace_size + + (np.newaxis,) * num_slices] new_key.append(k) return tuple(new_key) diff --git a/xarray/conventions.py b/xarray/conventions.py index 27d0816524c..bdb4931081a 100644 --- a/xarray/conventions.py +++ b/xarray/conventions.py @@ -25,6 +25,7 @@ class StackedBytesArray(indexing.ExplicitlyIndexedNDArrayMixin): array('abc', dtype='|S3') """ + def __init__(self, array): """ Parameters @@ -70,6 +71,7 @@ class BytesToStringArray(indexing.ExplicitlyIndexedNDArrayMixin): array(['abc'], dtype=object) """ + def __init__(self, array, encoding='utf-8'): """ Parameters @@ -120,6 +122,7 @@ class NativeEndiannessArray(indexing.ExplicitlyIndexedNDArrayMixin): >>> NativeEndianArray(x)[:].dtype dtype('int16') """ + def __init__(self, array): self.array = indexing.as_indexable(array) @@ -148,6 +151,7 @@ class BoolTypeArray(indexing.ExplicitlyIndexedNDArrayMixin): >>> BoolTypeArray(x)[:].dtype dtype('bool') """ + def __init__(self, array): self.array = indexing.as_indexable(array) diff --git a/xarray/core/accessors.py b/xarray/core/accessors.py index 59aca6b67f0..5052b555c73 100644 --- a/xarray/core/accessors.py +++ b/xarray/core/accessors.py @@ -80,6 +80,7 @@ class DatetimeAccessor(object): `dayofyear` may not be accurate. 
""" + def __init__(self, xarray_obj): if not is_datetime_like(xarray_obj.dtype): raise TypeError("'dt' accessor only available for " diff --git a/xarray/core/alignment.py b/xarray/core/alignment.py index 24b7da88c6b..876245322fa 100644 --- a/xarray/core/alignment.py +++ b/xarray/core/alignment.py @@ -363,7 +363,7 @@ def var_indexers(var, indexers): "Indexer has dimensions {0:s} that are different " "from that to be indexed along {1:s}. " "This will behave differently in the future.".format( - str(indexer.dims), dim), + str(indexer.dims), dim), FutureWarning, stacklevel=3) if dim in variables: diff --git a/xarray/core/combine.py b/xarray/core/combine.py index 97c36c85a3c..b14d085f383 100644 --- a/xarray/core/combine.py +++ b/xarray/core/combine.py @@ -9,7 +9,8 @@ from .alignment import align from .merge import merge from .pycompat import iteritems, OrderedDict, basestring -from .variable import Variable, as_variable, IndexVariable, concat as concat_vars +from .variable import Variable, as_variable, IndexVariable, \ + concat as concat_vars def concat(objs, dim=None, data_vars='all', coords='different', @@ -103,12 +104,12 @@ def concat(objs, dim=None, data_vars='all', coords='different', if mode is not None: raise ValueError('`mode` is no longer a valid argument to ' - 'xarray.concat; it has been split into the `data_vars` ' - 'and `coords` arguments') + 'xarray.concat; it has been split into the ' + '`data_vars` and `coords` arguments') if concat_over is not None: raise ValueError('`concat_over` is no longer a valid argument to ' - 'xarray.concat; it has been split into the `data_vars` ' - 'and `coords` arguments') + 'xarray.concat; it has been split into the ' + '`data_vars` and `coords` arguments') if isinstance(first_obj, DataArray): f = _dataarray_concat @@ -166,8 +167,8 @@ def process_subset_opt(opt, subset): if k not in concat_over: # Compare the variable of all datasets vs. the one # of the first dataset. Perform the minimum amount of - # loads in order to avoid multiple loads from disk while - # keeping the RAM footprint low. + # loads in order to avoid multiple loads from disk + # while keeping the RAM footprint low. v_lhs = datasets[0].variables[k].load() # We'll need to know later on if variables are equal. computed = [] @@ -199,11 +200,11 @@ def process_subset_opt(opt, subset): if subset == 'coords': raise ValueError( 'some variables in coords are not coordinates on ' - 'the first dataset: %s' % invalid_vars) + 'the first dataset: %s' % (invalid_vars,)) else: raise ValueError( - 'some variables in data_vars are not data variables on ' - 'the first dataset: %s' % invalid_vars) + 'some variables in data_vars are not data variables ' + 'on the first dataset: %s' % (invalid_vars,)) concat_over.update(opt) process_subset_opt(data_vars, 'data_vars') @@ -275,7 +276,6 @@ def insert_result_variable(k, v): raise ValueError( 'variable %s not equal across datasets' % k) - # we've already verified everything is consistent; now, calculate # shared dimension sizes so we can expand the necessary variables dim_lengths = [ds.dims.get(dim, 1) for ds in datasets] @@ -377,8 +377,8 @@ def auto_combine(datasets, This method attempts to combine a list of datasets into a single entity by inspecting metadata and using a combination of concat and merge. - It does not concatenate along more than one dimension or sort data under any - circumstances. It does align coordinates, but different variables on + It does not concatenate along more than one dimension or sort data under + any circumstances. 
It does align coordinates, but different variables on datasets can cause it to fail under some scenarios. In complex cases, you may need to clean up your data and use ``concat``/``merge`` explicitly. @@ -392,9 +392,9 @@ def auto_combine(datasets, Dataset objects to merge. concat_dim : str or DataArray or Index, optional Dimension along which to concatenate variables, as used by - :py:func:`xarray.concat`. You only need to provide this argument if the - dimension along which you want to concatenate is not a dimension in - the original datasets, e.g., if you want to stack a collection of + :py:func:`xarray.concat`. You only need to provide this argument if + the dimension along which you want to concatenate is not a dimension + in the original datasets, e.g., if you want to stack a collection of 2D arrays along a third dimension. By default, xarray attempts to infer this argument by examining component files. Set ``concat_dim=None`` explicitly to disable diff --git a/xarray/core/common.py b/xarray/core/common.py index 3bfcd484474..1366d0ff03d 100644 --- a/xarray/core/common.py +++ b/xarray/core/common.py @@ -91,7 +91,7 @@ def __complex__(self): return complex(self.values) def __long__(self): - return long(self.values) + return long(self.values) # flake8: noqa def __array__(self, dtype=None): return np.asarray(self.values, dtype=dtype) @@ -608,7 +608,7 @@ def _resample_immediately(self, freq, dim, how, skipna, "calculations. Instead of passing 'dim' and " "'how=\"{how}\", instead consider using " ".resample({dim}=\"{freq}\").{how}() ".format( - dim=dim, freq=freq, how=how + dim=dim, freq=freq, how=how ), DeprecationWarning, stacklevel=3) if isinstance(dim, basestring): diff --git a/xarray/core/computation.py b/xarray/core/computation.py index e58b072f752..f1519027398 100644 --- a/xarray/core/computation.py +++ b/xarray/core/computation.py @@ -1,12 +1,11 @@ -"""Functions for applying functions that act on arrays to xarray's labeled data. +""" +Functions for applying functions that act on arrays to xarray's labeled data. NOT PUBLIC API. """ -import collections import functools import itertools import operator -import re import numpy as np @@ -36,6 +35,7 @@ class _UFuncSignature(object): output_core_dims : tuple[tuple] Core dimension names on each output variable. 
""" + def __init__(self, input_core_dims, output_core_dims=((),)): self.input_core_dims = tuple(tuple(a) for a in input_core_dims) self.output_core_dims = tuple(tuple(a) for a in output_core_dims) @@ -248,8 +248,8 @@ def assert_and_return_exact_match(all_keys): for keys in all_keys[1:]: if keys != first_keys: raise ValueError( - 'exact match required for all data variable names, but %r != %r' - % (keys, first_keys)) + 'exact match required for all data variable names, ' + 'but %r != %r' % (keys, first_keys)) return first_keys @@ -483,9 +483,10 @@ def broadcast_compat_data(variable, broadcast_dims, core_dims): set_old_dims = set(old_dims) missing_core_dims = [d for d in core_dims if d not in set_old_dims] if missing_core_dims: - raise ValueError('operand to apply_ufunc has required core dimensions ' - '%r, but some of these are missing on the input ' - 'variable: %r' % (list(core_dims), missing_core_dims)) + raise ValueError( + 'operand to apply_ufunc has required core dimensions %r, but ' + 'some of these are missing on the input variable: %r' + % (list(core_dims), missing_core_dims)) set_new_dims = set(new_dims) unexpected_dims = [d for d in old_dims if d not in set_new_dims] @@ -552,9 +553,11 @@ def apply_variable_ufunc(func, *args, **kwargs): input_dims = [broadcast_dims + dims for dims in signature.input_core_dims] numpy_func = func - func = lambda *arrays: _apply_with_dask_atop( - numpy_func, arrays, input_dims, output_dims, signature, - output_dtypes, output_sizes) + + def func(*arrays): + return _apply_with_dask_atop( + numpy_func, arrays, input_dims, output_dims, + signature, output_dtypes, output_sizes) elif dask == 'allowed': pass else: @@ -598,8 +601,8 @@ def _apply_with_dask_atop(func, args, input_dims, output_dims, signature, new_dims = signature.all_output_core_dims - signature.all_input_core_dims if any(dim not in output_sizes for dim in new_dims): raise ValueError("when using dask='parallelized' with apply_ufunc, " - 'output core dimensions not found on inputs must have ' - 'explicitly set sizes with ``output_sizes``: {}' + 'output core dimensions not found on inputs must ' + 'have explicitly set sizes with ``output_sizes``: {}' .format(new_dims)) for n, (data, core_dims) in enumerate( @@ -763,9 +766,9 @@ def apply_ufunc(func, *args, **kwargs): output_dtypes : list of dtypes, optional Optional list of output dtypes. Only used if dask='parallelized'. output_sizes : dict, optional - Optional mapping from dimension names to sizes for outputs. Only used if - dask='parallelized' and new dimensions (not found on inputs) appear on - outputs. + Optional mapping from dimension names to sizes for outputs. Only used + if dask='parallelized' and new dimensions (not found on inputs) appear + on outputs. Returns ------- @@ -845,7 +848,7 @@ def earth_mover_distance(first_samples, ---------- .. [1] http://docs.scipy.org/doc/numpy/reference/ufuncs.html .. 
[2] http://docs.scipy.org/doc/numpy/reference/c-api.generalized-ufuncs.html - """ + """ # noqa: E501 # don't error on that URL one line up from .groupby import GroupBy from .dataarray import DataArray from .variable import Variable diff --git a/xarray/core/coordinates.py b/xarray/core/coordinates.py index 6b83577a90a..60c01e8be72 100644 --- a/xarray/core/coordinates.py +++ b/xarray/core/coordinates.py @@ -107,7 +107,7 @@ def _merge_inplace(self, other): # don't include indexes in priority_vars, because we didn't align # first priority_vars = OrderedDict( - (k, v) for k, v in self.variables.items() if k not in self.dims) + kv for kv in self.variables.items() if kv[0] not in self.dims) variables = merge_coords_for_inplace_math( [self.variables, other.variables], priority_vars=priority_vars) yield @@ -152,6 +152,7 @@ class DatasetCoordinates(AbstractCoordinates): dimensions and the values given by the corresponding xarray.Coordinate objects. """ + def __init__(self, dataset): self._data = dataset @@ -210,6 +211,7 @@ class DataArrayCoordinates(AbstractCoordinates): Essentially an OrderedDict with keys given by the array's dimensions and the values given by corresponding DataArray objects. """ + def __init__(self, dataarray): self._data = dataarray @@ -256,6 +258,7 @@ class LevelCoordinatesSource(object): Used for attribute style lookup with AttrAccessMixin. Not returned directly by any public methods. """ + def __init__(self, data_object): self._data = data_object @@ -270,6 +273,7 @@ def __iter__(self): class Indexes(Mapping, formatting.ReprMixin): """Ordered Mapping[str, pandas.Index] for xarray objects. """ + def __init__(self, variables, sizes): """Not for public consumption. diff --git a/xarray/core/dataarray.py b/xarray/core/dataarray.py index a522fe60b2d..8e1ec8ab7b8 100644 --- a/xarray/core/dataarray.py +++ b/xarray/core/dataarray.py @@ -19,7 +19,6 @@ from .accessors import DatetimeAccessor from .alignment import align, reindex_like_indexers from .common import AbstractArray, BaseDataObject -from .computation import apply_ufunc from .coordinates import (DataArrayCoordinates, LevelCoordinatesSource, Indexes, assert_coordinate_consistent, remap_label_indexers) @@ -317,7 +316,7 @@ def _to_dataset_whole(self, name=None, shallow_copy=True): if name in self.coords: raise ValueError('cannot create a Dataset from a DataArray with ' 'the same name as one of its coordinates') - # use private APIs here for speed: this is called by _to_temp_dataset(), + # use private APIs for speed: this is called by _to_temp_dataset(), # which is used in the guts of a lot of operations (e.g., reindex) variables = self._coords.copy() variables[name] = self.variable @@ -429,9 +428,9 @@ def to_index(self): def dims(self): """Tuple of dimension names associated with this array. - Note that the type of this property is inconsistent with `Dataset.dims`. - See `Dataset.sizes` and `DataArray.sizes` for consistently named - properties. + Note that the type of this property is inconsistent with + `Dataset.dims`. See `Dataset.sizes` and `DataArray.sizes` for + consistently named properties. """ return self.variable.dims @@ -869,12 +868,11 @@ def reindex(self, method=None, tolerance=None, copy=True, **indexers): Maximum distance between original and new labels for inexact matches. The values of the index at the matching locations most satisfy the equation ``abs(index[indexer] - target) <= tolerance``. - Requires pandas>=0.17. 
**indexers : dict Dictionary with keys given by dimension names and values given by - arrays of coordinates tick labels. Any mis-matched coordinate values - will be filled in with NaN, and any mis-matched dimension names will - simply be ignored. + arrays of coordinate tick labels. Any mis-matched coordinate + values will be filled in with NaN, and any mis-matched dimension + names will simply be ignored. Returns ------- @@ -944,8 +942,8 @@ def swap_dims(self, dims_dict): return self._from_temp_dataset(ds) def expand_dims(self, dim, axis=None): - """Return a new object with an additional axis (or axes) inserted at the - corresponding position in the array shape. + """Return a new object with an additional axis (or axes) inserted at + the corresponding position in the array shape. If dim is already a scalar coordinate, it will be promoted to a 1D coordinate consisting of a single value. @@ -971,7 +969,8 @@ def expand_dims(self, dim, axis=None): return self._from_temp_dataset(ds) def set_index(self, append=False, inplace=False, **indexes): - """Set DataArray (multi-)indexes using one or more existing coordinates. + """Set DataArray (multi-)indexes using one or more existing + coordinates. Parameters ---------- @@ -979,8 +978,8 @@ def set_index(self, append=False, inplace=False, **indexes): If True, append the supplied index(es) to the existing index(es). Otherwise replace the existing index(es) (default). inplace : bool, optional - If True, set new index(es) in-place. Otherwise, return a new DataArray - object. + If True, set new index(es) in-place. Otherwise, return a new + DataArray object. **indexes : {dim: index, ...} Keyword arguments with names matching dimensions and values given by (lists of) the names of existing coordinates or variables to set @@ -989,7 +988,7 @@ def set_index(self, append=False, inplace=False, **indexes): Returns ------- obj : DataArray - Another dataarray, with this dataarray's data but replaced coordinates. + Another dataarray, with this data but replaced coordinates. See Also -------- @@ -1608,9 +1607,9 @@ def from_series(cls, series): """Convert a pandas.Series into an xarray.DataArray. If the series's index is a MultiIndex, it will be expanded into a - tensor product of one-dimensional coordinates (filling in missing values - with NaN). Thus this operation should be the inverse of the `to_series` - method. + tensor product of one-dimensional coordinates (filling in missing + values with NaN). Thus this operation should be the inverse of the + `to_series` method. """ # TODO: add a 'name' parameter name = series.name @@ -2090,16 +2089,16 @@ def quantile(self, q, dim=None, interpolation='linear', keep_attrs=False): numpy.nanpercentile, pandas.Series.quantile, Dataset.quantile """ - ds = self._to_temp_dataset().quantile(q, dim=dim, keep_attrs=keep_attrs, - interpolation=interpolation) + ds = self._to_temp_dataset().quantile( + q, dim=dim, keep_attrs=keep_attrs, interpolation=interpolation) return self._from_temp_dataset(ds) def rank(self, dim, pct=False, keep_attrs=False): """Ranks the data. Equal values are assigned a rank that is the average of the ranks that - would have been otherwise assigned to all of the values within that set. - Ranks begin at 1, not 0. If pct is True, computes percentage ranks. + would have been otherwise assigned to all of the values within that + set. Ranks begin at 1, not 0. If `pct`, computes percentage ranks. NaNs in the input array are returned as NaNs.
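A quick illustration of the `rank` semantics documented in the hunk above: a minimal sketch, not part of the patch. It assumes the optional bottleneck dependency, which backs `rank`, is installed; the tiny array and the dimension name 'x' are illustrative only.

    import xarray as xr

    arr = xr.DataArray([7.0, 2.0, 7.0], dims='x')
    arr.rank('x')            # tied values share the average rank: [2.5, 1.0, 2.5]
    arr.rank('x', pct=True)  # rank / number of valid values: [~0.83, ~0.33, ~0.83]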
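Likewise, a sketch of the `output_sizes` behaviour described in the `apply_ufunc` docstring earlier in this patch. This is hedged and not part of the patch: it assumes dask is installed, and the function, shapes, and the 'quantile' dimension name are illustrative only. The point is that a new core dimension absent from the inputs must be given an explicit size.

    import numpy as np
    import xarray as xr

    def quartiles(x):
        # collapse the core dim (last axis) and emit a new trailing axis of size 3
        return np.moveaxis(np.percentile(x, [25, 50, 75], axis=-1), 0, -1)

    da = xr.DataArray(np.random.randn(4, 100), dims=('y', 'x')).chunk({'y': 2})
    result = xr.apply_ufunc(
        quartiles, da,
        input_core_dims=[['x']],
        output_core_dims=[['quantile']],
        dask='parallelized',
        output_dtypes=[float],
        output_sizes={'quantile': 3},  # 'quantile' is not found on the inputs
    )

Omitting the `output_sizes` entry triggers the ValueError whose message is rewrapped in the computation.py hunk above.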
diff --git a/xarray/core/dataset.py b/xarray/core/dataset.py index 58847bb0086..5e75e97ef31 100644 --- a/xarray/core/dataset.py +++ b/xarray/core/dataset.py @@ -1252,7 +1252,7 @@ def chunk(self, chunks=None, name_prefix='xarray-', token=None, try: from dask.base import tokenize except ImportError: - import dask # raise the usual error if dask is entirely missing + import dask # raise the usual error if dask is entirely missing # flake8: noqa raise ImportError('xarray requires dask version 0.6 or newer') if isinstance(chunks, Number): diff --git a/xarray/core/extensions.py b/xarray/core/extensions.py index affb55b3298..90639e47f43 100644 --- a/xarray/core/extensions.py +++ b/xarray/core/extensions.py @@ -15,6 +15,7 @@ class AccessorRegistrationWarning(Warning): class _CachedAccessor(object): """Custom property-like object (descriptor) for caching accessors.""" + def __init__(self, name, accessor): self._name = name self._accessor = accessor @@ -26,9 +27,9 @@ def __get__(self, obj, cls): try: accessor_obj = self._accessor(obj) except AttributeError: - # __getattr__ on data object will swallow any AttributeErrors raised - # when initializing the accessor, so we need to raise as something - # else (GH933): + # __getattr__ on data object will swallow any AttributeErrors + # raised when initializing the accessor, so we need to raise as + # something else (GH933): msg = 'error initializing %r accessor.' % self._name if PY2: msg += ' Full traceback:\n' + traceback.format_exc() diff --git a/xarray/core/formatting.py b/xarray/core/formatting.py index 6f91f73738c..83f8e2719d6 100644 --- a/xarray/core/formatting.py +++ b/xarray/core/formatting.py @@ -21,7 +21,6 @@ from .options import OPTIONS from .pycompat import PY2, unicode_type, bytes_type, dask_array_type -from .indexing import BasicIndexer def pretty_print(x, numchars): @@ -60,6 +59,7 @@ def ensure_valid_repr(string): class ReprMixin(object): """Mixin that defines __repr__ for a class that already has __unicode__.""" + def __repr__(self): return ensure_valid_repr(self.__unicode__()) diff --git a/xarray/core/groupby.py b/xarray/core/groupby.py index b299380e6c6..c4b25741d5b 100644 --- a/xarray/core/groupby.py +++ b/xarray/core/groupby.py @@ -80,9 +80,9 @@ def _consolidate_slices(slices): for slice_ in slices: if not isinstance(slice_, slice): raise ValueError('list element is not a slice: %r' % slice_) - if (result and last_slice.stop == slice_.start - and _is_one_or_none(last_slice.step) - and _is_one_or_none(slice_.step)): + if (result and last_slice.stop == slice_.start and + _is_one_or_none(last_slice.step) and + _is_one_or_none(slice_.step)): last_slice = slice(last_slice.start, slice_.stop, slice_.step) result[-1] = last_slice else: @@ -172,6 +172,7 @@ class GroupBy(object): Dataset.groupby DataArray.groupby """ + def __init__(self, obj, group, squeeze=False, grouper=None, bins=None, cut_kwargs={}): """Create a GroupBy object @@ -441,6 +442,7 @@ def _maybe_reorder(xarray_obj, dim, positions): class DataArrayGroupBy(GroupBy, ImplementsArrayReduce): """GroupBy object specialized to grouping DataArray objects """ + def _iter_grouped_shortcut(self): """Fast version of `_iter_grouped` that yields Variables without metadata @@ -548,8 +550,8 @@ def reduce(self, func, dim=None, axis=None, keep_attrs=False, ---------- func : function Function which can be called in the form - `func(x, axis=axis, **kwargs)` to return the result of collapsing an - np.ndarray over an integer valued axis. 
+ `func(x, axis=axis, **kwargs)` to return the result of collapsing + an np.ndarray over an integer valued axis. dim : str or sequence of str, optional Dimension(s) over which to apply `func`. axis : int or sequence of int, optional @@ -573,6 +575,7 @@ def reduce_array(ar): return ar.reduce(func, dim, axis, keep_attrs=keep_attrs, **kwargs) return self.apply(reduce_array, shortcut=shortcut) + ops.inject_reduce_methods(DataArrayGroupBy) ops.inject_binary_ops(DataArrayGroupBy) @@ -629,8 +632,8 @@ def reduce(self, func, dim=None, keep_attrs=False, **kwargs): ---------- func : function Function which can be called in the form - `func(x, axis=axis, **kwargs)` to return the result of collapsing an - np.ndarray over an integer valued axis. + `func(x, axis=axis, **kwargs)` to return the result of collapsing + an np.ndarray over an integer valued axis. dim : str or sequence of str, optional Dimension(s) over which to apply `func`. axis : int or sequence of int, optional @@ -663,5 +666,6 @@ def assign(self, **kwargs): """ return self.apply(lambda ds: ds.assign(**kwargs)) + ops.inject_reduce_methods(DatasetGroupBy) ops.inject_binary_ops(DatasetGroupBy) diff --git a/xarray/core/indexing.py b/xarray/core/indexing.py index 1c5cc2e5721..4f1bdb415c1 100644 --- a/xarray/core/indexing.py +++ b/xarray/core/indexing.py @@ -76,9 +76,9 @@ def _asarray_tuplesafe(values): def _is_nested_tuple(possible_tuple): - return (isinstance(possible_tuple, tuple) - and any(isinstance(value, (tuple, list, slice)) - for value in possible_tuple)) + return (isinstance(possible_tuple, tuple) and + any(isinstance(value, (tuple, list, slice)) + for value in possible_tuple)) def _index_method_kwargs(method, tolerance): @@ -125,8 +125,8 @@ def convert_label_indexer(index, label, index_name='', method=None, _try_get_item(label.stop), _try_get_item(label.step)) if not isinstance(indexer, slice): - # unlike pandas, in xarray we never want to silently convert a slice - # indexer into an array indexer + # unlike pandas, in xarray we never want to silently convert a + # slice indexer into an array indexer raise KeyError('cannot represent labeled-based slice indexer for ' 'dimension %r with a slice over integer positions; ' 'the index is unsorted or non-unique' % index_name) @@ -134,8 +134,8 @@ def convert_label_indexer(index, label, index_name='', method=None, elif is_dict_like(label): is_nested_vals = _is_nested_tuple(tuple(label.values())) if not isinstance(index, pd.MultiIndex): - raise ValueError('cannot use a dict-like object for selection on a ' - 'dimension that does not have a MultiIndex') + raise ValueError('cannot use a dict-like object for selection on ' + 'a dimension that does not have a MultiIndex') elif len(label) == index.nlevels and not is_nested_vals: indexer = index.get_loc(tuple((label[k] for k in index.names))) else: @@ -145,7 +145,7 @@ def convert_label_indexer(index, label, index_name='', method=None, raise ValueError('Vectorized selection is not ' 'available along level variable: ' + k) indexer, new_index = index.get_loc_level( - tuple(label.values()), level=tuple(label.keys())) + tuple(label.values()), level=tuple(label.keys())) elif isinstance(label, tuple) and isinstance(index, pd.MultiIndex): if _is_nested_tuple(label): @@ -290,6 +290,7 @@ class ExplicitIndexer(object): Do not instantiate BaseIndexer objects directly: instead, use one of the sub-classes BasicIndexer, OuterIndexer or VectorizedIndexer. 
""" + def __init__(self, key): if type(self) is ExplicitIndexer: raise TypeError('cannot instantiate base ExplicitIndexer objects') @@ -321,6 +322,7 @@ class BasicIndexer(ExplicitIndexer): rules for basic indexing: each axis is independently sliced and axes indexed with an integer are dropped from the result. """ + def __init__(self, key): if not isinstance(key, tuple): raise TypeError('key must be a tuple: {!r}'.format(key)) @@ -347,6 +349,7 @@ class OuterIndexer(ExplicitIndexer): axes indexed with an integer are dropped from the result. This type of indexing works like MATLAB/Fortran. """ + def __init__(self, key): if not isinstance(key, tuple): raise TypeError('key must be a tuple: {!r}'.format(key)) @@ -383,6 +386,7 @@ class VectorizedIndexer(ExplicitIndexer): (including broadcasting) except sliced axes are always moved to the end: https://github.com/numpy/numpy/pull/6256 """ + def __init__(self, key): if not isinstance(key, tuple): raise TypeError('key must be a tuple: {!r}'.format(key)) @@ -458,6 +462,7 @@ def __getitem__(self, key): class LazilyIndexedArray(ExplicitlyIndexedNDArrayMixin): """Wrap an array to make basic and orthogonal indexing lazy. """ + def __init__(self, array, key=None): """ Parameters diff --git a/xarray/core/merge.py b/xarray/core/merge.py index be11be2209b..c5e643adb0d 100644 --- a/xarray/core/merge.py +++ b/xarray/core/merge.py @@ -507,8 +507,9 @@ def merge(objects, compat='no_conflicts', join='outer'): from .dataarray import DataArray from .dataset import Dataset - dict_like_objects = [obj.to_dataset() if isinstance(obj, DataArray) else obj - for obj in objects] + dict_like_objects = [ + obj.to_dataset() if isinstance(obj, DataArray) else obj + for obj in objects] variables, coord_names, dims = merge_core(dict_like_objects, compat, join) merged = Dataset._construct_direct(variables, coord_names, dims) @@ -549,4 +550,5 @@ def dataset_merge_method(dataset, other, overwrite_vars, compat, join): def dataset_update_method(dataset, other): """Guts of the Dataset.update method""" - return merge_core([dataset, other], priority_arg=1, indexes=dataset.indexes) + return merge_core([dataset, other], priority_arg=1, + indexes=dataset.indexes) diff --git a/xarray/core/npcompat.py b/xarray/core/npcompat.py index fa01e37e94f..bbe7b745621 100644 --- a/xarray/core/npcompat.py +++ b/xarray/core/npcompat.py @@ -33,8 +33,8 @@ def _replace_nan(a, val): If `a` is of inexact type, return a copy of `a` with the NaNs replaced by the fill value, otherwise return `a`. mask: {bool, None} - If `a` is of inexact type, return a boolean mask marking locations of - NaNs, otherwise return None. + If `a` is of inexact type, return a boolean mask marking locations + of NaNs, otherwise return None. """ is_new = not isinstance(a, np.ndarray) @@ -206,7 +206,7 @@ def flip(m, axis): ----- flip(m, 0) is equivalent to flipud(m). flip(m, 1) is equivalent to fliplr(m). - flip(m, n) corresponds to ``m[...,::-1,...]`` with ``::-1`` at position n. + flip(m, n) corresponds to ``m[...,::-1,...]`` with ``::-1`` at index n. 
Examples -------- diff --git a/xarray/core/nputils.py b/xarray/core/nputils.py index a721425b839..8ac04752e85 100644 --- a/xarray/core/nputils.py +++ b/xarray/core/nputils.py @@ -120,6 +120,7 @@ class NumpyVIndexAdapter(object): This is a pure Python implementation of (some of) the logic in this NumPy proposal: https://github.com/numpy/numpy/pull/6256 """ + def __init__(self, array): self._array = array diff --git a/xarray/core/ops.py b/xarray/core/ops.py index 2ed3f81d185..d02b8fa3108 100644 --- a/xarray/core/ops.py +++ b/xarray/core/ops.py @@ -262,7 +262,7 @@ def inject_reduce_methods(cls): def inject_cum_methods(cls): methods = ([(name, getattr(duck_array_ops, name), True) - for name in NAN_CUM_METHODS]) + for name in NAN_CUM_METHODS]) for name, f, include_skipna in methods: numeric_only = getattr(f, 'numeric_only', False) func = cls._reduce_method(f, include_skipna, numeric_only) diff --git a/xarray/core/options.py b/xarray/core/options.py index abae7427f9a..9f06f8dbbae 100644 --- a/xarray/core/options.py +++ b/xarray/core/options.py @@ -35,6 +35,7 @@ class set_options(object): >>> xr.set_options(display_width=80) """ + def __init__(self, **kwargs): invalid_options = {k for k in kwargs if k not in OPTIONS} if invalid_options: diff --git a/xarray/core/pycompat.py b/xarray/core/pycompat.py index a73a27f9643..4b83df9e14f 100644 --- a/xarray/core/pycompat.py +++ b/xarray/core/pycompat.py @@ -1,3 +1,5 @@ +# flake8: noqa + from __future__ import absolute_import from __future__ import division from __future__ import print_function @@ -126,6 +128,7 @@ class ExitStack(object): # in the list raise an exception """ + def __init__(self): self._exit_callbacks = deque() @@ -161,7 +164,7 @@ def push(self, exit): self._exit_callbacks.append(exit) else: self._push_cm_exit(exit, exit_method) - return exit # Allow use as a decorator + return exit # Allow use as a decorator def callback(self, callback, *args, **kwds): """Registers an arbitrary callback and arguments. @@ -174,7 +177,7 @@ def _exit_wrapper(exc_type, exc, tb): # setting __wrapped__ may still help with introspection _exit_wrapper.__wrapped__ = callback self.push(_exit_wrapper) - return callback # Allow use as a decorator + return callback # Allow use as a decorator def enter_context(self, cm): """Enters the supplied context manager diff --git a/xarray/core/rolling.py b/xarray/core/rolling.py index 6ac668f9349..8209e70e5a8 100644 --- a/xarray/core/rolling.py +++ b/xarray/core/rolling.py @@ -87,7 +87,8 @@ def __repr__(self): """provide a nice str repr of our rolling object""" attrs = ["{k}->{v}".format(k=k, v=getattr(self, k)) - for k in self._attributes if getattr(self, k, None) is not None] + for k in self._attributes + if getattr(self, k, None) is not None] return "{klass} [{attrs}]".format(klass=self.__class__.__name__, attrs=','.join(attrs)) @@ -122,6 +123,7 @@ class DataArrayRolling(Rolling): + rolling.DataArrayRolling + ops.inject_bottleneck_rolling_methods """ + def __init__(self, obj, min_periods=None, center=False, **windows): super(DataArrayRolling, self).__init__(obj, min_periods=min_periods, center=center, **windows) @@ -228,6 +230,7 @@ def _reduce_method(cls, func): Methods to return a wrapped function for any function `func` for numpy methods. """ + def wrapped_func(self, **kwargs): return self.reduce(func, **kwargs) return wrapped_func @@ -238,6 +241,7 @@ def _bottleneck_reduce(cls, func): Methods to return a wrapped function for any function `func` for bottoleneck method, except for `median`. 
""" + def wrapped_func(self, **kwargs): from .dataarray import DataArray @@ -285,6 +289,7 @@ class DatasetRolling(Rolling): Dataset.rolling DataArray.rolling """ + def __init__(self, obj, min_periods=None, center=False, **windows): """ Moving window object for Dataset. @@ -355,6 +360,7 @@ def _reduce_method(cls, func): Return a wrapped function for injecting numpy and bottoleneck methods. see ops.inject_datasetrolling_methods """ + def wrapped_func(self, **kwargs): from .dataset import Dataset reduced = OrderedDict() @@ -367,5 +373,6 @@ def wrapped_func(self, **kwargs): return Dataset(reduced, coords=self.obj.coords) return wrapped_func + inject_bottleneck_rolling_methods(DataArrayRolling) inject_datasetrolling_methods(DatasetRolling) diff --git a/xarray/core/utils.py b/xarray/core/utils.py index 46fea23b9ff..7edca42b4dd 100644 --- a/xarray/core/utils.py +++ b/xarray/core/utils.py @@ -271,6 +271,7 @@ class SingleSlotPickleMixin(object): """Mixin class to add the ability to pickle objects whose state is defined by a single __slots__ attribute. Only necessary under Python 2. """ + def __getstate__(self): return getattr(self, self.__slots__[0]) @@ -386,6 +387,7 @@ class OrderedSet(MutableSet): The API matches the builtin set, but it preserves insertion order of elements, like an OrderedDict. """ + def __init__(self, values=None): self._ordered_dict = OrderedDict() if values is not None: diff --git a/xarray/core/variable.py b/xarray/core/variable.py index e3bead51a94..d4863014f59 100644 --- a/xarray/core/variable.py +++ b/xarray/core/variable.py @@ -287,7 +287,8 @@ def nbytes(self): @property def _in_memory(self): - return (isinstance(self._data, (np.ndarray, np.number, PandasIndexAdapter)) or + return (isinstance(self._data, (np.ndarray, np.number, + PandasIndexAdapter)) or (isinstance(self._data, indexing.MemoryCachedArray) and isinstance(self._data.array, indexing.NumpyIndexingAdapter))) @@ -381,7 +382,7 @@ def __dask_postpersist__(self): def _dask_finalize(results, array_func, array_args, dims, attrs, encoding): if isinstance(results, dict): # persist case name = array_args[0] - results = {k: v for k, v in results.items() if k[0] == name} # cull + results = {k: v for k, v in results.items() if k[0] == name} data = array_func(results, *array_args) return Variable(dims, data, attrs=attrs, encoding=encoding) @@ -1152,8 +1153,8 @@ def unstack(self, **dimensions): Parameters ---------- **dimensions : keyword arguments of the form old_dim={dim1: size1, ...} - Names of existing dimensions, and the new dimensions and sizes that they - map to. + Names of existing dimensions, and the new dimensions and sizes + that they map to. Returns ------- @@ -1248,9 +1249,9 @@ def concat(cls, variables, dim='concat_dim', positions=None, dimension is unchanged. Where to insert the new dimension is determined by the first variable. positions : None or list of integer arrays, optional - List of integer arrays which specifies the integer positions to which - to assign each dataset along the concatenated dimension. If not - supplied, objects are concatenated in the provided order. + List of integer arrays which specifies the integer positions to + which to assign each dataset along the concatenated dimension. + If not supplied, objects are concatenated in the provided order. shortcut : bool, optional This option is used internally to speed-up groupby operations. If `shortcut` is True, some checks of internal consistency between @@ -1418,8 +1419,8 @@ def rank(self, dim, pct=False): """Ranks the data. 
Equal values are assigned a rank that is the average of the ranks that - would have been otherwise assigned to all of the values within that set. - Ranks begin at 1, not 0. If pct is True, computes percentage ranks. + would have been otherwise assigned to all of the values within that + set. Ranks begin at 1, not 0. If `pct`, computes percentage ranks. NaNs in the input array are returned as NaNs. diff --git a/xarray/plot/__init__.py b/xarray/plot/__init__.py index cb0cca43788..fe2c604a89e 100644 --- a/xarray/plot/__init__.py +++ b/xarray/plot/__init__.py @@ -5,3 +5,14 @@ hist, imshow, pcolormesh) from .facetgrid import FacetGrid + +__all__ = [ + 'plot', + 'line', + 'contour', + 'contourf', + 'hist', + 'imshow', + 'pcolormesh', + 'FacetGrid', +] diff --git a/xarray/plot/plot.py b/xarray/plot/plot.py index 2cc39241556..99ab4176714 100644 --- a/xarray/plot/plot.py +++ b/xarray/plot/plot.py @@ -49,8 +49,8 @@ def _ensure_plottable(*args): other_types = [datetime] for x in args: - if not (_valid_numpy_subdtype(np.array(x), numpy_types) - or _valid_other_type(np.array(x), other_types)): + if not (_valid_numpy_subdtype(np.array(x), numpy_types) or + _valid_other_type(np.array(x), other_types)): raise TypeError('Plotting requires coordinates to be numeric ' 'or dates.') @@ -206,7 +206,7 @@ def line(darray, *args, **kwargs): xlabel, = darray.dims if x is not None and xlabel != x: raise ValueError('Input does not have specified dimension' - + ' {!r}'.format(x)) + ' {!r}'.format(x)) x = darray.coords[xlabel] diff --git a/xarray/plot/utils.py b/xarray/plot/utils.py index abd62df2296..58b4d55e0c5 100644 --- a/xarray/plot/utils.py +++ b/xarray/plot/utils.py @@ -244,7 +244,7 @@ def _determine_cmap_params(plot_data, vmin=None, vmax=None, cmap=None, levels = np.linspace(vmin, vmax, levels) else: # N in MaxNLocator refers to bins, not ticks - ticker = mpl.ticker.MaxNLocator(levels-1) + ticker = mpl.ticker.MaxNLocator(levels - 1) levels = ticker.tick_values(vmin, vmax) vmin, vmax = levels[0], levels[-1] diff --git a/xarray/tests/__init__.py b/xarray/tests/__init__.py index 9afe6f43850..7fdfca4ee26 100644 --- a/xarray/tests/__init__.py +++ b/xarray/tests/__init__.py @@ -22,7 +22,7 @@ from pandas.testing import assert_frame_equal except ImportError: # old location, for pandas < 0.20 - from pandas.util.testing import assert_frame_equal + from pandas.util.testing import assert_frame_equal # noqa: F401 try: import unittest2 as unittest @@ -32,7 +32,7 @@ try: from unittest import mock except ImportError: - import mock + import mock # noqa: F401 # import mpl and change the backend before other mpl imports try: @@ -86,7 +86,7 @@ def _importorskip(modname, minversion=None): try: import_seaborn() has_seaborn = True -except: +except ImportError: has_seaborn = False requires_seaborn = unittest.skipUnless(has_seaborn, reason='requires seaborn') diff --git a/xarray/tests/test_backends.py b/xarray/tests/test_backends.py index 970189436da..375107a1943 100644 --- a/xarray/tests/test_backends.py +++ b/xarray/tests/test_backends.py @@ -2,7 +2,6 @@ from __future__ import division from __future__ import print_function from io import BytesIO -from threading import Lock import contextlib import itertools import os.path @@ -457,7 +456,7 @@ def find_and_validate_array(obj): assert isinstance(obj, indexing.PandasIndexAdapter) else: raise TypeError('{} is wrapped by {}'.format( - type(obj.array), type(obj))) + type(obj.array), type(obj))) for k, v in ds.variables.items(): find_and_validate_array(v._data) @@ -600,7 +599,8 @@ def 
equals_latlon(obj): original.to_netcdf(tmp_file) with open_dataset(tmp_file, decode_coords=False) as ds: self.assertTrue(equals_latlon(ds['temp'].attrs['coordinates'])) - self.assertTrue(equals_latlon(ds['precip'].attrs['coordinates'])) + self.assertTrue( + equals_latlon(ds['precip'].attrs['coordinates'])) self.assertNotIn('coordinates', ds.attrs) self.assertNotIn('coordinates', ds['lat'].attrs) self.assertNotIn('coordinates', ds['lon'].attrs) @@ -642,7 +642,7 @@ def test_invalid_dataarray_names_raise(self): for name, e in zip([0, (4, 5), True, ''], [te, te, te, ve]): ds = Dataset({name: da}) with raises_regex(*e): - with self.roundtrip(ds) as actual: + with self.roundtrip(ds): pass def test_encoding_kwarg(self): @@ -794,7 +794,7 @@ def test_open_group(self): open_dataset(tmp_file, group=(1, 2, 3)) def test_open_subgroup(self): - # Create a netCDF file with a dataset stored within a group within a group + # Create a netCDF file with a dataset within a group within a group with create_tmp_file() as tmp_file: rootgrp = nc4.Dataset(tmp_file, 'w') foogrp = rootgrp.createGroup('foo') @@ -890,7 +890,8 @@ def test_open_encodings(self): actual_encoding = dict((k, v) for k, v in iteritems(actual['time'].encoding) if k in expected['time'].encoding) - self.assertDictEqual(actual_encoding, expected['time'].encoding) + self.assertDictEqual(actual_encoding, + expected['time'].encoding) def test_dump_encodings(self): # regression test for #709 @@ -915,8 +916,10 @@ def test_dump_and_open_encodings(self): with create_tmp_file() as tmp_file2: xarray_dataset.to_netcdf(tmp_file2) with nc4.Dataset(tmp_file2, 'r') as ds: - self.assertEqual(ds.variables['time'].getncattr('units'), units) - self.assertArrayEqual(ds.variables['time'], np.arange(10) + 4) + self.assertEqual( + ds.variables['time'].getncattr('units'), units) + self.assertArrayEqual( + ds.variables['time'], np.arange(10) + 4) def test_compression_encoding(self): data = create_test_data() @@ -1045,8 +1048,10 @@ def test_unsorted_index_raises(self): with self.roundtrip(ds) as ondisk: inds = np.argsort(dim1) ds2 = ondisk.isel(dim1=inds) + # Older versions of NetCDF4 raise an exception here, and if so we + # want to ensure we improve (that is, replace) the error message try: - print(ds2.randovar.values) # should raise IndexError in netCDF4 + ds2.randovar.values except IndexError as err: self.assertIn('first by calling .load', str(err)) @@ -1196,8 +1201,8 @@ def test_hidden_zarr_keys(self): # JSON only has a single array type, which maps to list in Python. # In contrast, dims in xarray is always a tuple. 
for var in expected.variables.keys(): - assert (zarr_group[var].attrs[self.DIMENSION_KEY] - == list(expected[var].dims)) + assert (zarr_group[var].attrs[self.DIMENSION_KEY] == + list(expected[var].dims)) with xr.decode_cf(store) as actual: # make sure it is hidden @@ -1599,13 +1604,14 @@ def validate_open_mfdataset_autoclose(self, engine, nfiles=10): else 'netcdf4') # split into multiple sets of temp files for ii in original.x.values: - subds = original.isel(x=slice(ii, ii+1)) + subds = original.isel(x=slice(ii, ii + 1)) subds.to_netcdf(tmpfiles[ii], engine=writeengine) # check that calculation on opened datasets works properly ds = open_mfdataset(tmpfiles, engine=readengine, autoclose=True) - self.assertAllClose(ds.x.sum().values, (nfiles*(nfiles-1))/2) + self.assertAllClose(ds.x.sum().values, + (nfiles * (nfiles - 1)) / 2) self.assertAllClose(ds.foo.sum().values, np.sum(randdata)) self.assertAllClose(ds.sum().foo.values, np.sum(randdata)) ds.close() @@ -1704,14 +1710,14 @@ def gen_datasets_with_common_coord_and_time(self): coords={ 't': (['t', ], t1), 'x': (['x', ], x) - }) + }) ds2 = Dataset(data_vars={self.var_name: (['t', 'x'], v2), self.coord_name: ('x', 2 * x)}, coords={ 't': (['t', ], t2), 'x': (['x', ], x) - }) + }) return ds1, ds2 @@ -1865,7 +1871,7 @@ def test_attrs_mfdataset(self): self.assertEqual(actual.test1, ds1.test1) # attributes from ds2 are not retained, e.g., with raises_regex(AttributeError, - 'no attribute'): + 'no attribute'): actual.test2 def test_preprocess_mfdataset(self): @@ -1983,7 +1989,7 @@ def test_dataarray_compute(self): # Test DataArray.compute() on dask backend. # The test for Dataset.compute() is already in DatasetIOTestCases; # however dask is the only tested backend which supports DataArrays - actual = DataArray([1,2]).chunk() + actual = DataArray([1, 2]).chunk() computed = actual.compute() self.assertFalse(actual._in_memory) self.assertTrue(computed._in_memory) @@ -2034,13 +2040,14 @@ def test_cmp_local_file(self): self.assertNotIn('NC_GLOBAL', actual.attrs) self.assertIn('history', actual.attrs) - # we don't check attributes exactly with assertDatasetIdentical() because - # the test DAP server seems to insert some extra attributes not found in the - # netCDF file. + # we don't check attributes exactly with assertDatasetIdentical() + # because the test DAP server seems to insert some extra + # attributes not found in the netCDF file. assert actual.attrs.keys() == expected.attrs.keys() with self.create_datasets() as (actual, expected): - self.assertDatasetEqual(actual.isel(l=2), expected.isel(l=2)) + self.assertDatasetEqual( + actual.isel(l=2), expected.isel(l=2)) # noqa: E741 with self.create_datasets() as (actual, expected): self.assertDatasetEqual(actual.isel(i=0, j=-1), @@ -2144,7 +2151,7 @@ def test_serialization(self): with create_tmp_file(suffix='.tif') as tmp_file: # data nx, ny, nz = 4, 3, 3 - data = np.arange(nx*ny*nz, + data = np.arange(nx * ny * nz, dtype=rasterio.float32).reshape(nz, ny, nx) transform = from_origin(5000, 80000, 1000, 2000.) with rasterio.open( @@ -2171,7 +2178,7 @@ def test_utm(self): with create_tmp_file(suffix='.tif') as tmp_file: # data nx, ny, nz = 4, 3, 3 - data = np.arange(nx*ny*nz, + data = np.arange(nx * ny * nz, dtype=rasterio.float32).reshape(nz, ny, nx) transform = from_origin(5000, 80000, 1000, 2000.) 
with rasterio.open( @@ -2185,12 +2192,11 @@ def test_utm(self): dx, dy = s.res[0], -s.res[1] # Tests - expected = DataArray(data, dims=('band', 'y', 'x'), - coords={ - 'band': [1, 2, 3], - 'y': -np.arange(ny) * 2000 + 80000 + dy/2, - 'x': np.arange(nx) * 1000 + 5000 + dx/2, - }) + expected = DataArray(data, dims=('band', 'y', 'x'), coords={ + 'band': [1, 2, 3], + 'y': -np.arange(ny) * 2000 + 80000 + dy / 2, + 'x': np.arange(nx) * 1000 + 5000 + dx / 2, + }) with xr.open_rasterio(tmp_file) as rioda: assert_allclose(rioda, expected) assert 'crs' in rioda.attrs @@ -2226,8 +2232,8 @@ def test_platecarree(self): expected = DataArray(data[np.newaxis, ...], dims=('band', 'y', 'x'), coords={'band': [1], - 'y': -np.arange(ny)*2 + 2 + dy/2, - 'x': np.arange(nx)*0.5 + 1 + dx/2, + 'y': -np.arange(ny) * 2 + 2 + dy / 2, + 'x': np.arange(nx) * 0.5 + 1 + dx / 2, }) with xr.open_rasterio(tmp_file) as rioda: assert_allclose(rioda, expected) @@ -2249,7 +2255,7 @@ def test_indexing(self): with create_tmp_file(suffix='.tif') as tmp_file: # data nx, ny, nz = 8, 10, 3 - data = np.arange(nx*ny*nz, + data = np.arange(nx * ny * nz, dtype=rasterio.float32).reshape(nz, ny, nx) transform = from_origin(1, 2, 0.5, 2.) with rasterio.open( @@ -2262,10 +2268,10 @@ def test_indexing(self): dx, dy = s.res[0], -s.res[1] # ref - expected = DataArray(data, dims=('band', 'y', 'x'), - coords={'x': (np.arange(nx)*0.5 + 1) + dx/2, - 'y': (-np.arange(ny)*2 + 2) + dy/2, - 'band': [1, 2, 3]}) + expected = DataArray(data, dims=('band', 'y', 'x'), coords={ + 'x': (np.arange(nx) * 0.5 + 1) + dx / 2, + 'y': (-np.arange(ny) * 2 + 2) + dy / 2, + 'band': [1, 2, 3]}) with xr.open_rasterio(tmp_file, cache=False) as actual: @@ -2341,7 +2347,7 @@ def test_caching(self): with create_tmp_file(suffix='.tif') as tmp_file: # data nx, ny, nz = 8, 10, 3 - data = np.arange(nx*ny*nz, + data = np.arange(nx * ny * nz, dtype=rasterio.float32).reshape(nz, ny, nx) transform = from_origin(1, 2, 0.5, 2.) with rasterio.open( @@ -2354,10 +2360,11 @@ def test_caching(self): dx, dy = s.res[0], -s.res[1] # ref - expected = DataArray(data, dims=('band', 'y', 'x'), - coords={'x': (np.arange(nx)*0.5 + 1) + dx/2, - 'y': (-np.arange(ny)*2 + 2) + dy/2, - 'band': [1, 2, 3]}) + expected = DataArray( + data, dims=('band', 'y', 'x'), coords={ + 'x': (np.arange(nx) * 0.5 + 1) + dx / 2, + 'y': (-np.arange(ny) * 2 + 2) + dy / 2, + 'band': [1, 2, 3]}) # Cache is the default with xr.open_rasterio(tmp_file) as actual: @@ -2385,7 +2392,7 @@ def test_chunks(self): with create_tmp_file(suffix='.tif') as tmp_file: # data nx, ny, nz = 8, 10, 3 - data = np.arange(nx*ny*nz, + data = np.arange(nx * ny * nz, dtype=rasterio.float32).reshape(nz, ny, nx) transform = from_origin(1, 2, 0.5, 2.) with rasterio.open( @@ -2405,10 +2412,10 @@ def test_chunks(self): assert 'open_rasterio' in actual.data.name # ref - expected = DataArray(data, dims=('band', 'y', 'x'), - coords={'x': np.arange(nx)*0.5 + 1 + dx/2, - 'y': -np.arange(ny)*2 + 2 + dy/2, - 'band': [1, 2, 3]}) + expected = DataArray(data, dims=('band', 'y', 'x'), coords={ + 'x': np.arange(nx) * 0.5 + 1 + dx / 2, + 'y': -np.arange(ny) * 2 + 2 + dy / 2, + 'band': [1, 2, 3]}) # do some arithmetic ac = actual.mean() @@ -2427,7 +2434,7 @@ def test_ENVI_tags(self): with create_tmp_file(suffix='.dat') as tmp_file: # data nx, ny, nz = 4, 3, 3 - data = np.arange(nx*ny*nz, + data = np.arange(nx * ny * nz, dtype=rasterio.float32).reshape(nz, ny, nx) transform = from_origin(5000, 80000, 1000, 2000.) 
         with rasterio.open(
@@ -2438,25 +2445,22 @@ def test_ENVI_tags(self):
                     transform=transform,
                     dtype=rasterio.float32) as s:
                 s.update_tags(
-                    ns='ENVI',
-                    description='{Tagged file}',
-                    wavelength='{123.000000, 234.234000, 345.345678}',
-                    fwhm='{1.000000, 0.234000, 0.000345}')
+                        ns='ENVI',
+                        description='{Tagged file}',
+                        wavelength='{123.000000, 234.234000, 345.345678}',
+                        fwhm='{1.000000, 0.234000, 0.000345}')
                 s.write(data)
                 dx, dy = s.res[0], -s.res[1]
 
             # Tests
-            expected = DataArray(data, dims=('band', 'y', 'x'),
-                                 coords={
-                                     'band': [1, 2, 3],
-                                     'y': -np.arange(ny) * 2000 + 80000 + dy/2,
-                                     'x': np.arange(nx) * 1000 + 5000 + dx/2,
-                                     'wavelength': (
-                                         'band',
-                                         np.array([123, 234.234, 345.345678])),
-                                     'fwhm': (
-                                         'band',
-                                         np.array([1, 0.234, 0.000345]))})
+            coords = {
+                'band': [1, 2, 3],
+                'y': -np.arange(ny) * 2000 + 80000 + dy / 2,
+                'x': np.arange(nx) * 1000 + 5000 + dx / 2,
+                'wavelength': ('band', np.array([123, 234.234, 345.345678])),
+                'fwhm': ('band', np.array([1, 0.234, 0.000345])),
+            }
+            expected = DataArray(data, dims=('band', 'y', 'x'), coords=coords)
 
             with xr.open_rasterio(tmp_file) as rioda:
                 assert_allclose(rioda, expected)
diff --git a/xarray/tests/test_coding.py b/xarray/tests/test_coding.py
index 9981a63e105..d1b54fca95e 100644
--- a/xarray/tests/test_coding.py
+++ b/xarray/tests/test_coding.py
@@ -1,11 +1,10 @@
 import numpy as np
-import pytest
 
 import xarray as xr
 from xarray.core.pycompat import suppress
 from xarray.coding import variables
 
-from . import requires_dask, raises_regex, assert_identical
+from . import requires_dask, assert_identical
 
 with suppress(ImportError):
     import dask.array as da
diff --git a/xarray/tests/test_coding_times.py b/xarray/tests/test_coding_times.py
index f4c726355a5..6046e280136 100644
--- a/xarray/tests/test_coding_times.py
+++ b/xarray/tests/test_coding_times.py
@@ -24,29 +24,29 @@ class TestDatetime(TestCase):
     def test_cf_datetime(self):
         import netCDF4 as nc4
         for num_dates, units in [
-            (np.arange(10), 'days since 2000-01-01'),
-            (np.arange(10).reshape(2, 5), 'days since 2000-01-01'),
-            (12300 + np.arange(5), 'hours since 1680-01-01 00:00:00'),
-            # here we add a couple minor formatting errors to test
-            # the robustness of the parsing algorithm.
-            (12300 + np.arange(5), 'hour since 1680-01-01 00:00:00'),
-            (12300 + np.arange(5), u'Hour since 1680-01-01 00:00:00'),
-            (12300 + np.arange(5), ' Hour since 1680-01-01 00:00:00 '),
-            (10, 'days since 2000-01-01'),
-            ([10], 'daYs since 2000-01-01'),
-            ([[10]], 'days since 2000-01-01'),
-            ([10, 10], 'days since 2000-01-01'),
-            (np.array(10), 'days since 2000-01-01'),
-            (0, 'days since 1000-01-01'),
-            ([0], 'days since 1000-01-01'),
-            ([[0]], 'days since 1000-01-01'),
-            (np.arange(2), 'days since 1000-01-01'),
-            (np.arange(0, 100000, 20000), 'days since 1900-01-01'),
-            (17093352.0, 'hours since 1-1-1 00:00:0.0'),
-            ([0.5, 1.5], 'hours since 1900-01-01T00:00:00'),
-            (0, 'milliseconds since 2000-01-01T00:00:00'),
-            (0, 'microseconds since 2000-01-01T00:00:00'),
-        ]:
+                (np.arange(10), 'days since 2000-01-01'),
+                (np.arange(10).reshape(2, 5), 'days since 2000-01-01'),
+                (12300 + np.arange(5), 'hours since 1680-01-01 00:00:00'),
+                # here we add a couple minor formatting errors to test
+                # the robustness of the parsing algorithm.
+                (12300 + np.arange(5), 'hour since 1680-01-01 00:00:00'),
+                (12300 + np.arange(5), u'Hour since 1680-01-01 00:00:00'),
+                (12300 + np.arange(5), ' Hour since 1680-01-01 00:00:00 '),
+                (10, 'days since 2000-01-01'),
+                ([10], 'daYs since 2000-01-01'),
+                ([[10]], 'days since 2000-01-01'),
+                ([10, 10], 'days since 2000-01-01'),
+                (np.array(10), 'days since 2000-01-01'),
+                (0, 'days since 1000-01-01'),
+                ([0], 'days since 1000-01-01'),
+                ([[0]], 'days since 1000-01-01'),
+                (np.arange(2), 'days since 1000-01-01'),
+                (np.arange(0, 100000, 20000), 'days since 1900-01-01'),
+                (17093352.0, 'hours since 1-1-1 00:00:0.0'),
+                ([0.5, 1.5], 'hours since 1900-01-01T00:00:00'),
+                (0, 'milliseconds since 2000-01-01T00:00:00'),
+                (0, 'microseconds since 2000-01-01T00:00:00'),
+                ]:
             for calendar in ['standard', 'gregorian', 'proleptic_gregorian']:
                 expected = _ensure_naive_tz(
                     nc4.num2date(num_dates, units, calendar))
@@ -224,12 +224,12 @@ def test_decode_non_standard_calendar_fallback(self):
     @requires_netCDF4
     def test_cf_datetime_nan(self):
         for num_dates, units, expected_list in [
-            ([np.nan], 'days since 2000-01-01', ['NaT']),
-            ([np.nan, 0], 'days since 2000-01-01',
-             ['NaT', '2000-01-01T00:00:00Z']),
-            ([np.nan, 0, 1], 'days since 2000-01-01',
-             ['NaT', '2000-01-01T00:00:00Z', '2000-01-02T00:00:00Z']),
-            ]:
+                ([np.nan], 'days since 2000-01-01', ['NaT']),
+                ([np.nan, 0], 'days since 2000-01-01',
+                 ['NaT', '2000-01-01T00:00:00Z']),
+                ([np.nan, 0, 1], 'days since 2000-01-01',
+                 ['NaT', '2000-01-01T00:00:00Z', '2000-01-02T00:00:00Z']),
+                ]:
             with warnings.catch_warnings():
                 warnings.filterwarnings('ignore', 'All-NaN')
                 actual = coding.times.decode_cf_datetime(num_dates, units)
diff --git a/xarray/tests/test_combine.py b/xarray/tests/test_combine.py
index b2bbba34fef..a72532086da 100644
--- a/xarray/tests/test_combine.py
+++ b/xarray/tests/test_combine.py
@@ -256,7 +256,8 @@ def test_concat(self):
             stacked = concat(grouped, ds.indexes['x'])
             self.assertDataArrayIdentical(foo, stacked)
 
-        actual = concat([foo[0], foo[1]], pd.Index([0, 1])).reset_coords(drop=True)
+        actual = concat([foo[0], foo[1]], pd.Index([0, 1])
+                        ).reset_coords(drop=True)
         expected = foo[:2].rename({'x': 'concat_dim'})
         self.assertDataArrayIdentical(expected, actual)
 
diff --git a/xarray/tests/test_computation.py b/xarray/tests/test_computation.py
index 430a1a027cb..23e77b83455 100644
--- a/xarray/tests/test_computation.py
+++ b/xarray/tests/test_computation.py
@@ -488,10 +488,10 @@ def add(a, b, keep_attrs):
     actual = add(a, b, keep_attrs=True)
     assert_identical(actual.attrs, a.attrs)
 
-    a = xr.Dataset({'x': ('x', [1, 2]), 'x': [0, 1]})
+    a = xr.Dataset({'x': [0, 1]})
     a.attrs['attr'] = 'ds'
     a.x.attrs['attr'] = 'da'
-    b = xr.Dataset({'x': ('x', [1, 1]), 'x': [0, 1]})
+    b = xr.Dataset({'x': [0, 1]})
 
     actual = add(a, b, keep_attrs=False)
     assert not actual.attrs
@@ -666,8 +666,8 @@ def test_apply_dask_multiple_inputs():
     import dask.array as da
 
     def covariance(x, y):
-        return ((x - x.mean(axis=-1, keepdims=True))
-                * (y - y.mean(axis=-1, keepdims=True))).mean(axis=-1)
+        return ((x - x.mean(axis=-1, keepdims=True)) *
+                (y - y.mean(axis=-1, keepdims=True))).mean(axis=-1)
 
     rs = np.random.RandomState(42)
     array1 = da.from_array(rs.randn(4, 4), chunks=(2, 4))
diff --git a/xarray/tests/test_conventions.py b/xarray/tests/test_conventions.py
index e482fd44af6..1d0beb33028 100644
--- a/xarray/tests/test_conventions.py
+++ b/xarray/tests/test_conventions.py
@@ -21,7 +21,6 @@
 
 
 B = IndexerMaker(indexing.BasicIndexer)
-O = IndexerMaker(indexing.OuterIndexer)
 V = IndexerMaker(indexing.VectorizedIndexer)
 
 
@@ -189,7 +188,7 @@ def test_incompatible_attributes(self):
             Variable(['t'], pd.to_timedelta(['1 day']), {'units': 'foobar'}),
             Variable(['t'], [0, 1, 2], {'add_offset': 0}, {'add_offset': 2}),
             Variable(['t'], [0, 1, 2], {'_FillValue': 0}, {'_FillValue': 2}),
-            ]
+        ]
         for var in invalid_vars:
             with pytest.raises(ValueError):
                 conventions.encode_cf_variable(var)
@@ -282,12 +281,14 @@ def test_decode_cf_with_drop_variables(self):
        original = Dataset({
            't': ('t', [0, 1, 2], {'units': 'days since 2000-01-01'}),
            'x': ("x", [9, 8, 7], {'units': 'km'}),
-            'foo': (('t', 'x'), [[0, 0, 0], [1, 1, 1], [2, 2, 2]], {'units': 'bar'}),
+            'foo': (('t', 'x'), [[0, 0, 0], [1, 1, 1], [2, 2, 2]],
+                    {'units': 'bar'}),
            'y': ('t', [5, 10, -999], {'_FillValue': -999})
        })
        expected = Dataset({
            't': pd.date_range('2000-01-01', periods=3),
-            'foo': (('t', 'x'), [[0, 0, 0], [1, 1, 1], [2, 2, 2]], {'units': 'bar'}),
+            'foo': (('t', 'x'), [[0, 0, 0], [1, 1, 1], [2, 2, 2]],
+                    {'units': 'bar'}),
            'y': ('t', [5, 10, np.nan])
        })
        actual = conventions.decode_cf(original, drop_variables=("x",))
@@ -337,6 +338,7 @@ class NullWrapper(utils.NDArrayMixin):
     Just for testing, this lets us create a numpy array directly
     but make it look like its not in memory yet.
     """
+
     def __init__(self, array):
         self.array = array
 
diff --git a/xarray/tests/test_dask.py b/xarray/tests/test_dask.py
index 0e8a1827026..2e9179d429a 100644
--- a/xarray/tests/test_dask.py
+++ b/xarray/tests/test_dask.py
@@ -19,8 +19,8 @@
 from xarray.tests import mock
 
 dask = pytest.importorskip('dask')
-import dask.array as da
-import dask.dataframe as dd
+import dask.array as da  # noqa: E402  # allow importorskip call above this
+import dask.dataframe as dd  # noqa: E402
 
 
 class DaskTestCase(TestCase):
@@ -171,7 +171,8 @@ def test_missing_values(self):
         eager_var = Variable('x', values)
         lazy_var = Variable('x', data)
         self.assertLazyAndIdentical(eager_var, lazy_var.fillna(lazy_var))
-        self.assertLazyAndIdentical(Variable('x', range(4)), lazy_var.fillna(2))
+        self.assertLazyAndIdentical(Variable('x', range(4)),
+                                    lazy_var.fillna(2))
         self.assertLazyAndIdentical(eager_var.count(), lazy_var.count())
 
     def test_concat(self):
@@ -182,7 +183,8 @@ def test_concat(self):
         self.assertLazyAndIdentical(u[:2], Variable.concat([u[0], v[1]], 'x'))
         self.assertLazyAndIdentical(u[:2], Variable.concat([v[0], u[1]], 'x'))
         self.assertLazyAndIdentical(
-            u[:3], Variable.concat([v[[0, 2]], v[[1]]], 'x', positions=[[0, 2], [1]]))
+            u[:3],
+            Variable.concat([v[[0, 2]], v[[1]]], 'x', positions=[[0, 2], [1]]))
 
     def test_missing_methods(self):
         v = self.lazy_var
@@ -331,13 +333,15 @@ def test_concat_loads_variables(self):
         assert isinstance(out['d'].data, np.ndarray)
         assert isinstance(out['c'].data, np.ndarray)
 
-        out = xr.concat([ds1, ds2, ds3], dim='n', data_vars='all', coords='all')
+        out = xr.concat(
+            [ds1, ds2, ds3], dim='n', data_vars='all', coords='all')
         # no extra kernel calls
         assert kernel_call_count == 6
         assert isinstance(out['d'].data, dask.array.Array)
         assert isinstance(out['c'].data, dask.array.Array)
 
-        out = xr.concat([ds1, ds2, ds3], dim='n', data_vars=['d'], coords=['c'])
+        out = xr.concat(
+            [ds1, ds2, ds3], dim='n', data_vars=['d'], coords=['c'])
         # no extra kernel calls
         assert kernel_call_count == 6
         assert isinstance(out['d'].data, dask.array.Array)
@@ -358,7 +362,8 @@ def test_concat_loads_variables(self):
 
         # When the test for different turns true halfway through,
         # stop computing variables as it would not have any benefit
-        ds4 = Dataset(data_vars={'d': ('x', [2.0])}, coords={'c': ('x', [2.0])})
+        ds4 = Dataset(data_vars={'d': ('x', [2.0])},
+                      coords={'c': ('x', [2.0])})
         out = xr.concat([ds1, ds2, ds4, ds3], dim='n', data_vars='different',
                         coords='different')
         # the variables of ds1 and ds2 were computed, but those of ds3 didn't
@@ -653,7 +658,7 @@ def test_to_dask_dataframe_2D_set_index(self):
     def test_to_dask_dataframe_coordinates(self):
         # Test if coordinate is also a dask array
         x = da.from_array(np.random.randn(10), chunks=4)
-        t = da.from_array(np.arange(10)*2, chunks=4)
+        t = da.from_array(np.arange(10) * 2, chunks=4)
 
         ds = Dataset(OrderedDict([('a', ('t', x)),
                                   ('t', ('t', t))]))
@@ -829,7 +834,8 @@ def test_dataarray_with_dask_coords():
     (array2,) = dask.compute(array)
     assert not dask.is_dask_collection(array2)
 
-    assert all(isinstance(v._variable.data, np.ndarray) for v in array2.coords.values())
+    assert all(isinstance(v._variable.data, np.ndarray)
+               for v in array2.coords.values())
 
 
 def test_basic_compute():
diff --git a/xarray/tests/test_dataarray.py b/xarray/tests/test_dataarray.py
index eb37cbe2b26..296873dec45 100644
--- a/xarray/tests/test_dataarray.py
+++ b/xarray/tests/test_dataarray.py
@@ -203,11 +203,11 @@ def test_sizes(self):
     def test_encoding(self):
         expected = {'foo': 'bar'}
         self.dv.encoding['foo'] = 'bar'
-        assert expected, self.d == encoding
+        assert expected == self.dv.encoding
 
         expected = {'baz': 0}
         self.dv.encoding = expected
-        assert expected, self.d == encoding
+        assert expected == self.dv.encoding
         self.assertIsNot(expected, self.dv.encoding)
 
     def test_constructor(self):
@@ -437,7 +437,7 @@ def test_getitem(self):
         self.assertDataArrayIdentical(self.ds['x'], x)
         self.assertDataArrayIdentical(self.ds['y'], y)
 
-        I = ReturnItem()
+        I = ReturnItem()  # noqa: E741  # allow ambiguous name
         for i in [I[:], I[...], I[x.values], I[x.variable], I[x], I[x, y],
                   I[x.values > -1], I[x.variable > -1], I[x > -1],
                   I[x > -1, y > -1]]:
@@ -707,7 +707,7 @@ def test_isel_fancy(self):
 
         # make sure we're raising errors in the right places
         with raises_regex(IndexError,
-                'Dimensions of indexers mismatch'):
+                          'Dimensions of indexers mismatch'):
             da.isel(y=(('points', ), [1, 2]), x=(('points', ), [1, 2, 3]))
 
         # tests using index or DataArray as indexers
@@ -869,20 +869,20 @@ def test_isel_points(self):
 
         # make sure we're raising errors in the right places
         with raises_regex(ValueError,
-                'All indexers must be the same length'):
+                          'All indexers must be the same length'):
             da.isel_points(y=[1, 2], x=[1, 2, 3])
         with raises_regex(ValueError,
-                'dimension bad_key does not exist'):
+                          'dimension bad_key does not exist'):
             da.isel_points(bad_key=[1, 2])
         with raises_regex(TypeError, 'Indexers must be integers'):
             da.isel_points(y=[1.5, 2.2])
         with raises_regex(TypeError, 'Indexers must be integers'):
             da.isel_points(x=[1, 2, 3], y=slice(3))
         with raises_regex(ValueError,
-                'Indexers must be 1 dimensional'):
+                          'Indexers must be 1 dimensional'):
             da.isel_points(y=1, x=2)
         with raises_regex(ValueError,
-                'Existing dimension names are not'):
+                          'Existing dimension names are not'):
             da.isel_points(y=[1, 2], x=[1, 2], dim='x')
 
         # using non string dims
@@ -2205,7 +2205,7 @@ def test_resample_drop_nondim_coords(self):
         ys = np.arange(3)
         times = pd.date_range('2000-01-01', freq='6H', periods=5)
         data = np.tile(np.arange(5), (6, 3, 1))
-        xx, yy = np.meshgrid(xs*5, ys*2.5)
+        xx, yy = np.meshgrid(xs * 5, ys * 2.5)
         tt = np.arange(len(times), dtype=int)
         array = DataArray(data,
                           {'time': times, 'x': xs, 'y': ys},
@@ -2381,7 +2381,7 @@ def test_upsample_interpolate(self):
         expected_times = times.to_series().resample('1H').asfreq().index
         # Split the times into equal sub-intervals to simulate the 6 hour
         # to 1 hour up-sampling
-        new_times_idx = np.linspace(0, len(times)-1, len(times)*5)
+        new_times_idx = np.linspace(0, len(times) - 1, len(times) * 5)
         for kind in ['linear', 'nearest', 'zero', 'slinear',
                      'quadratic', 'cubic']:
             actual = array.resample(time='1H').interpolate(kind)
@@ -2420,7 +2420,7 @@ def test_upsample_interpolate_dask(self):
                           ('x', 'y', 'time'))
 
         with raises_regex(TypeError,
-                "dask arrays are not yet supported"):
+                          "dask arrays are not yet supported"):
             array.resample(time='1H').interpolate('linear')
 
     def test_align(self):
@@ -2890,8 +2890,8 @@ def test_to_and_from_iris(self):
         for coord, orginal_key in zip((actual.coords()), original.coords):
             original_coord = original.coords[orginal_key]
             self.assertEqual(coord.var_name, original_coord.name)
-            self.assertArrayEqual(coord.points,
-                                  CFDatetimeCoder().encode(original_coord).values)
+            self.assertArrayEqual(
+                coord.points, CFDatetimeCoder().encode(original_coord).values)
             self.assertEqual(actual.coord_dims(coord),
                              original.get_axis_num
                              (original.coords[coord.var_name].dims))
@@ -2930,13 +2930,14 @@ def test_to_and_from_iris_dask(self):
         coord_dict['distance2'] = ('distance', [0, 1], {'foo': 'bar'})
         coord_dict['time2'] = (('distance', 'time'), [[0, 1, 2], [2, 3, 4]])
 
-        original = DataArray(da.from_array(
-            np.arange(-1, 5, dtype='float').reshape(2, 3), 3), coord_dict,
-            name='Temperature',
-            attrs={'baz': 123, 'units': 'Kelvin',
-                   'standard_name': 'fire_temperature',
-                   'long_name': 'Fire Temperature'},
-            dims=('distance', 'time'))
+        original = DataArray(
+            da.from_array(np.arange(-1, 5, dtype='float').reshape(2, 3), 3),
+            coord_dict,
+            name='Temperature',
+            attrs=dict(baz=123, units='Kelvin',
+                       standard_name='fire_temperature',
+                       long_name='Fire Temperature'),
+            dims=('distance', 'time'))
 
         # Set a bad value to test the masking logic
         original.data = da.ma.masked_less(original.data, 0)
@@ -2962,8 +2963,8 @@ def test_to_and_from_iris_dask(self):
         for coord, orginal_key in zip((actual.coords()), original.coords):
             original_coord = original.coords[orginal_key]
             self.assertEqual(coord.var_name, original_coord.name)
-            self.assertArrayEqual(coord.points,
-                                  CFDatetimeCoder().encode(original_coord).values)
+            self.assertArrayEqual(
+                coord.points, CFDatetimeCoder().encode(original_coord).values)
             self.assertEqual(actual.coord_dims(coord),
                              original.get_axis_num
                             (original.coords[coord.var_name].dims))
@@ -3257,10 +3258,10 @@ def test_rank(self):
         self.assertDataArrayEqual(ar.rank('dim_0'), expect_0)
         self.assertDataArrayEqual(ar.rank('dim_1'), expect_1)
         # int
-        x = DataArray([3,2,1])
+        x = DataArray([3, 2, 1])
         self.assertDataArrayEqual(x.rank('dim_0'), x)
         # str
-        y = DataArray(['c', 'b', 'a'])
+        y = DataArray(['c', 'b', 'a'])
         self.assertDataArrayEqual(y.rank('dim_0'), x)
 
         x = DataArray([3.0, 1.0, np.nan, 2.0, 4.0], dims=('z',))
diff --git a/xarray/tests/test_dataset.py b/xarray/tests/test_dataset.py
index 4f592d53ee5..1a4214e1090 100644
--- a/xarray/tests/test_dataset.py
+++ b/xarray/tests/test_dataset.py
@@ -103,7 +103,7 @@ def test_repr(self):
             var2     (dim1, dim2) float64 1.162 -1.097 -2.123 1.04 -0.4034 -0.126 ...
             var3     (dim3, dim1) float64 0.5565 -0.2121 0.4563 1.545 -0.2397 0.1433 ...
         Attributes:
-            foo: bar""") % data['dim3'].dtype
+            foo: bar""") % data['dim3'].dtype  # noqa: E501
         actual = '\n'.join(x.rstrip() for x in repr(data).split('\n'))
         print(actual)
         self.assertEqual(expected, actual)
@@ -171,7 +171,8 @@ def test_repr_multiindex(self):
 
     def test_repr_period_index(self):
         data = create_test_data(seed=456)
-        data.coords['time'] = pd.period_range('2000-01-01', periods=20, freq='B')
+        data.coords['time'] = pd.period_range(
+            '2000-01-01', periods=20, freq='B')
 
         # check that creating the repr doesn't raise an error #GH645
         repr(data)
@@ -319,7 +320,7 @@ def test_constructor_pandas_sequence(self):
 
         ds = self.make_example_math_dataset()
         pandas_objs = OrderedDict(
-            (var_name, ds[var_name].to_pandas()) for var_name in ['foo','bar']
+            (var_name, ds[var_name].to_pandas()) for var_name in ['foo', 'bar']
         )
         ds_based_on_pandas = Dataset(pandas_objs, ds.coords, attrs=ds.attrs)
         del ds_based_on_pandas['x']
@@ -336,17 +337,16 @@ def test_constructor_pandas_single(self):
 
         das = [
             DataArray(np.random.rand(4), dims=['a']),  # series
-            DataArray(np.random.rand(4,3), dims=['a', 'b']),  # df
-            DataArray(np.random.rand(4,3,2), dims=['a','b','c']),  # panel
-            ]
+            DataArray(np.random.rand(4, 3), dims=['a', 'b']),  # df
+            DataArray(np.random.rand(4, 3, 2), dims=['a', 'b', 'c']),  # panel
+        ]
 
-        for da in das:
-            pandas_obj = da.to_pandas()
+        for a in das:
+            pandas_obj = a.to_pandas()
             ds_based_on_pandas = Dataset(pandas_obj)
             for dim in ds_based_on_pandas.data_vars:
                 self.assertArrayEqual(ds_based_on_pandas[dim], pandas_obj[dim])
 
-
     def test_constructor_compat(self):
         data = OrderedDict([('x', DataArray(0, coords={'y': 1})),
                             ('y', ('z', [1, 1, 1]))])
@@ -523,8 +523,10 @@ def test_coords_properties(self):
 
         self.assertItemsEqual(['x', 'y', 'a', 'b'], list(data.coords))
 
-        self.assertVariableIdentical(data.coords['x'].variable, data['x'].variable)
-        self.assertVariableIdentical(data.coords['y'].variable, data['y'].variable)
+        self.assertVariableIdentical(data.coords['x'].variable,
+                                     data['x'].variable)
+        self.assertVariableIdentical(data.coords['y'].variable,
+                                     data['y'].variable)
 
         self.assertIn('x', data.coords)
         self.assertIn('a', data.coords)
@@ -931,7 +933,7 @@ def test_isel_fancy(self):
                                            dim1=(('points', ), pdim1)))
         # make sure we're raising errors in the right places
         with raises_regex(IndexError,
-                'Dimensions of indexers mismatch'):
+                          'Dimensions of indexers mismatch'):
             data.isel(dim1=(('points', ), [1, 2]),
                       dim2=(('points', ), [1, 2, 3]))
         with raises_regex(TypeError, 'cannot use a Dataset'):
@@ -1197,8 +1199,8 @@ def test_sel_dataarray_mindex(self):
             mds.sel(one=['a', 'b'])
 
         with raises_regex(ValueError, 'Vectorized selection is '
-                          'not available along MultiIndex variable:'
-                          ' x'):
+                                      'not available along MultiIndex variable:'
+                                      ' x'):
             mds.sel(x=xr.DataArray([np.array(midx[:2]), np.array(midx[-2:])],
                                    dims=['a', 'b']))
 
@@ -1250,20 +1252,20 @@ def test_isel_points(self):
 
         # make sure we're raising errors in the right places
         with raises_regex(ValueError,
-                'All indexers must be the same length'):
+                          'All indexers must be the same length'):
             data.isel_points(dim1=[1, 2], dim2=[1, 2, 3])
         with raises_regex(ValueError,
-                'dimension bad_key does not exist'):
+                          'dimension bad_key does not exist'):
             data.isel_points(bad_key=[1, 2])
         with raises_regex(TypeError, 'Indexers must be integers'):
             data.isel_points(dim1=[1.5, 2.2])
         with raises_regex(TypeError, 'Indexers must be integers'):
             data.isel_points(dim1=[1, 2, 3], dim2=slice(3))
         with raises_regex(ValueError,
-                'Indexers must be 1 dimensional'):
+                          'Indexers must be 1 dimensional'):
             data.isel_points(dim1=1, dim2=2)
         with raises_regex(ValueError,
-                'Existing dimension names are not valid'):
+                          'Existing dimension names are not valid'):
             data.isel_points(dim1=[1, 2], dim2=[1, 2], dim='dim2')
 
         # test to be sure we keep around variables that were not indexed
@@ -1285,7 +1287,7 @@ def test_isel_points(self):
         self.assertDataArrayIdentical(actual['station'].drop(['dim2']),
                                       stations['station'])
 
-        # make sure we get the default 'points' coordinate when a list is passed
+        # make sure we get the default 'points' coordinate when passed a list
         actual = data.isel_points(dim1=stations['dim1s'],
                                   dim2=stations['dim2s'],
                                   dim=['A', 'B', 'C'])
@@ -1295,7 +1297,8 @@ def test_isel_points(self):
         # test index
         actual = data.isel_points(dim1=stations['dim1s'].values,
                                   dim2=stations['dim2s'].values,
-                                  dim=pd.Index(['A', 'B', 'C'], name='letters'))
+                                  dim=pd.Index(['A', 'B', 'C'],
+                                               name='letters'))
         assert 'letters' in actual.coords
 
         # can pass a numpy array
@@ -1625,7 +1628,8 @@ def test_align(self):
         self.assertDatasetIdentical(left2, right2)
 
         left2, right2 = align(left, right, join='outer')
-        self.assertVariableEqual(left2['dim3'].variable, right2['dim3'].variable)
+        self.assertVariableEqual(left2['dim3'].variable,
+                                 right2['dim3'].variable)
         self.assertArrayEqual(left2['dim3'], union)
         self.assertDatasetIdentical(left2.sel(dim3=intersection),
                                     right2.sel(dim3=intersection))
@@ -1633,15 +1637,18 @@ def test_align(self):
         self.assertTrue(np.isnan(right2['var3'][:2]).all())
 
         left2, right2 = align(left, right, join='left')
-        self.assertVariableEqual(left2['dim3'].variable, right2['dim3'].variable)
+        self.assertVariableEqual(left2['dim3'].variable,
+                                 right2['dim3'].variable)
         self.assertVariableEqual(left2['dim3'].variable, left['dim3'].variable)
         self.assertDatasetIdentical(left2.sel(dim3=intersection),
                                     right2.sel(dim3=intersection))
         self.assertTrue(np.isnan(right2['var3'][:2]).all())
 
         left2, right2 = align(left, right, join='right')
-        self.assertVariableEqual(left2['dim3'].variable, right2['dim3'].variable)
-        self.assertVariableEqual(left2['dim3'].variable, right['dim3'].variable)
+        self.assertVariableEqual(left2['dim3'].variable,
+                                 right2['dim3'].variable)
+        self.assertVariableEqual(left2['dim3'].variable,
+                                 right['dim3'].variable)
         self.assertDatasetIdentical(left2.sel(dim3=intersection),
                                     right2.sel(dim3=intersection))
         self.assertTrue(np.isnan(left2['var3'][-2:]).all())
@@ -1685,7 +1692,7 @@ def test_align_nocopy(self):
         y = Dataset({'foo': DataArray([1, 2], coords=[('x', [1, 2])])})
         expected_x2 = x
         expected_y2 = Dataset({'foo': DataArray([1, 2, np.nan],
-                               coords=[('x', [1, 2, 3])])})
+                                                coords=[('x', [1, 2, 3])])})
 
         x2, y2 = align(x, y, copy=False, join='outer')
         self.assertDatasetIdentical(expected_x2, x2)
@@ -1695,14 +1702,15 @@ def test_align_nocopy(self):
         x2, y2 = align(x, y, copy=True, join='outer')
         self.assertDatasetIdentical(expected_x2, x2)
         self.assertDatasetIdentical(expected_y2, y2)
-        assert source_ndarray(x['foo'].data) is not source_ndarray(x2['foo'].data)
+        assert source_ndarray(x['foo'].data) is not \
+            source_ndarray(x2['foo'].data)
 
     def test_align_indexes(self):
         x = Dataset({'foo': DataArray([1, 2, 3], dims='x',
-                     coords=[('x', [1, 2, 3])])})
+                                      coords=[('x', [1, 2, 3])])})
         x2, = align(x, indexes={'x': [2, 3, 1]})
         expected_x2 = Dataset({'foo': DataArray([2, 3, 1], dims='x',
-                               coords={'x': [2, 3, 1]})})
+                                                coords={'x': [2, 3, 1]})})
         self.assertDatasetIdentical(expected_x2, x2)
 
     def test_align_non_unique(self):
@@ -1720,7 +1728,7 @@ def test_broadcast(self):
         expected = Dataset({'foo': (('x', 'y'), [[0, 0]]),
                             'bar': (('x', 'y'), [[1, 1]]),
                             'baz': (('x', 'y'), [[2, 3]])},
-                            {'c': ('x', [4])})
+                           {'c': ('x', [4])})
         actual, = broadcast(ds)
         self.assertDatasetIdentical(expected, actual)
 
@@ -1745,34 +1753,42 @@ def test_broadcast_nocopy(self):
 
         actual_x, = broadcast(x)
         self.assertDatasetIdentical(x, actual_x)
-        assert source_ndarray(actual_x['foo'].data) is source_ndarray(x['foo'].data)
+        assert source_ndarray(actual_x['foo'].data) is \
+            source_ndarray(x['foo'].data)
 
         actual_x, actual_y = broadcast(x, y)
         self.assertDatasetIdentical(x, actual_x)
-        assert source_ndarray(actual_x['foo'].data) is source_ndarray(x['foo'].data)
+        assert source_ndarray(actual_x['foo'].data) is \
+            source_ndarray(x['foo'].data)
 
     def test_broadcast_exclude(self):
         x = Dataset({
-            'foo': DataArray([[1, 2],[3, 4]], dims=['x', 'y'], coords={'x': [1, 2], 'y': [3, 4]}),
+            'foo': DataArray([[1, 2], [3, 4]], dims=['x', 'y'],
+                             coords={'x': [1, 2], 'y': [3, 4]}),
             'bar': DataArray(5),
         })
         y = Dataset({
-            'foo': DataArray([[1, 2]], dims=['z', 'y'], coords={'z': [1], 'y': [5, 6]}),
+            'foo': DataArray([[1, 2]], dims=['z', 'y'],
+                             coords={'z': [1], 'y': [5, 6]}),
         })
         x2, y2 = broadcast(x, y, exclude=['y'])
 
         expected_x2 = Dataset({
-            'foo': DataArray([[[1, 2]], [[3, 4]]], dims=['x', 'z', 'y'], coords={'z': [1], 'x': [1, 2], 'y': [3, 4]}),
-            'bar': DataArray([[5], [5]], dims=['x', 'z'], coords={'x': [1, 2], 'z': [1]}),
+            'foo': DataArray([[[1, 2]], [[3, 4]]], dims=['x', 'z', 'y'],
+                             coords={'z': [1], 'x': [1, 2], 'y': [3, 4]}),
+            'bar': DataArray([[5], [5]], dims=['x', 'z'],
+                             coords={'x': [1, 2], 'z': [1]}),
        })
        expected_y2 = Dataset({
-            'foo': DataArray([[[1, 2]], [[1, 2]]], dims=['x', 'z', 'y'], coords={'z': [1], 'x': [1, 2], 'y': [5, 6]}),
+            'foo': DataArray([[[1, 2]], [[1, 2]]], dims=['x', 'z', 'y'],
+                             coords={'z': [1], 'x': [1, 2], 'y': [5, 6]}),
        })
 
        self.assertDatasetIdentical(expected_x2, x2)
        self.assertDatasetIdentical(expected_y2, y2)
 
    def test_broadcast_misaligned(self):
-        x = Dataset({'foo': DataArray([1, 2, 3], coords=[('x', [-1, -2, -3])])})
+        x = Dataset({'foo': DataArray([1, 2, 3],
+                                      coords=[('x', [-1, -2, -3])])})
        y = Dataset({'bar': DataArray([[1, 2], [3, 4]], dims=['y', 'x'],
                                      coords={'y': [1, 2], 'x': [10, -3]})})
        x2, y2 = broadcast(x, y)
@@ -1923,7 +1939,8 @@ def test_rename_inplace(self):
 
     def test_swap_dims(self):
         original = Dataset({'x': [1, 2, 3], 'y': ('x', list('abc')), 'z': 42})
-        expected = Dataset({'z': 42}, {'x': ('y', [1, 2, 3]), 'y': list('abc')})
+        expected = Dataset({'z': 42},
+                           {'x': ('y', [1, 2, 3]), 'y': list('abc')})
         actual = original.swap_dims({'x': 'y'})
         self.assertDatasetIdentical(expected, actual)
         self.assertIsInstance(actual.variables['y'], IndexVariable)
@@ -2225,14 +2242,15 @@ def test_virtual_variable_multiindex(self):
 
     def test_time_season(self):
         ds = Dataset({'t': pd.date_range('2000-01-01', periods=12, freq='M')})
-        expected = ['DJF'] * 2 + ['MAM'] * 3 + ['JJA'] * 3 + ['SON'] * 3 + ['DJF']
-        self.assertArrayEqual(expected, ds['t.season'])
+        seas = ['DJF'] * 2 + ['MAM'] * 3 + ['JJA'] * 3 + ['SON'] * 3 + ['DJF']
+        self.assertArrayEqual(seas, ds['t.season'])
 
     def test_slice_virtual_variable(self):
         data = create_test_data()
         self.assertVariableEqual(data['time.dayofyear'][:10].variable,
                                  Variable(['time'], 1 + np.arange(10)))
-        self.assertVariableEqual(data['time.dayofyear'][0].variable, Variable([], 1))
+        self.assertVariableEqual(data['time.dayofyear'][0].variable,
+                                 Variable([], 1))
 
     def test_setitem(self):
         # assign a variable
@@ -2249,7 +2267,7 @@ def test_setitem(self):
         self.assertDatasetIdentical(data1, data2)
         # can't assign an ND array without dimensions
         with raises_regex(ValueError,
-                'without explicit dimension names'):
+                          'without explicit dimension names'):
             data2['C'] = var.values.reshape(2, 4)
         # but can assign a 1D array
         data1['C'] = var.values
@@ -2456,7 +2474,9 @@ def test_groupby(self):
             self.assertEqual(actual[0], expected[0])
             self.assertDatasetEqual(actual[1], expected[1])
 
-        identity = lambda x: x
+        def identity(x):
+            return x
+
         for k in ['x', 'c', 'y']:
             actual = data.groupby(k, squeeze=False).apply(identity)
             self.assertDatasetEqual(data, actual)
@@ -2512,7 +2532,8 @@ def test_groupby_reduce(self):
         self.assertDatasetAllClose(expected, actual)
 
     def test_groupby_math(self):
-        reorder_dims = lambda x: x.transpose('dim1', 'dim2', 'dim3', 'time')
+        def reorder_dims(x):
+            return x.transpose('dim1', 'dim2', 'dim3', 'time')
 
         ds = create_test_data()
         ds['dim1'] = ds['dim1']
@@ -2583,13 +2604,14 @@ def test_groupby_order(self):
 
         ds = Dataset()
         for vn in ['a', 'b', 'c']:
             ds[vn] = DataArray(np.arange(10), dims=['t'])
-        all_vars_ref = list(ds.variables.keys())
         data_vars_ref = list(ds.data_vars.keys())
         ds = ds.groupby('t').mean()
-        all_vars = list(ds.variables.keys())
         data_vars = list(ds.data_vars.keys())
         self.assertEqual(data_vars, data_vars_ref)
+        # coords are now at the end of the list, so the test below fails
+        # all_vars = list(ds.variables.keys())
+        # all_vars_ref = list(ds.variables.keys())
         # self.assertEqual(all_vars, all_vars_ref)
 
     def test_resample_and_first(self):
@@ -2660,7 +2682,7 @@ def test_resample_drop_nondim_coords(self):
         ys = np.arange(3)
         times = pd.date_range('2000-01-01', freq='6H', periods=5)
         data = np.tile(np.arange(5), (6, 3, 1))
-        xx, yy = np.meshgrid(xs*5, ys*2.5)
+        xx, yy = np.meshgrid(xs * 5, ys * 2.5)
         tt = np.arange(len(times), dtype=int)
         array = DataArray(data,
                           {'time': times, 'x': xs, 'y': ys},
@@ -2762,9 +2784,10 @@ def test_to_and_from_dataframe(self):
         self.assertDatasetIdentical(expected, actual)
 
         # GH697
-        df = pd.DataFrame({'A' : []})
+        df = pd.DataFrame({'A': []})
         actual = Dataset.from_dataframe(df)
-        expected = Dataset({'A': DataArray([], dims=('index',))}, {'index': []})
+        expected = Dataset({'A': DataArray([], dims=('index',))},
+                           {'index': []})
         self.assertDatasetIdentical(expected, actual)
 
         # regression test for GH278
@@ -2873,7 +2896,7 @@ def test_to_and_from_dict(self):
             't': {'data': t, 'dims': 't'},
             'b': {'dims': 't', 'data': y}}
         with raises_regex(ValueError, "cannot convert dict "
-                          "without the key 'dims'"):
+                                      "without the key 'dims'"):
             Dataset.from_dict(d)
 
     def test_to_and_from_dict_with_time_dim(self):
@@ -3180,14 +3203,17 @@ def test_where_drop(self):
             ds.where(np.arange(5) > 1, drop=True)
 
         # 1d with odd coordinates
-        array = DataArray(np.array([2, 7, 1, 8, 3]), coords=[np.array([3, 1, 4, 5, 9])], dims=['x'])
-        expected = DataArray(np.array([7, 8, 3]), coords=[np.array([1, 5, 9])], dims=['x'])
+        array = DataArray(np.array([2, 7, 1, 8, 3]),
+                          coords=[np.array([3, 1, 4, 5, 9])], dims=['x'])
+        expected = DataArray(np.array([7, 8, 3]), coords=[np.array([1, 5, 9])],
+                             dims=['x'])
         actual = array.where(array > 2, drop=True)
         self.assertDatasetIdentical(expected, actual)
 
         # 1d multiple variables
         ds = Dataset({'a': (('x'), [0, 1, 2, 3]), 'b': (('x'), [4, 5, 6, 7])})
-        expected = Dataset({'a': (('x'), [np.nan, 1, 2, 3]), 'b': (('x'), [4, 5, 6, np.nan])})
+        expected = Dataset({'a': (('x'), [np.nan, 1, 2, 3]),
+                            'b': (('x'), [4, 5, 6, np.nan])})
         actual = ds.where((ds > 0) & (ds < 7), drop=True)
         self.assertDatasetIdentical(expected, actual)
 
@@ -3198,18 +3224,20 @@ def test_where_drop(self):
         self.assertDatasetIdentical(expected, actual)
 
         # 2d with odd coordinates
-        ds = Dataset({'a': (('x', 'y'), [[0, 1], [2, 3]])},
-                     coords={'x': [4, 3], 'y': [1, 2],
-                             'z' : (['x','y'], [[np.e, np.pi], [np.pi*np.e, np.pi*3]])})
+        ds = Dataset({'a': (('x', 'y'), [[0, 1], [2, 3]])}, coords={
+            'x': [4, 3], 'y': [1, 2],
+            'z': (['x', 'y'], [[np.e, np.pi], [np.pi * np.e, np.pi * 3]])})
         expected = Dataset({'a': (('x', 'y'), [[3]])},
-                           coords={'x': [3], 'y': [2],
-                                   'z' : (['x','y'], [[np.pi*3]])})
+                           coords={'x': [3], 'y': [2],
+                                   'z': (['x', 'y'], [[np.pi * 3]])})
         actual = ds.where(ds > 2, drop=True)
         self.assertDatasetIdentical(expected, actual)
 
         # 2d multiple variables
-        ds = Dataset({'a': (('x', 'y'), [[0, 1], [2, 3]]), 'b': (('x','y'), [[4, 5], [6, 7]])})
-        expected = Dataset({'a': (('x', 'y'), [[np.nan, 1], [2, 3]]), 'b': (('x', 'y'), [[4, 5], [6,7]])})
+        ds = Dataset({'a': (('x', 'y'), [[0, 1], [2, 3]]),
+                      'b': (('x', 'y'), [[4, 5], [6, 7]])})
+        expected = Dataset({'a': (('x', 'y'), [[np.nan, 1], [2, 3]]),
+                            'b': (('x', 'y'), [[4, 5], [6, 7]])})
         actual = ds.where(ds > 0, drop=True)
         self.assertDatasetIdentical(expected, actual)
 
@@ -3263,23 +3291,27 @@ def test_reduce(self):
     def test_reduce_bad_dim(self):
         data = create_test_data()
         with raises_regex(ValueError, 'Dataset does not contain'):
-            ds = data.mean(dim='bad_dim')
+            data.mean(dim='bad_dim')
 
     def test_reduce_cumsum_test_dims(self):
         data = create_test_data()
         for cumfunc in ['cumsum', 'cumprod']:
-            with raises_regex(ValueError, "must supply either single 'dim' or 'axis'"):
-                ds = getattr(data, cumfunc)()
-            with raises_regex(ValueError, "must supply either single 'dim' or 'axis'"):
-                ds = getattr(data, cumfunc)(dim=['dim1', 'dim2'])
+            with raises_regex(ValueError,
+                              "must supply either single 'dim' or 'axis'"):
+                getattr(data, cumfunc)()
+            with raises_regex(ValueError,
+                              "must supply either single 'dim' or 'axis'"):
+                getattr(data, cumfunc)(dim=['dim1', 'dim2'])
             with raises_regex(ValueError, 'Dataset does not contain'):
-                ds = getattr(data, cumfunc)(dim='bad_dim')
+                getattr(data, cumfunc)(dim='bad_dim')
 
             # ensure dimensions are correct
-            for reduct, expected in [('dim1', ['dim1', 'dim2', 'dim3', 'time']),
-                                     ('dim2', ['dim1', 'dim2', 'dim3', 'time']),
-                                     ('dim3', ['dim1', 'dim2', 'dim3', 'time']),
-                                     ('time', ['dim1', 'dim2', 'dim3'])]:
+            for reduct, expected in [
+                ('dim1', ['dim1', 'dim2', 'dim3', 'time']),
+                ('dim2', ['dim1', 'dim2', 'dim3', 'time']),
+                ('dim3', ['dim1', 'dim2', 'dim3', 'time']),
+                ('time', ['dim1', 'dim2', 'dim3'])
+            ]:
                 actual = getattr(data, cumfunc)(dim=reduct).dims
                 print(reduct, actual, expected)
                 self.assertItemsEqual(actual, expected)
@@ -3455,7 +3487,7 @@ def scale(x, multiple=1):
         self.assertDataArrayIdentical(actual['numbers'], data['numbers'])
 
         actual = data.apply(np.asarray)
-        expected = data.drop('time') # time is not used on a data var
+        expected = data.drop('time')  # time is not used on a data var
         self.assertDatasetEqual(expected, actual)
 
     def make_example_math_dataset(self):
@@ -3753,21 +3785,25 @@ def test_filter_by_attrs(self):
         self.assertFalse(bool(new_ds.data_vars))
 
         # Test return one DataArray.
-        new_ds = ds.filter_by_attrs(standard_name='convective_precipitation_flux')
-        self.assertEqual(new_ds['precipitation'].standard_name, 'convective_precipitation_flux')
+        new_ds = ds.filter_by_attrs(
+            standard_name='convective_precipitation_flux')
+        self.assertEqual(new_ds['precipitation'].standard_name,
+                         'convective_precipitation_flux')
         self.assertDatasetEqual(new_ds['precipitation'], ds['precipitation'])
 
         # Test return more than one DataArray.
         new_ds = ds.filter_by_attrs(standard_name='air_potential_temperature')
         self.assertEqual(len(new_ds.data_vars), 2)
         for var in new_ds.data_vars:
-            self.assertEqual(new_ds[var].standard_name, 'air_potential_temperature')
+            self.assertEqual(new_ds[var].standard_name,
+                             'air_potential_temperature')
 
         # Test callable.
         new_ds = ds.filter_by_attrs(height=lambda v: v is not None)
         self.assertEqual(len(new_ds.data_vars), 2)
         for var in new_ds.data_vars:
-            self.assertEqual(new_ds[var].standard_name, 'air_potential_temperature')
+            self.assertEqual(new_ds[var].standard_name,
+                             'air_potential_temperature')
 
         new_ds = ds.filter_by_attrs(height='10 m')
         self.assertEqual(len(new_ds.data_vars), 1)
@@ -3809,7 +3845,7 @@ def test_full_like(self):
         # For more thorough tests, see test_variable.py
         # Note: testing data_vars with mismatched dtypes
         ds = Dataset({
-            'd1': DataArray([1,2,3], dims=['x'], coords={'x': [10, 20, 30]}),
+            'd1': DataArray([1, 2, 3], dims=['x'], coords={'x': [10, 20, 30]}),
             'd2': DataArray([1.1, 2.2, 3.3], dims=['y'])
         }, attrs={'foo': 'bar'})
         actual = full_like(ds, 2)
@@ -4010,12 +4046,14 @@ def test_dir_expected_attrs(data_set):
     result = dir(data_set)
     assert set(result) >= some_expected_attrs
 
+
 def test_dir_non_string(data_set):
     # add a numbered key to ensure this doesn't break dir
     data_set[5] = 'foo'
     result = dir(data_set)
     assert not (5 in result)
 
+
 def test_dir_unicode(data_set):
     data_set[u'unicode'] = 'uni'
     result = dir(data_set)
diff --git a/xarray/tests/test_distributed.py b/xarray/tests/test_distributed.py
index 1d0c51322a1..478b669790e 100644
--- a/xarray/tests/test_distributed.py
+++ b/xarray/tests/test_distributed.py
@@ -2,13 +2,13 @@
 import pytest
 
 import xarray as xr
-from xarray.core.pycompat import suppress
 
 distributed = pytest.importorskip('distributed')
 da = pytest.importorskip('dask.array')
 import dask
-from distributed.utils_test import cluster, loop, gen_cluster
-from distributed.client import futures_of, wait
+from distributed.utils_test import cluster, gen_cluster
+from distributed.utils_test import loop  # flake8: noqa
+from distributed.client import futures_of
 
 from xarray.tests.test_backends import create_tmp_file, ON_WINDOWS
 from xarray.tests.test_dataset import create_test_data
diff --git a/xarray/tests/test_extensions.py b/xarray/tests/test_extensions.py
index 27a838bc1ac..c3f89a5f533 100644
--- a/xarray/tests/test_extensions.py
+++ b/xarray/tests/test_extensions.py
@@ -15,6 +15,7 @@
 
 @xr.register_dataarray_accessor('example_accessor')
 class ExampleAccessor(object):
     """For the pickling tests below."""
+
     def __init__(self, xarray_obj):
         self.obj = xarray_obj
@@ -26,6 +27,7 @@ def test_register(self):
 
         @xr.register_dataarray_accessor('demo')
         class DemoAccessor(object):
             """Demo accessor."""
+
             def __init__(self, xarray_obj):
                 self._obj = xarray_obj
diff --git a/xarray/tests/test_formatting.py b/xarray/tests/test_formatting.py
index 2c2c9bd614d..f80326c3e84 100644
--- a/xarray/tests/test_formatting.py
+++ b/xarray/tests/test_formatting.py
@@ -25,7 +25,7 @@ def test_get_indexer_at_least_n_items(self):
             ((2, 5, 1,), (slice(2), slice(None), slice(None))),
             ((2, 5, 3,), (0, slice(4), slice(None))),
             ((2, 3, 3,), (slice(2), slice(None), slice(None))),
-            ]
+        ]
         for shape, expected in cases:
             actual = formatting._get_indexer_at_least_n_items(shape, 10)
             self.assertEqual(expected, actual)
diff --git a/xarray/tests/test_groupby.py b/xarray/tests/test_groupby.py
index 7245685e2c7..f1d80954295 100644
--- a/xarray/tests/test_groupby.py
+++ b/xarray/tests/test_groupby.py
@@ -13,8 +13,8 @@ def test_consolidate_slices():
     assert _consolidate_slices([slice(3), slice(3, 5)]) == [slice(5)]
     assert _consolidate_slices([slice(2, 3), slice(3, 6)]) == [slice(2, 6)]
 
-    assert (_consolidate_slices([slice(2, 3, 1), slice(3, 6, 1)])
-            == [slice(2, 6, 1)])
+    assert (_consolidate_slices([slice(2, 3, 1), slice(3, 6, 1)]) ==
+            [slice(2, 6, 1)])
 
     slices = [slice(2, 3), slice(5, 6)]
     assert _consolidate_slices(slices) == slices
@@ -51,15 +51,18 @@ def test_groupby_da_datetime():
     # test groupby with a DataArray of dtype datetime for GH1132
     # create test data
     times = pd.date_range('2000-01-01', periods=4)
-    foo = xr.DataArray([1,2,3,4], coords=dict(time=times), dims='time')
+    foo = xr.DataArray([1, 2, 3, 4], coords=dict(time=times), dims='time')
     # create test index
     dd = times.to_pydatetime()
     reference_dates = [dd[0], dd[2]]
-    labels = reference_dates[0:1]*2 + reference_dates[1:2]*2
-    ind = xr.DataArray(labels, coords=dict(time=times), dims='time', name='reference_date')
+    labels = reference_dates[0:1] * 2 + reference_dates[1:2] * 2
+    ind = xr.DataArray(labels, coords=dict(time=times), dims='time',
+                       name='reference_date')
     g = foo.groupby(ind)
     actual = g.sum(dim='time')
-    expected = xr.DataArray([3,7], coords=dict(reference_date=reference_dates), dims='reference_date')
+    expected = xr.DataArray([3, 7],
+                            coords=dict(reference_date=reference_dates),
+                            dims='reference_date')
     assert actual.equals(expected)
 
 
diff --git a/xarray/tests/test_indexing.py b/xarray/tests/test_indexing.py
index fedaadd1522..3b5bbbdb55d 100644
--- a/xarray/tests/test_indexing.py
+++ b/xarray/tests/test_indexing.py
@@ -27,7 +27,7 @@ def set_to_zero(self, x, i):
     def test_expanded_indexer(self):
         x = np.random.randn(10, 11, 12, 13, 14)
         y = np.arange(5)
-        I = ReturnItem()
+        I = ReturnItem()  # noqa: E741  # allow ambiguous name
         for i in [I[:], I[...], I[0, :, 10], I[..., 10], I[:5, ..., 0],
                   I[..., 0, :], I[y], I[y, y], I[..., y, y],
                   I[..., 0, 1, 2, 3, 4]]:
@@ -137,7 +137,7 @@ def test_indexer(data, x, expected_pos, expected_idx=None):
 
 class TestLazyArray(TestCase):
     def test_slice_slice(self):
-        I = ReturnItem()
+        I = ReturnItem()  # noqa: E741  # allow ambiguous name
         x = np.arange(100)
         slices = [I[:3], I[:4], I[2:4], I[:1], I[:-1], I[5:-1], I[-5:-1],
                   I[::-1], I[5::-1], I[:3:-1], I[:30:-1], I[10:4:], I[::4],
@@ -155,7 +155,7 @@ def test_lazily_indexed_array(self):
         v = Variable(['i', 'j', 'k'], original)
         lazy = indexing.LazilyIndexedArray(x)
         v_lazy = Variable(['i', 'j', 'k'], lazy)
-        I = ReturnItem()
+        I = ReturnItem()  # noqa: E741  # allow ambiguous name
         # test orthogonally applied indexers
         indexers = [I[:], 0, -2, I[:3], [0, 1, 2, 3], [0], np.arange(10) < 5]
         for i in indexers:
@@ -366,7 +366,7 @@ def nonzero(x):
 
         original = np.random.rand(10, 20, 30)
         v = Variable(['i', 'j', 'k'], original)
-        I = ReturnItem()
+        I = ReturnItem()  # noqa: E741  # allow ambiguous name
         # test orthogonally applied indexers
         indexers = [I[:], 0, -2, I[:3], np.array([0, 1, 2, 3]), np.array([0]),
                     np.arange(10) < 5]
diff --git a/xarray/tests/test_merge.py b/xarray/tests/test_merge.py
index 532b9ee4ff0..409ad86c1e9 100644
--- a/xarray/tests/test_merge.py
+++ b/xarray/tests/test_merge.py
@@ -76,9 +76,9 @@ def test_merge_no_conflicts_single_var(self):
         ds2 = xr.Dataset({'a': ('x', [2, 3]), 'x': [1, 2]})
         expected = xr.Dataset({'a': ('x', [1, 2, 3]), 'x': [0, 1, 2]})
         assert expected.identical(xr.merge([ds1, ds2],
-                                  compat='no_conflicts'))
+                                           compat='no_conflicts'))
         assert expected.identical(xr.merge([ds2, ds1],
-                                  compat='no_conflicts'))
+                                           compat='no_conflicts'))
 
         assert ds1.identical(xr.merge([ds1, ds2],
                                       compat='no_conflicts',
diff --git a/xarray/tests/test_plot.py b/xarray/tests/test_plot.py
index 4615c59884a..09e77840f26 100644
--- a/xarray/tests/test_plot.py
+++ b/xarray/tests/test_plot.py
@@ -110,8 +110,8 @@ def test_2d_line_accepts_legend_kw(self):
         self.darray[:, :, 0].plot.line(x='dim_0', add_legend=True)
         self.assertTrue(plt.gca().get_legend())
         # check whether legend title is set
-        self.assertTrue(plt.gca().get_legend().get_title().get_text()
-                        == 'dim_1')
+        self.assertEqual(plt.gca().get_legend().get_title().get_text(),
+                         'dim_1')
 
     def test_2d_line_accepts_x_kw(self):
         self.darray[:, :, 0].plot.line(x='dim_0')
@@ -122,12 +122,12 @@ def test_2d_line_accepts_x_kw(self):
 
     def test_2d_line_accepts_hue_kw(self):
         self.darray[:, :, 0].plot.line(hue='dim_0')
-        self.assertTrue(plt.gca().get_legend().get_title().get_text()
-                        == 'dim_0')
+        self.assertEqual(plt.gca().get_legend().get_title().get_text(),
+                         'dim_0')
         plt.cla()
         self.darray[:, :, 0].plot.line(hue='dim_1')
-        self.assertTrue(plt.gca().get_legend().get_title().get_text()
-                        == 'dim_1')
+        self.assertEqual(plt.gca().get_legend().get_title().get_text(),
+                         'dim_1')
 
     def test_2d_before_squeeze(self):
         a = DataArray(easy_array((1, 5)))
@@ -786,7 +786,7 @@ def test_no_labels(self):
     def test_colorbar_kwargs(self):
         # replace label
         self.darray.name = 'testvar'
-        self.plotmethod(add_colorbar=True, cbar_kwargs={'label':'MyLabel'})
+        self.plotmethod(add_colorbar=True, cbar_kwargs={'label': 'MyLabel'})
         alltxt = text_in_fig()
         self.assertIn('MyLabel', alltxt)
         self.assertNotIn('testvar', alltxt)
@@ -798,7 +798,7 @@ def test_colorbar_kwargs(self):
         # change cbar ax
         fig, (ax, cax) = plt.subplots(1, 2)
         self.plotmethod(ax=ax, cbar_ax=cax, add_colorbar=True,
-                        cbar_kwargs={'label':'MyBar'})
+                        cbar_kwargs={'label': 'MyBar'})
         self.assertTrue(ax.has_data())
         self.assertTrue(cax.has_data())
         alltxt = text_in_fig()
@@ -807,7 +807,7 @@ def test_colorbar_kwargs(self):
         # note that there are two ways to achieve this
         fig, (ax, cax) = plt.subplots(1, 2)
         self.plotmethod(ax=ax, add_colorbar=True,
-                        cbar_kwargs={'label':'MyBar', 'cax':cax})
+                        cbar_kwargs={'label': 'MyBar', 'cax': cax})
         self.assertTrue(ax.has_data())
         self.assertTrue(cax.has_data())
         alltxt = text_in_fig()
@@ -818,7 +818,7 @@ def test_colorbar_kwargs(self):
         self.assertNotIn('testvar', text_in_fig())
         # check that error is raised
         pytest.raises(ValueError, self.plotmethod,
-                      add_colorbar=False, cbar_kwargs= {'label':'label'})
+                      add_colorbar=False, cbar_kwargs={'label': 'label'})
 
     def test_verbose_facetgrid(self):
         a = easy_array((10, 15, 3))
diff --git a/xarray/tests/test_tutorial.py b/xarray/tests/test_tutorial.py
index 56bdccedcfe..05a72153025 100644
--- a/xarray/tests/test_tutorial.py
+++ b/xarray/tests/test_tutorial.py
@@ -3,7 +3,6 @@
 from __future__ import print_function
 
 import os
-import pytest
 
 from xarray import tutorial, DataArray
 from xarray.core.pycompat import suppress
diff --git a/xarray/tests/test_utils.py b/xarray/tests/test_utils.py
index 1813e2b6df8..98dd21c5fa0 100644
--- a/xarray/tests/test_utils.py
+++ b/xarray/tests/test_utils.py
@@ -1,7 +1,6 @@
 from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function
-import pickle
 
 import pytest
 import numpy as np
@@ -32,7 +31,7 @@ def test(self):
             (pd.Index(x, dtype=object), x.astype(object)),
             (pd.Index(td), td),
             (pd.Index(td, dtype=object), td.astype(object)),
-            ]:
+        ]:
             actual = utils.safe_cast_to_index(array)
             self.assertArrayEqual(expected, actual)
             self.assertEqual(expected.dtype, actual.dtype)
@@ -110,9 +109,9 @@ def test_dict_equiv(self):
         y['c'] = np.inf
         self.assertTrue(utils.dict_equiv(x, y))  # inf == inf
         y = dict(y)
-        self.assertTrue(utils.dict_equiv(x, y))  # different dictionary types are fine
+        self.assertTrue(utils.dict_equiv(x, y))  # different dict types are ok
         y['b'] = 3 * np.arange(3)
-        self.assertFalse(utils.dict_equiv(x, y))  # not equal when arrays differ
+        self.assertFalse(utils.dict_equiv(x, y))  # unequal when arrays differ
 
     def test_frozen(self):
         x = utils.Frozen(self.x)
diff --git a/xarray/tests/test_variable.py b/xarray/tests/test_variable.py
index a499289a3c4..c9c8f12486b 100644
--- a/xarray/tests/test_variable.py
+++ b/xarray/tests/test_variable.py
@@ -183,10 +183,12 @@ def test_index_0d_timedelta64(self):
         td = timedelta(hours=1)
 
         x = self.cls(['x'], [np.timedelta64(td)])
-        self._assertIndexedLikeNDArray(x, np.timedelta64(td), 'timedelta64[ns]')
+        self._assertIndexedLikeNDArray(
+            x, np.timedelta64(td), 'timedelta64[ns]')
 
         x = self.cls(['x'], pd.to_timedelta([td]))
-        self._assertIndexedLikeNDArray(x, np.timedelta64(td), 'timedelta64[ns]')
+        self._assertIndexedLikeNDArray(
+            x, np.timedelta64(td), 'timedelta64[ns]')
 
     def test_index_0d_not_a_time(self):
         d = np.datetime64('NaT', 'ns')
@@ -242,11 +244,11 @@ def test_0d_time_data(self):
     def test_datetime64_conversion(self):
         times = pd.date_range('2000-01-01', periods=3)
         for values, preserve_source in [
-            (times, True),
-            (times.values, True),
-            (times.values.astype('datetime64[s]'), False),
-            (times.to_pydatetime(), False),
-        ]:
+                (times, True),
+                (times.values, True),
+                (times.values.astype('datetime64[s]'), False),
+                (times.to_pydatetime(), False),
+                ]:
             v = self.cls(['t'], values)
             self.assertEqual(v.dtype, np.dtype('datetime64[ns]'))
             self.assertArrayEqual(v.values, times.values)
@@ -257,11 +259,11 @@ def test_datetime64_conversion(self):
     def test_timedelta64_conversion(self):
         times = pd.timedelta_range(start=0, periods=3)
         for values, preserve_source in [
-            (times, True),
-            (times.values, True),
-            (times.values.astype('timedelta64[s]'), False),
-            (times.to_pytimedelta(), False),
-        ]:
+                (times, True),
+                (times.values, True),
+                (times.values.astype('timedelta64[s]'), False),
+                (times.to_pytimedelta(), False),
+                ]:
             v = self.cls(['t'], values)
             self.assertEqual(v.dtype, np.dtype('timedelta64[ns]'))
             self.assertArrayEqual(v.values, times.values)
@@ -318,7 +320,8 @@ def test_1d_math(self):
         # binary ops with all variables
         self.assertArrayEqual(v + v, 2 * v)
         w = self.cls(['x'], y, {'foo': 'bar'})
-        self.assertVariableIdentical(v + w, self.cls(['x'], x + y).to_base_variable())
+        self.assertVariableIdentical(
+            v + w, self.cls(['x'], x + y).to_base_variable())
         self.assertArrayEqual((v * w).values, x * y)
         # something complicated
         self.assertArrayEqual((v ** 2 * w - 1 + x).values, x ** 2 * y - 1 + x)
@@ -349,10 +352,11 @@ def test_array_interface(self):
         self.assertArrayEqual(v.astype(float), x.astype(float))
         # think this is a break, that argsort changes the type
         self.assertVariableIdentical(v.argsort(), v.to_base_variable())
-        self.assertVariableIdentical(v.clip(2, 3),
-                                     self.cls('x', x.clip(2, 3)).to_base_variable())
+        self.assertVariableIdentical(
+            v.clip(2, 3), self.cls('x', x.clip(2, 3)).to_base_variable())
         # test ufuncs
-        self.assertVariableIdentical(np.sin(v), self.cls(['x'], np.sin(x)).to_base_variable())
+        self.assertVariableIdentical(
+            np.sin(v), self.cls(['x'], np.sin(x)).to_base_variable())
         self.assertIsInstance(np.sin(v), Variable)
         self.assertNotIsInstance(np.sin(v), IndexVariable)
 
@@ -430,11 +434,15 @@ def test_concat(self):
             self.assertVariableIdentical(expected, actual)
         # test concatenating along a dimension
         v = Variable(['time', 'x'], np.random.random((10, 8)))
-        self.assertVariableIdentical(v, Variable.concat([v[:5], v[5:]], 'time'))
-        self.assertVariableIdentical(v, Variable.concat([v[:5], v[5:6], v[6:]], 'time'))
-        self.assertVariableIdentical(v, Variable.concat([v[:1], v[1:]], 'time'))
+        self.assertVariableIdentical(
+            v, Variable.concat([v[:5], v[5:]], 'time'))
+        self.assertVariableIdentical(
+            v, Variable.concat([v[:5], v[5:6], v[6:]], 'time'))
+        self.assertVariableIdentical(
+            v, Variable.concat([v[:1], v[1:]], 'time'))
         # test dimension order
-        self.assertVariableIdentical(v, Variable.concat([v[:, :5], v[:, 5:]], 'x'))
+        self.assertVariableIdentical(
+            v, Variable.concat([v[:, :5], v[:, 5:]], 'x'))
         with raises_regex(ValueError, 'all input arrays must have'):
             Variable.concat([v[:, 0], v[:, 1:]], 'x')
 
@@ -442,7 +450,8 @@ def test_concat_attrs(self):
         # different or conflicting attributes should be removed
         v = self.cls('a', np.arange(5), {'foo': 'bar'})
         w = self.cls('a', np.ones(5))
-        expected = self.cls('a', np.concatenate([np.arange(5), np.ones(5)])).to_base_variable()
+        expected = self.cls(
+            'a', np.concatenate([np.arange(5), np.ones(5)])).to_base_variable()
         self.assertVariableIdentical(expected, Variable.concat([v, w], 'a'))
         w.attrs['foo'] = 2
         self.assertVariableIdentical(expected, Variable.concat([v, w], 'a'))
@@ -504,7 +513,8 @@ def test_real_and_imag(self):
         expected_im = self.cls('x', -np.arange(3), {'foo': 'bar'})
         self.assertVariableIdentical(v.imag, expected_im)
 
-        expected_abs = self.cls('x', np.sqrt(2 * np.arange(3) ** 2)).to_base_variable()
+        expected_abs = self.cls(
+            'x', np.sqrt(2 * np.arange(3) ** 2)).to_base_variable()
         self.assertVariableAllClose(abs(v), expected_abs)
 
     def test_aggregate_complex(self):
@@ -756,10 +766,10 @@ def test_numpy_same_methods(self):
     def test_datetime64_conversion_scalar(self):
         expected = np.datetime64('2000-01-01', 'ns')
         for values in [
-            np.datetime64('2000-01-01'),
-            pd.Timestamp('2000-01-01T00'),
-            datetime(2000, 1, 1),
-        ]:
+                np.datetime64('2000-01-01'),
+                pd.Timestamp('2000-01-01T00'),
+                datetime(2000, 1, 1),
+                ]:
             v = Variable([], values)
             self.assertEqual(v.dtype, np.dtype('datetime64[ns]'))
             self.assertEqual(v.values, expected)
@@ -768,10 +778,10 @@ def test_datetime64_conversion_scalar(self):
     def test_timedelta64_conversion_scalar(self):
         expected = np.timedelta64(24 * 60 * 60 * 10 ** 9, 'ns')
         for values in [
-            np.timedelta64(1, 'D'),
-            pd.Timedelta('1 day'),
-            timedelta(days=1),
-        ]:
+                np.timedelta64(1, 'D'),
+                pd.Timedelta('1 day'),
+                timedelta(days=1),
+                ]:
             v = Variable([], values)
             self.assertEqual(v.dtype, np.dtype('timedelta64[ns]'))
             self.assertEqual(v.values, expected)
@@ -1171,7 +1181,7 @@ def test_transpose_0d(self):
                 np.timedelta64(1, 'h'),
                 None,
                 object(),
-                ]:
+        ]:
             variable = Variable([], value)
             actual = variable.transpose()
             assert actual.identical(variable)
@@ -1400,11 +1410,11 @@ def test_rank(self):
         np.testing.assert_allclose(v.rank('x').values, expect_0)
         np.testing.assert_allclose(v.rank('y').values, expect_1)
         # int
-        v = Variable(['x'], [3,2,1])
+        v = Variable(['x'], [3, 2, 1])
         expect = bn.rankdata(v.data, axis=0)
         np.testing.assert_allclose(v.rank('x').values, expect)
         # str
-        v = Variable(['x'], ['c', 'b', 'a'])
+        v = Variable(['x'], ['c', 'b', 'a'])
         expect = bn.rankdata(v.data, axis=0)
         np.testing.assert_allclose(v.rank('x').values, expect)
         # pct
@@ -1426,7 +1436,8 @@ def test_reduce_funcs(self):
         v = Variable('x', np.array([1, np.nan, 2, 3]))
         self.assertVariableIdentical(v.mean(), Variable([], 2))
         self.assertVariableIdentical(v.mean(skipna=True), Variable([], 2))
-        self.assertVariableIdentical(v.mean(skipna=False), Variable([], np.nan))
+        self.assertVariableIdentical(v.mean(skipna=False),
+                                     Variable([], np.nan))
         self.assertVariableIdentical(np.mean(v), Variable([], 2))
 
         self.assertVariableIdentical(v.prod(), Variable([], 6))
@@ -1677,7 +1688,8 @@ def test_get_level_variable(self):
 
     def test_concat_periods(self):
         periods = pd.period_range('2000-01-01', periods=10)
-        coords = [IndexVariable('t', periods[:5]), IndexVariable('t', periods[5:])]
+        coords = [IndexVariable('t', periods[:5]),
+                  IndexVariable('t', periods[5:])]
         expected = IndexVariable('t', periods)
         actual = IndexVariable.concat(coords, dim='t')
         assert actual.identical(expected)
@@ -1780,7 +1792,7 @@ def test_datetime(self):
 
     def test_full_like(self):
         # For more thorough tests, see test_variable.py
-        orig = Variable(dims=('x', 'y'), data=[[1.5 ,2.0], [3.1, 4.3]],
+        orig = Variable(dims=('x', 'y'), data=[[1.5, 2.0], [3.1, 4.3]],
                         attrs={'foo': 'bar'})
 
         expect = orig.copy(deep=True)
@@ -1822,7 +1834,7 @@ def check(actual, expect_dtype, expect_values):
             assert not isinstance(v, np.ndarray)
 
     def test_zeros_like(self):
-        orig = Variable(dims=('x', 'y'), data=[[1.5 ,2.0], [3.1, 4.3]],
+        orig = Variable(dims=('x', 'y'), data=[[1.5, 2.0], [3.1, 4.3]],
                         attrs={'foo': 'bar'})
         self.assertVariableIdentical(zeros_like(orig),
                                      full_like(orig, 0))
@@ -1830,7 +1842,7 @@ def test_zeros_like(self):
                                      full_like(orig, 0, dtype=int))
 
     def test_ones_like(self):
-        orig = Variable(dims=('x', 'y'), data=[[1.5 ,2.0], [3.1, 4.3]],
+        orig = Variable(dims=('x', 'y'), data=[[1.5, 2.0], [3.1, 4.3]],
                         attrs={'foo': 'bar'})
         self.assertVariableIdentical(ones_like(orig),
                                      full_like(orig, 1))
@@ -1863,6 +1875,7 @@ def test_raise_no_warning_for_nan_in_binary_ops():
 
 class TestBackendIndexing(TestCase):
     """ Make sure all the array wrappers can be indexed.
    """
+
    def setUp(self):
        self.d = np.random.random((10, 3)).astype(np.float64)
@@ -1882,7 +1895,7 @@ def test_NumpyIndexingAdapter(self):
        # could not doubly wrapping
        with raises_regex(TypeError, 'NumpyIndexingAdapter only wraps '):
            v = Variable(dims=('x', 'y'), data=NumpyIndexingAdapter(
-                NumpyIndexingAdapter(self.d)))
+                    NumpyIndexingAdapter(self.d)))
 
    def test_LazilyIndexedArray(self):
        v = Variable(dims=('x', 'y'), data=LazilyIndexedArray(self.d))
diff --git a/xarray/ufuncs.py b/xarray/ufuncs.py
index 557e741c937..1990ac5b765 100644
--- a/xarray/ufuncs.py
+++ b/xarray/ufuncs.py
@@ -41,6 +41,7 @@ def _dispatch_priority(obj):
 
 
 class _UFuncDispatcher(object):
     """Wrapper for dispatching ufuncs."""
+
     def __init__(self, name):
         self._name = name
diff --git a/xarray/util/print_versions.py b/xarray/util/print_versions.py
index 1b25583197f..2c9b1944a74 100755
--- a/xarray/util/print_versions.py
+++ b/xarray/util/print_versions.py
@@ -25,7 +25,7 @@ def get_sys_info():
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
         so, serr = pipe.communicate()
-    except:
+    except Exception:
         pass
     else:
         if pipe.returncode == 0:
@@ -55,7 +55,7 @@ def get_sys_info():
             ("LOCALE", "%s.%s" % locale.getlocale()),
         ])
-    except:
+    except Exception:
        pass
 
    return blob
 
@@ -102,13 +102,13 @@ def show_versions(as_json=False):
            mod = importlib.import_module(modname)
            ver = ver_f(mod)
            deps_blob.append((modname, ver))
-        except:
+        except Exception:
            deps_blob.append((modname, None))
 
    if (as_json):
        try:
            import json
-        except:
+        except Exception:
            import simplejson as json
 
        j = dict(system=dict(sys_info), dependencies=dict(deps_blob))