Make flake8 xarray pass #1824

Merged
merged 7 commits into from
Jan 14, 2018
2 changes: 1 addition & 1 deletion .travis.yml
@@ -93,7 +93,7 @@ install:
- python xarray/util/print_versions.py

script:
- git diff upstream/master xarray/**/*py | flake8 --diff --exit-zero || true
- flake8 -j auto xarray
- python -OO -c "import xarray"
- py.test xarray --cov=xarray --cov-config ci/.coveragerc --cov-report term-missing --verbose $EXTRA_FLAGS
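
The CI step now lints the whole package with flake8 instead of only the lines changed relative to upstream/master. A rough local equivalent, assuming flake8 is installed and the command is run from the repository root (this snippet is an illustration, not part of the PR):

# Rough local equivalent of the new CI step (illustration only, not from the PR).
import subprocess

# Mirrors `flake8 -j auto xarray`: lint the whole package, one worker per core.
result = subprocess.run(["flake8", "-j", "auto", "xarray"])
if result.returncode != 0:
    raise SystemExit("flake8 reported style violations")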

11 changes: 11 additions & 0 deletions xarray/backends/__init__.py
@@ -11,3 +11,14 @@
from .scipy_ import ScipyDataStore
from .h5netcdf_ import H5NetCDFStore
from .zarr import ZarrStore

__all__ = [
'AbstractDataStore',
'InMemoryDataStore',
'NetCDF4DataStore',
'PydapDataStore',
'NioDataStore',
'ScipyDataStore',
'H5NetCDFStore',
'ZarrStore',
]
6 changes: 3 additions & 3 deletions xarray/backends/api.py
@@ -27,18 +27,18 @@ def _get_default_engine(path, allow_remote=False):
engine = 'netcdf4'
except ImportError:
try:
import pydap
import pydap # flake8: noqa
engine = 'pydap'
except ImportError:
raise ValueError('netCDF4 or pydap is required for accessing '
'remote datasets via OPeNDAP')
else:
try:
import netCDF4
import netCDF4 # flake8: noqa
engine = 'netcdf4'
except ImportError: # pragma: no cover
try:
import scipy.io.netcdf
import scipy.io.netcdf # flake8: noqa
engine = 'scipy'
except ImportError:
raise ValueError('cannot read or write netCDF files without '
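
An aside on the suppression comments added above: these imports only probe which backend libraries are available, so flake8 would otherwise flag them as unused (F401). Newer flake8 releases recommend the narrower per-line form, which silences only that warning on that line. A hypothetical variant of the same probe, not part of this PR:

# Hypothetical availability probe using the per-line suppression form.
def _netcdf4_available():
    try:
        import netCDF4  # noqa: F401  (imported only to test availability)
        return True
    except ImportError:
        return False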
1 change: 1 addition & 0 deletions xarray/backends/h5netcdf_.py
@@ -55,6 +55,7 @@ def _open_h5netcdf_group(filename, mode, group):
class H5NetCDFStore(WritableCFDataStore, DataStorePickleMixin):
"""Store for reading and writing data via h5netcdf
"""

def __init__(self, filename, mode='r', format=None, group=None,
writer=None, autoclose=False):
if format not in [None, 'NETCDF4']:
1 change: 1 addition & 0 deletions xarray/backends/memory.py
@@ -18,6 +18,7 @@ class InMemoryDataStore(AbstractWritableDataStore):

This store exists purely for internal testing purposes.
"""

def __init__(self, variables=None, attributes=None, writer=None):
self._variables = OrderedDict() if variables is None else variables
self._attributes = OrderedDict() if attributes is None else attributes
1 change: 1 addition & 0 deletions xarray/backends/netCDF4_.py
@@ -218,6 +218,7 @@ class NetCDF4DataStore(WritableCFDataStore, DataStorePickleMixin):

This store supports NetCDF3, NetCDF4 and OpenDAP datasets.
"""

def __init__(self, netcdf4_dataset, mode='r', writer=None, opener=None,
autoclose=False):

1 change: 1 addition & 0 deletions xarray/backends/pydap_.py
@@ -60,6 +60,7 @@ class PydapDataStore(AbstractDataStore):
This store provides an alternative way to access OpenDAP datasets that may
be useful if the netCDF4 library is not available.
"""

def __init__(self, ds):
"""
Parameters
1 change: 1 addition & 0 deletions xarray/backends/pynio_.py
@@ -40,6 +40,7 @@ def __getitem__(self, key):
class NioDataStore(AbstractDataStore, DataStorePickleMixin):
"""Store for accessing datasets via PyNIO
"""

def __init__(self, filename, mode='r', autoclose=False):
import Nio
opener = functools.partial(Nio.open_file, filename, mode=mode)
13 changes: 7 additions & 6 deletions xarray/backends/rasterio_.py
@@ -20,6 +20,7 @@

class RasterioArrayWrapper(BackendArray):
"""A wrapper around rasterio dataset objects"""

def __init__(self, rasterio_ds):
self.rasterio_ds = rasterio_ds
self._shape = (rasterio_ds.count, rasterio_ds.height,
@@ -63,9 +64,9 @@ def __getitem__(self, key):
elif is_scalar(k):
# windowed operations will always return an array
# we will have to squeeze it later
squeeze_axis.append(i+1)
squeeze_axis.append(i + 1)
start = k
stop = k+1
stop = k + 1
else:
k = np.asarray(k)
start = k[0]
@@ -165,10 +166,10 @@ def open_rasterio(filename, chunks=None, cache=None, lock=None):
dx, dy = riods.res[0], -riods.res[1]
x0 = riods.bounds.right if dx < 0 else riods.bounds.left
y0 = riods.bounds.top if dy < 0 else riods.bounds.bottom
coords['y'] = np.linspace(start=y0 + dy/2, num=ny,
stop=(y0 + (ny - 1) * dy) + dy/2)
coords['x'] = np.linspace(start=x0 + dx/2, num=nx,
stop=(x0 + (nx - 1) * dx) + dx/2)
coords['y'] = np.linspace(start=y0 + dy / 2, num=ny,
stop=(y0 + (ny - 1) * dy) + dy / 2)
coords['x'] = np.linspace(start=x0 + dx / 2, num=nx,
stop=(x0 + (nx - 1) * dx) + dx / 2)

# Attributes
attrs = {}
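
The linspace calls above place the 'x' and 'y' coordinates at pixel centers: the first value sits half a pixel inside the raster bounds and the last sits half a pixel inside the opposite edge. A standalone sketch of the same arithmetic, with made-up bounds and resolution (the numbers are hypothetical, not taken from the PR):

import numpy as np

# Hypothetical raster: 4 columns, left edge at x0 = 100.0, pixel width dx = 0.5.
nx, x0, dx = 4, 100.0, 0.5
x = np.linspace(start=x0 + dx / 2, num=nx,
                stop=(x0 + (nx - 1) * dx) + dx / 2)
print(x)  # [100.25 100.75 101.25 101.75] -- one value per pixel center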
4 changes: 2 additions & 2 deletions xarray/backends/zarr.py
@@ -71,8 +71,8 @@ def _replace_slices_with_arrays(key, shape):
slice_count += 1
else:
assert isinstance(k, np.ndarray)
k = k[(slice(None),) * array_subspace_size
+ (np.newaxis,) * num_slices]
k = k[(slice(None),) * array_subspace_size +
(np.newaxis,) * num_slices]
new_key.append(k)
return tuple(new_key)

4 changes: 4 additions & 0 deletions xarray/conventions.py
@@ -25,6 +25,7 @@ class StackedBytesArray(indexing.ExplicitlyIndexedNDArrayMixin):
array('abc',
dtype='|S3')
"""

def __init__(self, array):
"""
Parameters
@@ -70,6 +71,7 @@ class BytesToStringArray(indexing.ExplicitlyIndexedNDArrayMixin):
array(['abc'],
dtype=object)
"""

def __init__(self, array, encoding='utf-8'):
"""
Parameters
@@ -120,6 +122,7 @@ class NativeEndiannessArray(indexing.ExplicitlyIndexedNDArrayMixin):
>>> NativeEndianArray(x)[:].dtype
dtype('int16')
"""

def __init__(self, array):
self.array = indexing.as_indexable(array)

@@ -148,6 +151,7 @@ class BoolTypeArray(indexing.ExplicitlyIndexedNDArrayMixin):
>>> BoolTypeArray(x)[:].dtype
dtype('bool')
"""

def __init__(self, array):
self.array = indexing.as_indexable(array)

1 change: 1 addition & 0 deletions xarray/core/accessors.py
@@ -80,6 +80,7 @@ class DatetimeAccessor(object):
`dayofyear` may not be accurate.

"""

def __init__(self, xarray_obj):
if not is_datetime_like(xarray_obj.dtype):
raise TypeError("'dt' accessor only available for "
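
The class above backs the .dt attribute on datetime-typed DataArrays. A minimal usage sketch, not part of the diff:

import pandas as pd
import xarray as xr

# Four timestamps, six hours apart, as a datetime64 DataArray.
times = xr.DataArray(pd.date_range("2018-01-01", periods=4, freq="6H"),
                     dims="time", name="time")
print(times.dt.hour.values)       # [ 0  6 12 18]
print(times.dt.dayofyear.values)  # [1 1 1 1]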
2 changes: 1 addition & 1 deletion xarray/core/alignment.py
@@ -363,7 +363,7 @@ def var_indexers(var, indexers):
"Indexer has dimensions {0:s} that are different "
"from that to be indexed along {1:s}. "
"This will behave differently in the future.".format(
str(indexer.dims), dim),
str(indexer.dims), dim),
FutureWarning, stacklevel=3)

if dim in variables:
32 changes: 16 additions & 16 deletions xarray/core/combine.py
@@ -9,7 +9,8 @@
from .alignment import align
from .merge import merge
from .pycompat import iteritems, OrderedDict, basestring
from .variable import Variable, as_variable, IndexVariable, concat as concat_vars
from .variable import Variable, as_variable, IndexVariable, \
concat as concat_vars


def concat(objs, dim=None, data_vars='all', coords='different',
@@ -103,12 +104,12 @@ def concat(objs, dim=None, data_vars='all', coords='different',

if mode is not None:
raise ValueError('`mode` is no longer a valid argument to '
'xarray.concat; it has been split into the `data_vars` '
'and `coords` arguments')
'xarray.concat; it has been split into the '
'`data_vars` and `coords` arguments')
if concat_over is not None:
raise ValueError('`concat_over` is no longer a valid argument to '
'xarray.concat; it has been split into the `data_vars` '
'and `coords` arguments')
'xarray.concat; it has been split into the '
'`data_vars` and `coords` arguments')

if isinstance(first_obj, DataArray):
f = _dataarray_concat
@@ -166,8 +167,8 @@ def process_subset_opt(opt, subset):
if k not in concat_over:
# Compare the variable of all datasets vs. the one
# of the first dataset. Perform the minimum amount of
# loads in order to avoid multiple loads from disk while
# keeping the RAM footprint low.
# loads in order to avoid multiple loads from disk
# while keeping the RAM footprint low.
v_lhs = datasets[0].variables[k].load()
# We'll need to know later on if variables are equal.
computed = []
@@ -199,11 +200,11 @@ def process_subset_opt(opt, subset):
if subset == 'coords':
raise ValueError(
'some variables in coords are not coordinates on '
'the first dataset: %s' % invalid_vars)
'the first dataset: %s' % (invalid_vars,))
else:
raise ValueError(
'some variables in data_vars are not data variables on '
'the first dataset: %s' % invalid_vars)
'some variables in data_vars are not data variables '
'on the first dataset: %s' % (invalid_vars,))
concat_over.update(opt)

process_subset_opt(data_vars, 'data_vars')
@@ -275,7 +276,6 @@ def insert_result_variable(k, v):
raise ValueError(
'variable %s not equal across datasets' % k)


# we've already verified everything is consistent; now, calculate
# shared dimension sizes so we can expand the necessary variables
dim_lengths = [ds.dims.get(dim, 1) for ds in datasets]
@@ -377,8 +377,8 @@ def auto_combine(datasets,
This method attempts to combine a list of datasets into a single entity by
inspecting metadata and using a combination of concat and merge.

It does not concatenate along more than one dimension or sort data under any
circumstances. It does align coordinates, but different variables on
It does not concatenate along more than one dimension or sort data under
any circumstances. It does align coordinates, but different variables on
datasets can cause it to fail under some scenarios. In complex cases, you
may need to clean up your data and use ``concat``/``merge`` explicitly.

@@ -392,9 +392,9 @@ def auto_combine(datasets,
Dataset objects to merge.
concat_dim : str or DataArray or Index, optional
Dimension along which to concatenate variables, as used by
:py:func:`xarray.concat`. You only need to provide this argument if the
dimension along which you want to concatenate is not a dimension in
the original datasets, e.g., if you want to stack a collection of
:py:func:`xarray.concat`. You only need to provide this argument if
the dimension along which you want to concatenate is not a dimension
in the original datasets, e.g., if you want to stack a collection of
2D arrays along a third dimension.
By default, xarray attempts to infer this argument by examining
component files. Set ``concat_dim=None`` explicitly to disable
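
To make the docstring above concrete, here is a minimal sketch of combining two datasets that differ only along one dimension. The data are invented, and auto_combine is the API of this era (later xarray releases deprecate it in favor of the combine_* functions):

import numpy as np
import xarray as xr

# Two datasets covering consecutive slices of a shared 'time' dimension.
ds1 = xr.Dataset({"a": ("time", np.arange(3))}, coords={"time": [0, 1, 2]})
ds2 = xr.Dataset({"a": ("time", np.arange(3, 6))}, coords={"time": [3, 4, 5]})

combined = xr.auto_combine([ds1, ds2], concat_dim="time")
print(combined.a.values)  # [0 1 2 3 4 5]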
4 changes: 2 additions & 2 deletions xarray/core/common.py
@@ -91,7 +91,7 @@ def __complex__(self):
return complex(self.values)

def __long__(self):
return long(self.values)
return long(self.values) # flake8: noqa

def __array__(self, dtype=None):
return np.asarray(self.values, dtype=dtype)
@@ -608,7 +608,7 @@ def _resample_immediately(self, freq, dim, how, skipna,
"calculations. Instead of passing 'dim' and "
"'how=\"{how}\", instead consider using "
".resample({dim}=\"{freq}\").{how}() ".format(
dim=dim, freq=freq, how=how
dim=dim, freq=freq, how=how
), DeprecationWarning, stacklevel=3)

if isinstance(dim, basestring):
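
The warning above steers users toward the groupby-style resample API. A minimal sketch of the recommended form, with invented data:

import numpy as np
import pandas as pd
import xarray as xr

# 48 hourly values spanning two days.
da = xr.DataArray(np.arange(48.0), dims="time",
                  coords={"time": pd.date_range("2018-01-01", periods=48, freq="H")})

# Deprecated form: da.resample("1D", dim="time", how="mean")
daily_mean = da.resample(time="1D").mean()
print(daily_mean.values)  # [11.5 35.5]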
37 changes: 20 additions & 17 deletions xarray/core/computation.py
@@ -1,12 +1,11 @@
"""Functions for applying functions that act on arrays to xarray's labeled data.
"""
Functions for applying functions that act on arrays to xarray's labeled data.

NOT PUBLIC API.
"""
import collections
import functools
import itertools
import operator
import re

import numpy as np

@@ -36,6 +35,7 @@ class _UFuncSignature(object):
output_core_dims : tuple[tuple]
Core dimension names on each output variable.
"""

def __init__(self, input_core_dims, output_core_dims=((),)):
self.input_core_dims = tuple(tuple(a) for a in input_core_dims)
self.output_core_dims = tuple(tuple(a) for a in output_core_dims)
@@ -248,8 +248,8 @@ def assert_and_return_exact_match(all_keys):
for keys in all_keys[1:]:
if keys != first_keys:
raise ValueError(
'exact match required for all data variable names, but %r != %r'
% (keys, first_keys))
'exact match required for all data variable names, '
'but %r != %r' % (keys, first_keys))
return first_keys


@@ -483,9 +483,10 @@ def broadcast_compat_data(variable, broadcast_dims, core_dims):
set_old_dims = set(old_dims)
missing_core_dims = [d for d in core_dims if d not in set_old_dims]
if missing_core_dims:
raise ValueError('operand to apply_ufunc has required core dimensions '
'%r, but some of these are missing on the input '
'variable: %r' % (list(core_dims), missing_core_dims))
raise ValueError(
'operand to apply_ufunc has required core dimensions %r, but '
'some of these are missing on the input variable: %r'
% (list(core_dims), missing_core_dims))

set_new_dims = set(new_dims)
unexpected_dims = [d for d in old_dims if d not in set_new_dims]
@@ -552,9 +553,11 @@ def apply_variable_ufunc(func, *args, **kwargs):
input_dims = [broadcast_dims + dims
for dims in signature.input_core_dims]
numpy_func = func
func = lambda *arrays: _apply_with_dask_atop(
numpy_func, arrays, input_dims, output_dims, signature,
output_dtypes, output_sizes)

def func(*arrays):
return _apply_with_dask_atop(
numpy_func, arrays, input_dims, output_dims,
signature, output_dtypes, output_sizes)
elif dask == 'allowed':
pass
else:
@@ -598,8 +601,8 @@ def _apply_with_dask_atop(func, args, input_dims, output_dims, signature,
new_dims = signature.all_output_core_dims - signature.all_input_core_dims
if any(dim not in output_sizes for dim in new_dims):
raise ValueError("when using dask='parallelized' with apply_ufunc, "
'output core dimensions not found on inputs must have '
'explicitly set sizes with ``output_sizes``: {}'
'output core dimensions not found on inputs must '
'have explicitly set sizes with ``output_sizes``: {}'
.format(new_dims))

for n, (data, core_dims) in enumerate(
@@ -763,9 +766,9 @@ def apply_ufunc(func, *args, **kwargs):
output_dtypes : list of dtypes, optional
Optional list of output dtypes. Only used if dask='parallelized'.
output_sizes : dict, optional
Optional mapping from dimension names to sizes for outputs. Only used if
dask='parallelized' and new dimensions (not found on inputs) appear on
outputs.
Optional mapping from dimension names to sizes for outputs. Only used
if dask='parallelized' and new dimensions (not found on inputs) appear
on outputs.

Returns
-------
@@ -845,7 +848,7 @@ def earth_mover_distance(first_samples,
----------
.. [1] http://docs.scipy.org/doc/numpy/reference/ufuncs.html
.. [2] http://docs.scipy.org/doc/numpy/reference/c-api.generalized-ufuncs.html
"""
""" # noqa: E501 # don't error on that URL one line up
from .groupby import GroupBy
from .dataarray import DataArray
from .variable import Variable
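
To make the output_sizes behavior described in the apply_ufunc docstring above concrete: when the applied function creates a core dimension that does not exist on the inputs, dask='parallelized' cannot infer its length, so it must be declared. A minimal sketch with invented data (assumes dask is installed; in the xarray releases of this era output_sizes is a direct keyword of apply_ufunc):

import numpy as np
import xarray as xr

def _quartiles(arr):
    # Reduce the trailing core axis ('time'); append a new axis of length 3.
    return np.moveaxis(np.percentile(arr, [25, 50, 75], axis=-1), 0, -1)

da = xr.DataArray(np.random.rand(4, 100), dims=("x", "time")).chunk({"x": 2})

result = xr.apply_ufunc(
    _quartiles, da,
    input_core_dims=[["time"]],
    output_core_dims=[["quantile"]],
    dask="parallelized",
    output_dtypes=[float],
    output_sizes={"quantile": 3},  # 'quantile' is not on the inputs, so declare it
)
print(result.compute().dims)  # ('x', 'quantile')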