
Commit 9c9a72e

Fix all flake8 issues under default settings
1 parent 7293545 commit 9c9a72e

24 files changed: +288 -195 lines changed
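For context: flake8's default settings combine the pyflakes checks (F codes) with the pycodestyle checks (E/W codes), including the 79-character line limit (E501) that drives most of the rewraps below. The commit does not record the exact invocation; a minimal sketch of an equivalent check, using flake8's documented legacy Python API, might look like:

    # roughly equivalent to running `flake8 xarray` from the repository root
    from flake8.api import legacy as flake8

    style_guide = flake8.get_style_guide()        # default settings
    report = style_guide.check_files(['xarray'])
    print(report.total_errors)                    # 0 is the goal of this commit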

xarray/backends/__init__.py (+11)

@@ -11,3 +11,14 @@
 from .scipy_ import ScipyDataStore
 from .h5netcdf_ import H5NetCDFStore
 from .zarr import ZarrStore
+
+__all__ = [
+    'AbstractDataStore',
+    'InMemoryDataStore',
+    'NetCDF4DataStore',
+    'PydapDataStore',
+    'NioDataStore',
+    'ScipyDataStore',
+    'H5NetCDFStore',
+    'ZarrStore',
+]
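Why adding ``__all__`` fixes anything: under default settings, pyflakes reports F401 ("imported but unused") for imports a module never references, which is exactly the situation in a re-export-only ``__init__.py`` like this one. A name that appears in ``__all__`` counts as used, so the list above silences F401 while also documenting the package's public interface. A minimal sketch of the pattern:

    # package/__init__.py
    from .zarr import ZarrStore  # alone, this line is flagged as F401

    # naming the import in __all__ marks it as a deliberate re-export
    # (and controls what `from package import *` exposes)
    __all__ = ['ZarrStore']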

xarray/core/combine.py (+16 -15)

@@ -9,7 +9,8 @@
 from .alignment import align
 from .merge import merge
 from .pycompat import iteritems, OrderedDict, basestring
-from .variable import Variable, as_variable, IndexVariable, concat as concat_vars
+from .variable import Variable, as_variable, IndexVariable, \
+    concat as concat_vars
 
 
 def concat(objs, dim=None, data_vars='all', coords='different',
@@ -103,12 +104,12 @@ def concat(objs, dim=None, data_vars='all', coords='different',
 
     if mode is not None:
         raise ValueError('`mode` is no longer a valid argument to '
-                         'xarray.concat; it has been split into the `data_vars` '
-                         'and `coords` arguments')
+                         'xarray.concat; it has been split into the '
+                         '`data_vars` and `coords` arguments')
     if concat_over is not None:
         raise ValueError('`concat_over` is no longer a valid argument to '
-                         'xarray.concat; it has been split into the `data_vars` '
-                         'and `coords` arguments')
+                         'xarray.concat; it has been split into the '
+                         '`data_vars` and `coords` arguments')
 
     if isinstance(first_obj, DataArray):
         f = _dataarray_concat
@@ -166,8 +167,8 @@ def process_subset_opt(opt, subset):
                     if k not in concat_over:
                         # Compare the variable of all datasets vs. the one
                         # of the first dataset. Perform the minimum amount of
-                        # loads in order to avoid multiple loads from disk while
-                        # keeping the RAM footprint low.
+                        # loads in order to avoid multiple loads from disk
+                        # while keeping the RAM footprint low.
                         v_lhs = datasets[0].variables[k].load()
                         # We'll need to know later on if variables are equal.
                         computed = []
@@ -199,11 +200,11 @@ def process_subset_opt(opt, subset):
                 if subset == 'coords':
                     raise ValueError(
                         'some variables in coords are not coordinates on '
-                        'the first dataset: %s' % invalid_vars)
+                        'the first dataset: %s' % (invalid_vars,))
                 else:
                     raise ValueError(
-                        'some variables in data_vars are not data variables on '
-                        'the first dataset: %s' % invalid_vars)
+                        'some variables in data_vars are not data variables '
+                        'on the first dataset: %s' % (invalid_vars,))
             concat_over.update(opt)
 
     process_subset_opt(data_vars, 'data_vars')
@@ -376,8 +377,8 @@ def auto_combine(datasets,
     This method attempts to combine a list of datasets into a single entity by
     inspecting metadata and using a combination of concat and merge.
 
-    It does not concatenate along more than one dimension or sort data under any
-    circumstances. It does align coordinates, but different variables on
+    It does not concatenate along more than one dimension or sort data under
+    any circumstances. It does align coordinates, but different variables on
     datasets can cause it to fail under some scenarios. In complex cases, you
     may need to clean up your data and use ``concat``/``merge`` explicitly.
@@ -391,9 +392,9 @@ def auto_combine(datasets,
         Dataset objects to merge.
     concat_dim : str or DataArray or Index, optional
         Dimension along which to concatenate variables, as used by
-        :py:func:`xarray.concat`. You only need to provide this argument if the
-        dimension along which you want to concatenate is not a dimension in
-        the original datasets, e.g., if you want to stack a collection of
+        :py:func:`xarray.concat`. You only need to provide this argument if
+        the dimension along which you want to concatenate is not a dimension
+        in the original datasets, e.g., if you want to stack a collection of
         2D arrays along a third dimension.
         By default, xarray attempts to infer this argument by examining
         component files. Set ``concat_dim=None`` explicitly to disable
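Two details in these combine.py hunks are worth spelling out. The rewrapped messages rely on Python joining adjacent string literals at compile time, so the error text is unchanged byte for byte. Separately, the switch from ``% invalid_vars`` to ``% (invalid_vars,)`` is the defensive spelling of ``%``-formatting: a bare tuple on the right-hand side gets unpacked as multiple arguments, while a one-element tuple always formats the value as a single object. (In this function ``invalid_vars`` is a list, which ``%`` does not unpack, so the old form happened to work.) A short illustration:

    # adjacent string literals concatenate at compile time:
    assert 'data_vars ' 'and coords' == 'data_vars and coords'

    # a bare tuple is unpacked by %, which breaks single-%s formatting;
    # wrapping it in a one-tuple formats it as one object:
    invalid_vars = ('x', 'y')  # hypothetical value for illustration
    try:
        'invalid: %s' % invalid_vars        # two values for one %s
    except TypeError:
        pass
    assert 'invalid: %s' % (invalid_vars,) == "invalid: ('x', 'y')"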

xarray/core/computation.py (+19 -15)

@@ -1,4 +1,5 @@
-"""Functions for applying functions that act on arrays to xarray's labeled data.
+"""
+Functions for applying functions that act on arrays to xarray's labeled data.
 
 NOT PUBLIC API.
 """
@@ -247,8 +248,8 @@ def assert_and_return_exact_match(all_keys):
     for keys in all_keys[1:]:
         if keys != first_keys:
             raise ValueError(
-                'exact match required for all data variable names, but %r != %r'
-                % (keys, first_keys))
+                'exact match required for all data variable names, '
+                'but %r != %r' % (keys, first_keys))
     return first_keys
@@ -482,9 +483,10 @@ def broadcast_compat_data(variable, broadcast_dims, core_dims):
     set_old_dims = set(old_dims)
     missing_core_dims = [d for d in core_dims if d not in set_old_dims]
     if missing_core_dims:
-        raise ValueError('operand to apply_ufunc has required core dimensions '
-                         '%r, but some of these are missing on the input '
-                         'variable: %r' % (list(core_dims), missing_core_dims))
+        raise ValueError(
+            'operand to apply_ufunc has required core dimensions %r, but '
+            'some of these are missing on the input variable: %r'
+            % (list(core_dims), missing_core_dims))
 
     set_new_dims = set(new_dims)
     unexpected_dims = [d for d in old_dims if d not in set_new_dims]
@@ -551,9 +553,11 @@ def apply_variable_ufunc(func, *args, **kwargs):
         input_dims = [broadcast_dims + dims
                       for dims in signature.input_core_dims]
         numpy_func = func
-        func = lambda *arrays: _apply_with_dask_atop(
-            numpy_func, arrays, input_dims, output_dims, signature,
-            output_dtypes, output_sizes)
+
+        def func(*arrays):
+            return _apply_with_dask_atop(
+                numpy_func, arrays, input_dims, output_dims,
+                signature, output_dtypes, output_sizes)
     elif dask == 'allowed':
         pass
     else:
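This hunk is the one fix in the commit that reshapes code rather than rewrapping it: flake8's E731 says "do not assign a lambda expression, use a def". The rewrite is behavior-preserving — the new ``func`` still closes over ``numpy_func``, ``input_dims``, and the other locals — but a ``def`` gets a proper ``__name__`` for tracebacks and a statement body that wraps naturally under the line limit. A minimal sketch of the same transformation, with hypothetical names:

    scale = 10

    # flagged as E731: a lambda assigned to a name is just a worse `def`
    f = lambda x: x * scale

    # equivalent def: same closure over `scale`, plus a real __name__
    def f(x):
        return x * scale

    assert f(2) == 20 and f.__name__ == 'f'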
@@ -597,8 +601,8 @@ def _apply_with_dask_atop(func, args, input_dims, output_dims, signature,
     new_dims = signature.all_output_core_dims - signature.all_input_core_dims
     if any(dim not in output_sizes for dim in new_dims):
         raise ValueError("when using dask='parallelized' with apply_ufunc, "
-                         'output core dimensions not found on inputs must have '
-                         'explicitly set sizes with ``output_sizes``: {}'
+                         'output core dimensions not found on inputs must '
+                         'have explicitly set sizes with ``output_sizes``: {}'
                          .format(new_dims))
 
     for n, (data, core_dims) in enumerate(
@@ -762,9 +766,9 @@ def apply_ufunc(func, *args, **kwargs):
     output_dtypes : list of dtypes, optional
         Optional list of output dtypes. Only used if dask='parallelized'.
     output_sizes : dict, optional
-        Optional mapping from dimension names to sizes for outputs. Only used if
-        dask='parallelized' and new dimensions (not found on inputs) appear on
-        outputs.
+        Optional mapping from dimension names to sizes for outputs. Only used
+        if dask='parallelized' and new dimensions (not found on inputs) appear
+        on outputs.
 
     Returns
     -------
@@ -844,7 +848,7 @@ def earth_mover_distance(first_samples,
     ----------
     .. [1] http://docs.scipy.org/doc/numpy/reference/ufuncs.html
     .. [2] http://docs.scipy.org/doc/numpy/reference/c-api.generalized-ufuncs.html
-    """
+    """  # noqa: E501  # don't error on that URL one line up
     from .groupby import GroupBy
     from .dataarray import DataArray
     from .variable import Variable
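The last hunk is the one place the commit suppresses a check instead of reflowing: the generalized-ufuncs URL cannot be wrapped without breaking it, so the docstring's closing quotes carry ``# noqa: E501``, which silences only that error code rather than everything a bare ``# noqa`` would hide. As the inline comment says, the target is the URL one line up; the author is evidently relying on flake8 treating a ``noqa`` placed after a multi-line string as covering the whole string, which is what lets the marker sit on the closing quotes. The narrow form, on an ordinary long line, looks like:

    # `# noqa: E501` suppresses only the line-length check on this line;
    # a bare `# noqa` would suppress every check, so prefer the coded form
    DOCS_URL = 'http://docs.scipy.org/doc/numpy/reference/c-api.generalized-ufuncs.html'  # noqa: E501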

xarray/core/coordinates.py (+1 -1)

@@ -107,7 +107,7 @@ def _merge_inplace(self, other):
         # don't include indexes in priority_vars, because we didn't align
         # first
         priority_vars = OrderedDict(
-            (k, v) for k, v in self.variables.items() if k not in self.dims)
+            kv for kv in self.variables.items() if kv[0] not in self.dims)
         variables = merge_coords_for_inplace_math(
             [self.variables, other.variables], priority_vars=priority_vars)
         yield
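The coordinates.py fix shortens the line by restructuring instead of wrapping: since ``variables.items()`` already yields ``(name, variable)`` pairs, passing each pair through as ``kv`` and testing ``kv[0]`` builds exactly the same OrderedDict as unpacking into ``(k, v)``. A quick equivalence check with stand-in data:

    from collections import OrderedDict

    items = [('x', 1), ('time', 2), ('y', 3)]  # stand-in for self.variables.items()
    dims = {'time'}                            # stand-in for self.dims

    old = OrderedDict((k, v) for k, v in items if k not in dims)
    new = OrderedDict(kv for kv in items if kv[0] not in dims)
    assert old == new == OrderedDict([('x', 1), ('y', 3)])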

xarray/core/dataarray.py (+21 -21)

@@ -316,7 +316,7 @@ def _to_dataset_whole(self, name=None, shallow_copy=True):
         if name in self.coords:
             raise ValueError('cannot create a Dataset from a DataArray with '
                              'the same name as one of its coordinates')
-        # use private APIs here for speed: this is called by _to_temp_dataset(),
+        # use private APIs for speed: this is called by _to_temp_dataset(),
         # which is used in the guts of a lot of operations (e.g., reindex)
         variables = self._coords.copy()
         variables[name] = self.variable
@@ -428,9 +428,9 @@ def to_index(self):
     def dims(self):
         """Tuple of dimension names associated with this array.
 
-        Note that the type of this property is inconsistent with `Dataset.dims`.
-        See `Dataset.sizes` and `DataArray.sizes` for consistently named
-        properties.
+        Note that the type of this property is inconsistent with
+        `Dataset.dims`. See `Dataset.sizes` and `DataArray.sizes` for
+        consistently named properties.
         """
         return self.variable.dims
@@ -868,12 +868,11 @@ def reindex(self, method=None, tolerance=None, copy=True, **indexers):
             Maximum distance between original and new labels for inexact
             matches. The values of the index at the matching locations must
             satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
-            Requires pandas>=0.17.
         **indexers : dict
             Dictionary with keys given by dimension names and values given by
-            arrays of coordinates tick labels. Any mis-matched coordinate values
-            will be filled in with NaN, and any mis-matched dimension names will
-            simply be ignored.
+            arrays of coordinates tick labels. Any mis-matched coordinate
+            values will be filled in with NaN, and any mis-matched dimension
+            names will simply be ignored.
 
         Returns
         -------
@@ -943,8 +942,8 @@ def swap_dims(self, dims_dict):
         return self._from_temp_dataset(ds)
 
     def expand_dims(self, dim, axis=None):
-        """Return a new object with an additional axis (or axes) inserted at the
-        corresponding position in the array shape.
+        """Return a new object with an additional axis (or axes) inserted at
+        the corresponding position in the array shape.
 
         If dim is already a scalar coordinate, it will be promoted to a 1D
         coordinate consisting of a single value.
@@ -970,16 +969,17 @@ def expand_dims(self, dim, axis=None):
         return self._from_temp_dataset(ds)
 
     def set_index(self, append=False, inplace=False, **indexes):
-        """Set DataArray (multi-)indexes using one or more existing coordinates.
+        """Set DataArray (multi-)indexes using one or more existing
+        coordinates.
 
         Parameters
         ----------
         append : bool, optional
            If True, append the supplied index(es) to the existing index(es).
            Otherwise replace the existing index(es) (default).
         inplace : bool, optional
-           If True, set new index(es) in-place. Otherwise, return a new DataArray
-           object.
+           If True, set new index(es) in-place. Otherwise, return a new
+           DataArray object.
         **indexes : {dim: index, ...}
            Keyword arguments with names matching dimensions and values given
            by (lists of) the names of existing coordinates or variables to set
@@ -988,7 +988,7 @@ def set_index(self, append=False, inplace=False, **indexes):
         Returns
         -------
         obj : DataArray
-            Another dataarray, with this dataarray's data but replaced coordinates.
+            Another dataarray, with this data but replaced coordinates.
 
         See Also
         --------
@@ -1607,9 +1607,9 @@ def from_series(cls, series):
         """Convert a pandas.Series into an xarray.DataArray.
 
         If the series's index is a MultiIndex, it will be expanded into a
-        tensor product of one-dimensional coordinates (filling in missing values
-        with NaN). Thus this operation should be the inverse of the `to_series`
-        method.
+        tensor product of one-dimensional coordinates (filling in missing
+        values with NaN). Thus this operation should be the inverse of the
+        `to_series` method.
         """
         # TODO: add a 'name' parameter
         name = series.name
@@ -2089,16 +2089,16 @@ def quantile(self, q, dim=None, interpolation='linear', keep_attrs=False):
         numpy.nanpercentile, pandas.Series.quantile, Dataset.quantile
         """
 
-        ds = self._to_temp_dataset().quantile(q, dim=dim, keep_attrs=keep_attrs,
-                                              interpolation=interpolation)
+        ds = self._to_temp_dataset().quantile(
+            q, dim=dim, keep_attrs=keep_attrs, interpolation=interpolation)
         return self._from_temp_dataset(ds)
 
     def rank(self, dim, pct=False, keep_attrs=False):
         """Ranks the data.
 
         Equal values are assigned a rank that is the average of the ranks that
-        would have been otherwise assigned to all of the values within that set.
-        Ranks begin at 1, not 0. If pct is True, computes percentage ranks.
+        would have been otherwise assigned to all of the values within that
+        set. Ranks begin at 1, not 0. If pct, computes percentage ranks.
 
         NaNs in the input array are returned as NaNs.

xarray/core/extensions.py (+3 -3)

@@ -27,9 +27,9 @@ def __get__(self, obj, cls):
         try:
             accessor_obj = self._accessor(obj)
         except AttributeError:
-            # __getattr__ on data object will swallow any AttributeErrors raised
-            # when initializing the accessor, so we need to raise as something
-            # else (GH933):
+            # __getattr__ on data object will swallow any AttributeErrors
+            # raised when initializing the accessor, so we need to raise as
+            # something else (GH933):
             msg = 'error initializing %r accessor.' % self._name
             if PY2:
                 msg += ' Full traceback:\n' + traceback.format_exc()

xarray/core/groupby.py (+4 -4)

@@ -550,8 +550,8 @@ def reduce(self, func, dim=None, axis=None, keep_attrs=False,
        ----------
        func : function
            Function which can be called in the form
-           `func(x, axis=axis, **kwargs)` to return the result of collapsing an
-           np.ndarray over an integer valued axis.
+           `func(x, axis=axis, **kwargs)` to return the result of collapsing
+           an np.ndarray over an integer valued axis.
        dim : str or sequence of str, optional
            Dimension(s) over which to apply `func`.
        axis : int or sequence of int, optional
@@ -632,8 +632,8 @@ def reduce(self, func, dim=None, keep_attrs=False, **kwargs):
        ----------
        func : function
            Function which can be called in the form
-           `func(x, axis=axis, **kwargs)` to return the result of collapsing an
-           np.ndarray over an integer valued axis.
+           `func(x, axis=axis, **kwargs)` to return the result of collapsing
+           an np.ndarray over an integer valued axis.
        dim : str or sequence of str, optional
            Dimension(s) over which to apply `func`.
        axis : int or sequence of int, optional

xarray/core/indexing.py (+4 -4)

@@ -125,17 +125,17 @@ def convert_label_indexer(index, label, index_name='', method=None,
                                       _try_get_item(label.stop),
                                       _try_get_item(label.step))
         if not isinstance(indexer, slice):
-            # unlike pandas, in xarray we never want to silently convert a slice
-            # indexer into an array indexer
+            # unlike pandas, in xarray we never want to silently convert a
+            # slice indexer into an array indexer
             raise KeyError('cannot represent labeled-based slice indexer for '
                            'dimension %r with a slice over integer positions; '
                            'the index is unsorted or non-unique' % index_name)
 
     elif is_dict_like(label):
         is_nested_vals = _is_nested_tuple(tuple(label.values()))
         if not isinstance(index, pd.MultiIndex):
-            raise ValueError('cannot use a dict-like object for selection on a '
-                             'dimension that does not have a MultiIndex')
+            raise ValueError('cannot use a dict-like object for selection on '
+                             'a dimension that does not have a MultiIndex')
         elif len(label) == index.nlevels and not is_nested_vals:
             indexer = index.get_loc(tuple((label[k] for k in index.names)))
         else:

xarray/core/merge.py (+5 -3)

@@ -507,8 +507,9 @@ def merge(objects, compat='no_conflicts', join='outer'):
     from .dataarray import DataArray
     from .dataset import Dataset
 
-    dict_like_objects = [obj.to_dataset() if isinstance(obj, DataArray) else obj
-                         for obj in objects]
+    dict_like_objects = [
+        obj.to_dataset() if isinstance(obj, DataArray) else obj
+        for obj in objects]
 
     variables, coord_names, dims = merge_core(dict_like_objects, compat, join)
     merged = Dataset._construct_direct(variables, coord_names, dims)
@@ -549,4 +550,5 @@ def dataset_merge_method(dataset, other, overwrite_vars, compat, join):
 
 def dataset_update_method(dataset, other):
     """Guts of the Dataset.update method"""
-    return merge_core([dataset, other], priority_arg=1, indexes=dataset.indexes)
+    return merge_core([dataset, other], priority_arg=1,
+                      indexes=dataset.indexes)
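Taken together, the merge.py hunks show the two wrapping strategies this commit uses for code (as opposed to strings): breaking inside an existing bracket pair, as in the ``dict_like_objects`` comprehension, and splitting a call's arguments across lines, as in ``merge_core(...)``. PEP 8 calls the bracket form implied line continuation and prefers it to backslashes, which the commit uses only for the ``from ... import`` in combine.py (a parenthesized import list would also have worked there). Both forms, on toy data:

    values = [1, None, 3]

    # implied continuation: the open bracket lets the expression span lines
    doubled = [
        v * 2
        for v in values
        if v is not None]

    # backslash continuation: needed only where no brackets are in play
    total = doubled[0] + \
        doubled[1]

    assert doubled == [2, 6] and total == 8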

xarray/core/npcompat.py (+3 -3)

@@ -33,8 +33,8 @@ def _replace_nan(a, val):
        If `a` is of inexact type, return a copy of `a` with the NaNs
        replaced by the fill value, otherwise return `a`.
    mask: {bool, None}
-       If `a` is of inexact type, return a boolean mask marking locations of
-       NaNs, otherwise return None.
+       If `a` is of inexact type, return a boolean mask marking locations
+       of NaNs, otherwise return None.
 
    """
    is_new = not isinstance(a, np.ndarray)
@@ -206,7 +206,7 @@ def flip(m, axis):
    -----
    flip(m, 0) is equivalent to flipud(m).
    flip(m, 1) is equivalent to fliplr(m).
-   flip(m, n) corresponds to ``m[...,::-1,...]`` with ``::-1`` at position n.
+   flip(m, n) corresponds to ``m[...,::-1,...]`` with ``::-1`` at index n.
 
    Examples
    --------

xarray/core/rolling.py (+2 -1)

@@ -87,7 +87,8 @@ def __repr__(self):
         """provide a nice str repr of our rolling object"""
 
         attrs = ["{k}->{v}".format(k=k, v=getattr(self, k))
-                 for k in self._attributes if getattr(self, k, None) is not None]
+                 for k in self._attributes
+                 if getattr(self, k, None) is not None]
         return "{klass} [{attrs}]".format(klass=self.__class__.__name__,
                                           attrs=','.join(attrs))
