Commit bc71591

[pre-commit.ci] pre-commit autoupdate (#7507)
* [pre-commit.ci] pre-commit autoupdate

  updates:
  - [github.com/charliermarsh/ruff-pre-commit: v0.0.237 → v0.0.241](astral-sh/ruff-pre-commit@v0.0.237...v0.0.241)
  - [github.com/psf/black: 22.12.0 → 23.1.0](psf/black@22.12.0...23.1.0)

* [pre-commit.ci] auto fixes from pre-commit.com hooks

  for more information, see https://pre-commit.ci

---------

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
1 parent d95e00a commit bc71591


62 files changed (+18, -224 lines)

.pre-commit-config.yaml (+3, -3)

@@ -16,13 +16,13 @@ repos:
         files: ^xarray/
   - repo: https://github.com/charliermarsh/ruff-pre-commit
     # Ruff version.
-    rev: 'v0.0.237'
+    rev: 'v0.0.241'
     hooks:
       - id: ruff
         args: ["--fix"]
   # https://github.com/python/black#version-control-integration
   - repo: https://github.com/psf/black
-    rev: 22.12.0
+    rev: 23.1.0
     hooks:
       - id: black
       - id: black-jupyter
@@ -31,7 +31,7 @@ repos:
     hooks:
       - id: blackdoc
         exclude: "generate_aggregations.py"
-        additional_dependencies: ["black==22.12.0"]
+        additional_dependencies: ["black==23.1.0"]
       - id: blackdoc-autoupdate-black
   - repo: https://github.com/pre-commit/mirrors-mypy
     rev: v0.991
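
Nearly everything below is mechanical reformatting from the Black 22.12.0 → 23.1.0 bump rather than a hand-written change. The dominant pattern is the new stable style deleting the blank line that previously sat between a def (or class) header and the first line of the body, which accounts for most of the one-line "-" removals in the Python files that follow. A minimal sketch of the rule — normalize is a hypothetical function, not xarray API:

    def normalize(path, mode="r"):
        # Black 22.12.0 tolerated a blank line right after the signature;
        # 23.1.0 deletes it, which is the single-line removal in most hunks below.
        return f"{mode}:{path}"

    print(normalize("data.nc"))  # prints "r:data.nc"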

doc/conf.py (-1)

@@ -436,7 +436,6 @@ def update_videos(app: Sphinx):

     items = []
     for video in videos:
-
         authors = " | ".join(video["authors"])
         item = f"""
          .. grid-item-card:: {" ".join(video["title"].split())}

doc/examples/apply_ufunc_vectorize_1d.ipynb (-1)

@@ -663,7 +663,6 @@
     "\n",
     "\n",
     "def xr_interp(data, dim, newdim):\n",
-    "\n",
     "    interped = xr.apply_ufunc(\n",
     "        interp1d_np_gufunc,  # first the function\n",
     "        data,  # now arguments in the order expected by 'interp1_np'\n",

doc/whats-new.rst (+3, -3)

@@ -20,9 +20,9 @@ What's New
 v2023.02.0 (Feb 7. 2023)
 ------------------------

-This release brings a major upgrade to :py:func:`xarray.concat`, bug fixes and
-a bump in supported dependency versions. Thanks to our 9 contributors:
-Aron Gergely, Deepak Cherian, Illviljan, James Bourbeau, Joe Hamman,
+This release brings a major upgrade to :py:func:`xarray.concat`, bug fixes and
+a bump in supported dependency versions. Thanks to our 9 contributors:
+Aron Gergely, Deepak Cherian, Illviljan, James Bourbeau, Joe Hamman,
 Justus Magin, Kai Mühlbauer, Ken Mankoff, Spencer Clark.

 Breaking changes

(The removed and re-added lines render identically; the hooks evidently stripped only trailing whitespace here.)

xarray/backends/cfgrib_.py (-1)

@@ -119,7 +119,6 @@ def open_dataset(
         squeeze=True,
         time_dims=("time", "step"),
     ):
-
         filename_or_obj = _normalize_path(filename_or_obj)
         store = CfGribDataStore(
             filename_or_obj,

xarray/backends/h5netcdf_.py (-1)

@@ -401,7 +401,6 @@ def open_dataset(
         phony_dims=None,
         decode_vlen_strings=True,
     ):
-
         filename_or_obj = _normalize_path(filename_or_obj)
         store = H5NetCDFStore.open(
             filename_or_obj,

xarray/backends/netCDF4_.py (-1)

@@ -573,7 +573,6 @@ def open_dataset(
         lock=None,
         autoclose=False,
     ):
-
         filename_or_obj = _normalize_path(filename_or_obj)
         store = NetCDF4DataStore.open(
             filename_or_obj,

xarray/backends/pseudonetcdf_.py (-1)

@@ -156,7 +156,6 @@ def open_dataset(
         lock=None,
         **format_kwargs,
     ):
-
         filename_or_obj = _normalize_path(filename_or_obj)
         store = PseudoNetCDFDataStore.open(
             filename_or_obj, lock=lock, mode=mode, **format_kwargs

xarray/backends/pydap_.py (-1)

@@ -178,7 +178,6 @@ def open_dataset(
         verify=None,
         user_charset=None,
     ):
-
         store = PydapDataStore.open(
             url=filename_or_obj,
             application=application,

xarray/backends/scipy_.py (-2)

@@ -266,7 +266,6 @@ class ScipyBackendEntrypoint(BackendEntrypoint):
     url = "https://docs.xarray.dev/en/stable/generated/xarray.backends.ScipyBackendEntrypoint.html"

     def guess_can_open(self, filename_or_obj):
-
         magic_number = try_read_magic_number_from_file_or_path(filename_or_obj)
         if magic_number is not None and magic_number.startswith(b"\x1f\x8b"):
             with gzip.open(filename_or_obj) as f:
@@ -296,7 +295,6 @@ def open_dataset(
         mmap=None,
         lock=None,
     ):
-
         filename_or_obj = _normalize_path(filename_or_obj)
         store = ScipyDataStore(
             filename_or_obj, mode=mode, format=format, group=group, mmap=mmap, lock=lock
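
Beyond the style fix, the context lines above capture a useful technique: guess_can_open sniffs the file's leading bytes, and b"\x1f\x8b" is the two-byte gzip magic number, so a match means the input should be opened through gzip first. A self-contained sketch of the same check, with looks_gzipped as a hypothetical helper rather than xarray API:

    def looks_gzipped(path: str) -> bool:
        # Every gzip stream starts with the magic bytes 0x1f 0x8b.
        with open(path, "rb") as f:
            return f.read(2) == b"\x1f\x8b"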

xarray/backends/zarr.py (-1)

@@ -882,7 +882,6 @@ def open_dataset(
         stacklevel=3,
         zarr_version=None,
     ):
-
         filename_or_obj = _normalize_path(filename_or_obj)
         store = ZarrStore.open_group(
             filename_or_obj,

xarray/convert.py (-2)

@@ -115,10 +115,8 @@ def set_cdms2_attrs(var, attrs):

     # Curvilinear and unstructured grids
     if dataarray.name not in dataarray.coords:
-
         cdms2_axes = {}
         for coord_name in set(dataarray.coords.keys()) - set(dataarray.dims):
-
             coord_array = dataarray.coords[coord_name].to_cdms2()

             cdms2_axis_cls = (

xarray/core/accessor_dt.py (-1)

@@ -201,7 +201,6 @@ def _strftime(values, date_format):


 class TimeAccessor(Generic[T_DataArray]):
-
     __slots__ = ("_obj",)

     def __init__(self, obj: T_DataArray) -> None:

xarray/core/alignment.py (-2)

@@ -938,7 +938,6 @@ def reindex_like(


 def _get_broadcast_dims_map_common_coords(args, exclude):
-
     common_coords = {}
     dims_map = {}
     for arg in args:
@@ -954,7 +953,6 @@ def _get_broadcast_dims_map_common_coords(args, exclude):
 def _broadcast_helper(
     arg: T_DataWithCoords, exclude, dims_map, common_coords
 ) -> T_DataWithCoords:
-
     from xarray.core.dataarray import DataArray
     from xarray.core.dataset import Dataset


xarray/core/combine.py (-7)

@@ -53,7 +53,6 @@ def _infer_tile_ids_from_nested_list(entry, current_pos):


 def _ensure_same_types(series, dim):
-
     if series.dtype == object:
         types = set(series.map(type))
         if len(types) > 1:
@@ -80,17 +79,14 @@ def _ensure_same_types(series, dim):


 def _infer_concat_order_from_coords(datasets):
-
     concat_dims = []
     tile_ids = [() for ds in datasets]

     # All datasets have same variables because they've been grouped as such
     ds0 = datasets[0]
     for dim in ds0.dims:
-
         # Check if dim is a coordinate dimension
         if dim in ds0:
-
             # Need to read coordinate values to do ordering
             indexes = [ds._indexes.get(dim) for ds in datasets]
             if any(index is None for index in indexes):
@@ -105,7 +101,6 @@ def _infer_concat_order_from_coords(datasets):
             # If dimension coordinate values are same on every dataset then
             # should be leaving this dimension alone (it's just a "bystander")
             if not all(index.equals(indexes[0]) for index in indexes[1:]):
-
                 # Infer order datasets should be arranged in along this dim
                 concat_dims.append(dim)

@@ -261,7 +256,6 @@ def _combine_all_along_first_dim(
     join: JoinOptions = "outer",
     combine_attrs: CombineAttrsOptions = "drop",
 ):
-
     # Group into lines of datasets which must be combined along dim
     # need to sort by _new_tile_id first for groupby to work
     # TODO: is the sorted need?
@@ -345,7 +339,6 @@ def _nested_combine(
     join: JoinOptions = "outer",
     combine_attrs: CombineAttrsOptions = "drop",
 ):
-
     if len(datasets) == 0:
         return Dataset()


xarray/core/common.py (-1)

@@ -1747,7 +1747,6 @@ def ones_like(
 def get_chunksizes(
     variables: Iterable[Variable],
 ) -> Mapping[Any, tuple[int, ...]]:
-
     chunks: dict[Any, tuple[int, ...]] = {}
     for v in variables:
         if hasattr(v._data, "chunks"):

xarray/core/computation.py (-2)

@@ -156,7 +156,6 @@ def to_gufunc_string(self, exclude_dims=frozenset()):

         # enumerate input_core_dims contained in exclude_dims to make them unique
         if exclude_dims:
-
             exclude_dims = [self.dims_map[dim] for dim in exclude_dims]

             counter = Counter()
@@ -555,7 +554,6 @@ def apply_groupby_func(func, *args):
 def unified_dim_sizes(
     variables: Iterable[Variable], exclude_dims: AbstractSet = frozenset()
 ) -> dict[Hashable, int]:
-
     dim_sizes: dict[Hashable, int] = {}

     for var in variables:

xarray/core/dataarray.py (-1)

@@ -395,7 +395,6 @@ def __init__(

         # try to fill in arguments from data if they weren't supplied
         if coords is None:
-
             if isinstance(data, DataArray):
                 coords = data.coords
             elif isinstance(data, pd.Series):

xarray/core/dataset.py (-1)

@@ -6768,7 +6768,6 @@ def shift(
         fill_value: Any = xrdtypes.NA,
         **shifts_kwargs: int,
     ) -> T_Dataset:
-
         """Shift this dataset by an offset along one or more dimensions.

         Only data variables are moved; coordinates stay in place. This is

xarray/core/duck_array_ops.py (-1)

@@ -492,7 +492,6 @@ def datetime_to_numeric(array, offset=None, datetime_unit=None, dtype=float):

     # Convert np.NaT to np.nan
     elif array.dtype.kind in "mM":
-
         # Convert to specified timedelta units.
         if datetime_unit:
             array = array / np.timedelta64(1, datetime_unit)
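
The context above also shows the NumPy idiom xarray uses for unit conversion: dividing a timedelta64 array by a one-unit timedelta64 yields plain floats in that unit. A runnable sketch with made-up values, not the actual call site:

    import numpy as np

    deltas = np.array([1, 2], dtype="timedelta64[h]")  # 1 hour, 2 hours
    print(deltas / np.timedelta64(1, "m"))  # [ 60. 120.] (minutes, as float64)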

xarray/core/groupby.py (-2)

@@ -46,7 +46,6 @@


 def check_reduce_dims(reduce_dims, dimensions):
-
     if reduce_dims is not ...:
         if is_scalar(reduce_dims):
             reduce_dims = [reduce_dims]
@@ -1208,7 +1207,6 @@ class DataArrayGroupBy(  # type: ignore[misc]


 class DatasetGroupByBase(GroupBy["Dataset"], DatasetGroupbyArithmetic):
-
     __slots__ = ()
     _dims: Frozen[Hashable, int] | None


xarray/core/indexing.py (-2)

@@ -1092,7 +1092,6 @@ def _logical_any(args):


 def _masked_result_drop_slice(key, data=None):
-
     key = (k for k in key if not isinstance(k, slice))
     chunks_hint = getattr(data, "chunks", None)

@@ -1345,7 +1344,6 @@ def __init__(self, array):
         self.array = array

     def __getitem__(self, key):
-
         if not isinstance(key, VectorizedIndexer):
             # if possible, short-circuit when keys are effectively slice(None)
             # This preserves dask name and passes lazy array equivalence checks

xarray/core/merge.py (+1, -1)

@@ -174,7 +174,7 @@ def _assert_prioritized_valid(
     indexes: dict[int, Index] = {}

     for name, elements_list in grouped.items():
-        for (_, index) in elements_list:
+        for _, index in elements_list:
             if index is not None:
                 grouped_by_index[id(index)].append(name)
                 indexes[id(index)] = index
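
This hunk shows the commit's second mechanical pattern: the new Black style drops redundant parentheses around tuple targets in for statements (the same rewrite appears in xarray/core/rolling.py below). The two spellings are identical at runtime; a small sketch with made-up data:

    pairs = [("a", 1), ("b", 2)]

    # Before (Black 22.12.0 kept it):  for (name, value) in pairs:
    # After Black 23.1.0 — same iteration, no parentheses:
    for name, value in pairs:
        print(name, value)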

xarray/core/missing.py (-1)

@@ -80,7 +80,6 @@ class NumpyInterpolator(BaseInterpolator):
     """

     def __init__(self, xi, yi, method="linear", fill_value=None, period=None):
-
         if method != "linear":
             raise ValueError("only method `linear` is valid for the NumpyInterpolator")


xarray/core/parallel.py (-1)

@@ -33,7 +33,6 @@ def assert_chunks_compatible(a: Dataset, b: Dataset):
 def check_result_variables(
     result: DataArray | Dataset, expected: Mapping[str, Any], kind: str
 ):
-
     if kind == "coords":
         nice_str = "coordinate"
     elif kind == "data_vars":

xarray/core/resample.py (-2)

@@ -41,7 +41,6 @@ def __init__(
         resample_dim: Hashable | None = None,
         **kwargs,
     ) -> None:
-
         if dim == resample_dim:
             raise ValueError(
                 f"Proxy resampling dimension ('{resample_dim}') "
@@ -57,7 +56,6 @@ def _flox_reduce(
         keep_attrs: bool | None = None,
         **kwargs,
     ) -> T_Xarray:
-
         from xarray.core.dataarray import DataArray

         kwargs.setdefault("method", "cohorts")

xarray/core/rolling.py (+3, -3)

@@ -132,7 +132,8 @@ def _reduce_method(  # type: ignore[misc]
         name: str, fillna: Any, rolling_agg_func: Callable | None = None
     ) -> Callable[..., T_Xarray]:
         """Constructs reduction methods built on a numpy reduction function (e.g. sum),
-        a bottleneck reduction function (e.g. move_sum), or a Rolling reduction (_mean)."""
+        a bottleneck reduction function (e.g. move_sum), or a Rolling reduction (_mean).
+        """
         if rolling_agg_func:
             array_agg_func = None
         else:
@@ -141,7 +142,6 @@ def _reduce_method(  # type: ignore[misc]
         bottleneck_move_func = getattr(bottleneck, "move_" + name, None)

         def method(self, keep_attrs=None, **kwargs):
-
             keep_attrs = self._get_keep_attrs(keep_attrs)

             return self._numpy_or_bottleneck_reduce(
@@ -272,7 +272,7 @@ def __iter__(self) -> Iterator[tuple[DataArray, DataArray]]:
         starts = stops - window0
         starts[: window0 - offset] = 0

-        for (label, start, stop) in zip(self.window_labels, starts, stops):
+        for label, start, stop in zip(self.window_labels, starts, stops):
             window = self.obj.isel({dim0: slice(start, stop)})

             counts = window.count(dim=[dim0])
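
The first hunk above shows the third style change in this commit: when a multi-line docstring's closing quotes share its last text line — here presumably because keeping them inline would overrun the line-length limit — Black 23.1.0 moves the quotes onto their own line. A hypothetical illustration:

    def method():
        """Summary line that continues onto
        a second line; the closing quotes now stand alone.
        """
        return None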

xarray/core/utils.py (-2)

@@ -863,7 +863,6 @@ def drop_dims_from_indexers(
         return indexers

     elif missing_dims == "warn":
-
         # don't modify input
         indexers = dict(indexers)

@@ -912,7 +911,6 @@ def drop_missing_dims(
         return supplied_dims

     elif missing_dims == "warn":
-
         invalid = set(supplied_dims) - set(dims)
         if invalid:
             warnings.warn(
