Commit a7714c7

Trim trailing whitespace (#2563)

1 parent a615ee9 · commit a7714c7

9 files changed, +37 -36 lines

Apart from one new pre-commit hook, every hunk below only strips trailing
whitespace: each removed (-) line and its added (+) replacement are identical
except for the invisible whitespace at the end of the removed line.

.github/workflows/gpu_test.yml

+1-1
@@ -55,7 +55,7 @@ jobs:
           cache: 'pip'
       - name: Install Hatch and CuPy
         run: |
-          python -m pip install --upgrade pip
+          python -m pip install --upgrade pip
           pip install hatch
       - name: Set Up Hatch Env
         run: |

.github/workflows/releases.yml

+1-1
@@ -23,7 +23,7 @@ jobs:

       - name: Install PyBuild
         run: |
-          python -m pip install --upgrade pip
+          python -m pip install --upgrade pip
           pip install hatch
       - name: Build wheel and sdist
         run: hatch build

.github/workflows/test.yml

+2-2
@@ -52,7 +52,7 @@ jobs:
           cache: 'pip'
       - name: Install Hatch
         run: |
-          python -m pip install --upgrade pip
+          python -m pip install --upgrade pip
           pip install hatch
       - name: Set Up Hatch Env
         run: |
@@ -84,7 +84,7 @@ jobs:
           cache: 'pip'
       - name: Install Hatch
         run: |
-          python -m pip install --upgrade pip
+          python -m pip install --upgrade pip
           pip install hatch
       - name: Set Up Hatch Env
         run: |

.pre-commit-config.yaml

+1
@@ -20,6 +20,7 @@ repos:
     rev: v5.0.0
     hooks:
       - id: check-yaml
+      - id: trailing-whitespace
   - repo: https://github.com/pre-commit/mirrors-mypy
     rev: v1.13.0
     hooks:
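This is the one substantive change: the `trailing-whitespace` hook from
`pre-commit-hooks` now runs alongside `check-yaml`. It rewrites staged files to
strip whitespace from line ends and fails the commit when it modifies anything.
A minimal Python sketch of the equivalent behavior (an illustration, not the
hook's actual implementation):

```python
# Sketch only: strip trailing whitespace from each line of the given files
# and rewrite any file that changed, mimicking the pre-commit hook's effect.
import sys
from pathlib import Path

def trim_trailing_whitespace(path: Path) -> bool:
    """Return True if the file was modified."""
    original = path.read_text()
    trimmed = "\n".join(line.rstrip() for line in original.splitlines())
    if original.endswith("\n"):
        trimmed += "\n"
    if trimmed != original:
        path.write_text(trimmed)
        return True
    return False

if __name__ == "__main__":
    changed = [p for p in map(Path, sys.argv[1:]) if trim_trailing_whitespace(p)]
    # like the real hook, exit non-zero when files were modified
    sys.exit(1 if changed else 0)
```

Running `pre-commit run trailing-whitespace --all-files` applies the real hook
across the repository, which is presumably how the 36 trimmed lines in this
commit were produced.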

README-v3.md

+1-1
@@ -38,7 +38,7 @@ hatch env create test
 ## Run the Tests

 ```
-hatch run test:run
+hatch run test:run
 ```

 or

bench/compress_normal.txt

+20-20
@@ -19,7 +19,7 @@ Line #   Hits   Time  Per Hit  % Time  Line Contents
 ==============================================================
    137                                 def compress(source, char* cname, int clevel, int shuffle):
    138                                     """Compress data in a numpy array.
-   139
+   139
    140                                     Parameters
    141                                     ----------
    142                                     source : array-like
@@ -30,33 +30,33 @@ Line #   Hits   Time  Per Hit  % Time  Line Contents
    147                                         Compression level.
    148                                     shuffle : int
    149                                         Shuffle filter.
-   150
+   150
    151                                     Returns
    152                                     -------
    153                                     dest : bytes-like
    154                                         Compressed data.
-   155
+   155
    156                                     """
-   157
+   157
    158                                     cdef:
    159                                         char *source_ptr
    160                                         char *dest_ptr
    161                                         Py_buffer source_buffer
    162                                         size_t nbytes, cbytes, itemsize
    163    200    506      2.5     0.2         array.array char_array_template = array.array('b', [])
    164                                         array.array dest
-   165
+   165
    166                                     # setup source buffer
    167    200    458      2.3     0.2     PyObject_GetBuffer(source, &source_buffer, PyBUF_ANY_CONTIGUOUS)
    168    200    119      0.6     0.0     source_ptr = <char *> source_buffer.buf
-   169
+   169
    170                                     # setup destination
    171    200    239      1.2     0.1     nbytes = source_buffer.len
    172    200    103      0.5     0.0     itemsize = source_buffer.itemsize
    173    200   2286     11.4     0.8     dest = array.clone(char_array_template, nbytes + BLOSC_MAX_OVERHEAD,
    174                                                        zero=False)
    175    200    129      0.6     0.0     dest_ptr = <char *> dest.data.as_voidptr
-   176
+   176
    177                                     # perform compression
    178    200   1734      8.7     0.6     if _get_use_threads():
    179                                         # allow blosc to use threads internally
@@ -67,24 +67,24 @@ Line #   Hits   Time  Per Hit  % Time  Line Contents
    184                                             cbytes = blosc_compress(clevel, shuffle, itemsize, nbytes,
    185                                                                     source_ptr, dest_ptr,
    186                                                                     nbytes + BLOSC_MAX_OVERHEAD)
-   187
+   187
    188                                     else:
    189                                         with nogil:
    190                                             cbytes = blosc_compress_ctx(clevel, shuffle, itemsize, nbytes,
    191                                                                         source_ptr, dest_ptr,
    192                                                                         nbytes + BLOSC_MAX_OVERHEAD, cname,
    193                                                                         0, 1)
-   194
+   194
    195                                     # release source buffer
    196    200    616      3.1     0.2     PyBuffer_Release(&source_buffer)
-   197
+   197
    198                                     # check compression was successful
    199    200    120      0.6     0.0     if cbytes <= 0:
    200                                         raise RuntimeError('error during blosc compression: %d' % cbytes)
-   201
+   201
    202                                     # resize after compression
    203    200   1896      9.5     0.6     array.resize(dest, cbytes)
-   204
+   204
    205    200    186      0.9     0.1     return dest

 *******************************************************************************
@@ -100,19 +100,19 @@ Line #   Hits   Time  Per Hit  % Time  Line Contents
 ==============================================================
     75                                 def decompress(source, dest):
     76                                     """Decompress data.
-    77
+    77
     78                                     Parameters
     79                                     ----------
     80                                     source : bytes-like
     81                                         Compressed data, including blosc header.
     82                                     dest : array-like
     83                                         Object to decompress into.
-    84
+    84
     85                                     Notes
     86                                     -----
     87                                     Assumes that the size of the destination buffer is correct for the size of
     88                                     the uncompressed data.
-    89
+    89
     90                                     """
     91                                     cdef:
     92                                         int ret
@@ -122,7 +122,7 @@ Line #   Hits   Time  Per Hit  % Time  Line Contents
     96                                         array.array source_array
     97                                         Py_buffer dest_buffer
     98                                         size_t nbytes
-    99
+    99
    100                                     # setup source buffer
    101    200    573      2.9     0.2     if PY2 and isinstance(source, array.array):
    102                                         # workaround fact that array.array does not support new-style buffer
@@ -134,13 +134,13 @@ Line #   Hits   Time  Per Hit  % Time  Line Contents
    108    200    112      0.6     0.0         release_source_buffer = True
    109    200    144      0.7     0.1     PyObject_GetBuffer(source, &source_buffer, PyBUF_ANY_CONTIGUOUS)
    110    200     98      0.5     0.0     source_ptr = <char *> source_buffer.buf
-   111
+   111
    112                                     # setup destination buffer
    113    200    552      2.8     0.2     PyObject_GetBuffer(dest, &dest_buffer,
    114                                                        PyBUF_ANY_CONTIGUOUS | PyBUF_WRITEABLE)
    115    200    100      0.5     0.0     dest_ptr = <char *> dest_buffer.buf
    116    200     84      0.4     0.0     nbytes = dest_buffer.len
-   117
+   117
    118                                     # perform decompression
    119    200   1856      9.3     0.8     if _get_use_threads():
    120                                         # allow blosc to use threads internally
@@ -149,12 +149,12 @@ Line #   Hits   Time  Per Hit  % Time  Line Contents
    123                                     else:
    124                                         with nogil:
    125                                             ret = blosc_decompress_ctx(source_ptr, dest_ptr, nbytes, 1)
-   126
+   126
    127                                     # release buffers
    128    200    754      3.8     0.3     if release_source_buffer:
    129    200    326      1.6     0.1         PyBuffer_Release(&source_buffer)
    130    200    165      0.8     0.1     PyBuffer_Release(&dest_buffer)
-   131
+   131
    132                                     # handle errors
    133    200    128      0.6     0.1     if ret <= 0:
    134                                         raise RuntimeError('error during blosc decompression: %d' % ret)
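The profiled `compress`/`decompress` above are thin Cython wrappers over
c-blosc's compression primitives. For orientation, a round trip with comparable
parameters can be sketched through the `numcodecs` `Blosc` codec (an assumption
for illustration; the benchmark itself exercises zarr's internal Cython module,
and the filename suggests normally distributed input):

```python
# Hedged sketch: a blosc round trip with parameters analogous to the
# profiled compress(source, cname, clevel, shuffle), via numcodecs.
import numpy as np
from numcodecs import Blosc

codec = Blosc(cname="lz4", clevel=5, shuffle=Blosc.SHUFFLE)
source = np.random.normal(size=2**18)          # float64 "normal" data
compressed = codec.encode(source)              # bytes-like, blosc header included
restored = np.frombuffer(codec.decode(compressed), dtype=source.dtype)
assert np.array_equal(source, restored)        # lossless round trip
print(f"{source.nbytes} bytes -> {len(compressed)} bytes")
```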

docs/guide/storage.rst

+5-5
@@ -4,7 +4,7 @@ Storage
 Zarr-Python supports multiple storage backends, including: local file systems,
 Zip files, remote stores via ``fsspec`` (S3, HTTP, etc.), and in-memory stores. In
 Zarr-Python 3, stores must implement the abstract store API from
-:class:`zarr.abc.store.Store`.
+:class:`zarr.abc.store.Store`.

 .. note::
    Unlike Zarr-Python 2 where the store interface was built around a generic ``MutableMapping``
@@ -50,8 +50,8 @@ filesystem.
 Zip Store
 ~~~~~~~~~

-The :class:`zarr.storage.ZipStore` stores the contents of a Zarr hierarchy in a single
-Zip file. The `Zip Store specification_` is currently in draft form.
+The :class:`zarr.storage.ZipStore` stores the contents of a Zarr hierarchy in a single
+Zip file. The `Zip Store specification_` is currently in draft form.

 .. code-block:: python

@@ -65,7 +65,7 @@ Remote Store

 The :class:`zarr.storage.RemoteStore` stores the contents of a Zarr hierarchy in following the same
 logical layout as the ``LocalStore``, except the store is assumed to be on a remote storage system
-such as cloud object storage (e.g. AWS S3, Google Cloud Storage, Azure Blob Store). The
+such as cloud object storage (e.g. AWS S3, Google Cloud Storage, Azure Blob Store). The
 :class:`zarr.storage.RemoteStore` is backed by `Fsspec_` and can support any Fsspec backend
 that implements the `AbstractFileSystem` API,

@@ -80,7 +80,7 @@ Memory Store
 ~~~~~~~~~~~~

 The :class:`zarr.storage.RemoteStore` a in-memory store that allows for serialization of
-Zarr data (metadata and chunks) to a dictionary.
+Zarr data (metadata and chunks) to a dictionary.

 .. code-block:: python

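The stores touched by this hunk can be exercised with a short sketch. This
assumes the Zarr-Python 3 API as it stood around this commit; exact names and
constructor arguments varied between 3.0 pre-releases:

```python
# Hedged sketch of the MemoryStore and ZipStore usage described above.
# Assumes the Zarr-Python 3 API around this commit; names may have shifted
# between 3.0 pre-releases.
import zarr
from zarr.storage import MemoryStore, ZipStore

# In-memory store: metadata and chunks are held in a dictionary.
store = MemoryStore()
root = zarr.group(store=store)

# Zip store: the same hierarchy layout written into a single Zip archive.
zip_store = ZipStore("example.zarr.zip", mode="w")
zip_root = zarr.group(store=zip_store)
zip_store.close()  # flush and finalize the archive
```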

docs/roadmap.rst

+4-4
@@ -16,7 +16,7 @@ Roadmap
 - Martin Durrant / @martindurant

 .. note::
-
+
    This document was written in the early stages of the 3.0 refactor. Some
    aspects of the design have changed since this was originally written.
    Questions and discussion about the contents of this document should be directed to
@@ -227,7 +227,7 @@ expose the required methods as async methods.

     async def get_partial_values(self, key_ranges: List[Tuple[str, int, int]) -> bytes:
         ...
-
+
     async def set(self, key: str, value: Union[bytes, bytearray, memoryview]) -> None:
         ...  # required for writable stores

@@ -246,10 +246,10 @@ expose the required methods as async methods.
     # additional (optional methods)
     async def getsize(self, prefix: str) -> int:
         ...
-
+
     async def rename(self, src: str, dest: str) -> None
         ...
-
+

 Recognizing that there are many Zarr applications today that rely on the
 ``MutableMapping`` interface supported by Zarr-Python 2, a wrapper store
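The async interface quoted in this hunk maps naturally onto a plain Python
class. A toy in-memory illustration using the roadmap's method names
(`ToyAsyncStore` is hypothetical, not zarr's actual `Store`; the roadmap's
`get_partial_values` return annotation is loosened here to one byte string per
requested range):

```python
# Hedged sketch: a toy in-memory store with the async methods named in the
# roadmap (get_partial_values, set, getsize, rename). Illustration only.
from typing import Dict, List, Tuple, Union

class ToyAsyncStore:
    def __init__(self) -> None:
        self._data: Dict[str, bytes] = {}

    async def get_partial_values(
        self, key_ranges: List[Tuple[str, int, int]]
    ) -> List[bytes]:
        # each range is (key, start, length)
        return [self._data[key][start:start + length]
                for key, start, length in key_ranges]

    async def set(self, key: str, value: Union[bytes, bytearray, memoryview]) -> None:
        self._data[key] = bytes(value)

    async def getsize(self, prefix: str) -> int:
        # total stored bytes under a key prefix
        return sum(len(v) for k, v in self._data.items() if k.startswith(prefix))

    async def rename(self, src: str, dest: str) -> None:
        self._data[dest] = self._data.pop(src)
```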

docs/tutorial.rst

+2-2
@@ -1015,12 +1015,12 @@ class from ``fsspec``. The following example demonstrates how to access
 a ZIP-archived Zarr group on s3 using `s3fs <https://s3fs.readthedocs.io/en/latest/>`_ and ``ZipFileSystem``:

     >>> s3_path = "s3://path/to/my.zarr.zip"
-    >>>
+    >>>
     >>> s3 = s3fs.S3FileSystem()
     >>> f = s3.open(s3_path)
     >>> fs = ZipFileSystem(f, mode="r")
     >>> store = FSMap("", fs, check=False)
-    >>>
+    >>>
     >>> # caching may improve performance when repeatedly reading the same data
     >>> cache = zarr.storage.LRUStoreCache(store, max_size=2**28)
     >>> z = zarr.group(store=cache)
