
Commit 330d0aa

Merge pull request #10193 from pradyunsg/blacken/network
Blacken src/pip/_internal/network/
2 parents a196b3b + 1bc0eef commit 330d0aa
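
For context, a reformat like this is typically produced by running black over the package and then letting the pre-commit hook keep it that way. A minimal sketch (not part of this commit) of how one might reproduce it locally; it assumes black is installed in the current environment, and the exact black version/options pip pins are not shown in this diff:

# Minimal sketch: rerun black over the package the pre-commit hook now covers.
# Assumes black is installed; pip's pinned black version is an assumption here.
import subprocess
import sys

subprocess.run(
    [sys.executable, "-m", "black", "src/pip/_internal/network/"],
    check=True,  # fail loudly if black exits non-zero
)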

File tree

8 files changed: +114 -105 lines changed


.pre-commit-config.yaml (-1)

@@ -25,7 +25,6 @@ repos:
             ^src/pip/_internal/commands|
             ^src/pip/_internal/index|
             ^src/pip/_internal/models|
-            ^src/pip/_internal/network|
             ^src/pip/_internal/operations|
             ^src/pip/_internal/req|
             ^src/pip/_internal/vcs|

src/pip/_internal/network/auth.py (+23 -13)

@@ -31,7 +31,8 @@
     keyring = None
 except Exception as exc:
     logger.warning(
-        "Keyring is skipped due to an exception: %s", str(exc),
+        "Keyring is skipped due to an exception: %s",
+        str(exc),
     )
     keyring = None
 
@@ -62,14 +63,14 @@ def get_keyring_auth(url: Optional[str], username: Optional[str]) -> Optional[Au
 
     except Exception as exc:
         logger.warning(
-            "Keyring is skipped due to an exception: %s", str(exc),
+            "Keyring is skipped due to an exception: %s",
+            str(exc),
         )
         keyring = None
     return None
 
 
 class MultiDomainBasicAuth(AuthBase):
-
     def __init__(
         self, prompting: bool = True, index_urls: Optional[List[str]] = None
     ) -> None:
@@ -105,8 +106,12 @@ def _get_index_url(self, url: str) -> Optional[str]:
                 return u
         return None
 
-    def _get_new_credentials(self, original_url: str, allow_netrc: bool = True,
-                             allow_keyring: bool = False) -> AuthInfo:
+    def _get_new_credentials(
+        self,
+        original_url: str,
+        allow_netrc: bool = True,
+        allow_keyring: bool = False,
+    ) -> AuthInfo:
         """Find and return credentials for the specified URL."""
         # Split the credentials and netloc from the url.
         url, netloc, url_user_password = split_auth_netloc_from_url(
@@ -145,10 +150,12 @@ def _get_new_credentials(self, original_url: str, allow_netrc: bool = True,
         # If we don't have a password and keyring is available, use it.
         if allow_keyring:
             # The index url is more specific than the netloc, so try it first
+            # fmt: off
             kr_auth = (
                 get_keyring_auth(index_url, username) or
                 get_keyring_auth(netloc, username)
             )
+            # fmt: on
             if kr_auth:
                 logger.debug("Found credentials in keyring for %s", netloc)
                 return kr_auth
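
The "# fmt: off" / "# fmt: on" pair added in the hunk above tells black to leave the enclosed lines exactly as written, so the hand-formatted trailing-or layout of the kr_auth expression survives the reformat. A minimal sketch of the same idea; the names below are illustrative only, not from pip:

# Black reflows code by default, but keeps anything between these markers
# exactly as written. Illustrative example, not taken from this diff.
def pick_first(primary, fallback):
    # fmt: off
    value = (
        primary or
        fallback
    )
    # fmt: on
    return value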
@@ -189,9 +196,9 @@ def _get_url_and_credentials(
 
         assert (
             # Credentials were found
-            (username is not None and password is not None) or
+            (username is not None and password is not None)
             # Credentials were not found
-            (username is None and password is None)
+            or (username is None and password is None)
         ), f"Could not load credentials from url: {original_url}"
 
         return url, username, password
@@ -244,9 +251,11 @@ def handle_401(self, resp: Response, **kwargs: Any) -> Response:
         parsed = urllib.parse.urlparse(resp.url)
 
         # Query the keyring for credentials:
-        username, password = self._get_new_credentials(resp.url,
-                                                        allow_netrc=False,
-                                                        allow_keyring=True)
+        username, password = self._get_new_credentials(
+            resp.url,
+            allow_netrc=False,
+            allow_keyring=True,
+        )
 
         # Prompt the user for a new username and password
         save = False
@@ -287,7 +296,8 @@ def warn_on_401(self, resp: Response, **kwargs: Any) -> None:
         """Response callback to warn about incorrect credentials."""
         if resp.status_code == 401:
             logger.warning(
-                '401 Error, Credentials not correct for %s', resp.request.url,
+                "401 Error, Credentials not correct for %s",
+                resp.request.url,
             )
 
     def save_credentials(self, resp: Response, **kwargs: Any) -> None:
@@ -300,7 +310,7 @@ def save_credentials(self, resp: Response, **kwargs: Any) -> None:
         self._credentials_to_save = None
         if creds and resp.status_code < 400:
             try:
-                logger.info('Saving credentials to keyring')
+                logger.info("Saving credentials to keyring")
                 keyring.set_password(*creds)
             except Exception:
-                logger.exception('Failed to save credentials')
+                logger.exception("Failed to save credentials")

src/pip/_internal/network/cache.py (+1 -1)

@@ -50,7 +50,7 @@ def _get_cache_path(self, name: str) -> str:
     def get(self, key: str) -> Optional[bytes]:
         path = self._get_cache_path(key)
         with suppressed_cache_errors():
-            with open(path, 'rb') as f:
+            with open(path, "rb") as f:
                 return f.read()
 
     def set(self, key: str, value: bytes) -> None:

src/pip/_internal/network/download.py (+14 -18)

@@ -22,15 +22,15 @@
 
 def _get_http_response_size(resp: Response) -> Optional[int]:
     try:
-        return int(resp.headers['content-length'])
+        return int(resp.headers["content-length"])
     except (ValueError, KeyError, TypeError):
         return None
 
 
 def _prepare_download(
     resp: Response,
     link: Link,
-    progress_bar: str
+    progress_bar: str,
 ) -> Iterable[bytes]:
     total_length = _get_http_response_size(resp)
 
@@ -42,7 +42,7 @@ def _prepare_download(
     logged_url = redact_auth_from_url(url)
 
     if total_length:
-        logged_url = '{} ({})'.format(logged_url, format_size(total_length))
+        logged_url = "{} ({})".format(logged_url, format_size(total_length))
 
     if is_from_cache(resp):
         logger.info("Using cached %s", logged_url)
@@ -65,9 +65,7 @@ def _prepare_download(
     if not show_progress:
         return chunks
 
-    return DownloadProgressProvider(
-        progress_bar, max=total_length
-    )(chunks)
+    return DownloadProgressProvider(progress_bar, max=total_length)(chunks)
 
 
 def sanitize_content_filename(filename: str) -> str:
@@ -83,7 +81,7 @@ def parse_content_disposition(content_disposition: str, default_filename: str) -
     return the default filename if the result is empty.
     """
     _type, params = cgi.parse_header(content_disposition)
-    filename = params.get('filename')
+    filename = params.get("filename")
     if filename:
         # We need to sanitize the filename to prevent directory traversal
         # in case the filename contains ".." path parts.
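
For reference, cgi.parse_header (the stdlib helper used unchanged in the hunk above) splits a Content-Disposition value into its main type and a parameter dict, from which parse_content_disposition takes the filename. A quick illustration with a made-up header value, not taken from this diff:

import cgi

# Made-up header value for illustration only.
value = 'attachment; filename="sample-1.0-py3-none-any.whl"'
_type, params = cgi.parse_header(value)
print(_type)               # attachment
print(params["filename"])  # sample-1.0-py3-none-any.whl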
@@ -97,14 +95,12 @@ def _get_http_response_filename(resp: Response, link: Link) -> str:
     """
     filename = link.filename  # fallback
     # Have a look at the Content-Disposition header for a better guess
-    content_disposition = resp.headers.get('content-disposition')
+    content_disposition = resp.headers.get("content-disposition")
     if content_disposition:
         filename = parse_content_disposition(content_disposition, filename)
     ext: Optional[str] = splitext(filename)[1]
     if not ext:
-        ext = mimetypes.guess_extension(
-            resp.headers.get('content-type', '')
-        )
+        ext = mimetypes.guess_extension(resp.headers.get("content-type", ""))
     if ext:
         filename += ext
     if not ext and link.url != resp.url:
@@ -115,7 +111,7 @@ def _get_http_response_filename(resp: Response, link: Link) -> str:
 
 
 def _http_get_download(session: PipSession, link: Link) -> Response:
-    target_url = link.url.split('#', 1)[0]
+    target_url = link.url.split("#", 1)[0]
     resp = session.get(target_url, headers=HEADERS, stream=True)
     raise_for_status(resp)
     return resp
@@ -145,15 +141,14 @@ def __call__(self, link: Link, location: str) -> Tuple[str, str]:
         filepath = os.path.join(location, filename)
 
         chunks = _prepare_download(resp, link, self._progress_bar)
-        with open(filepath, 'wb') as content_file:
+        with open(filepath, "wb") as content_file:
             for chunk in chunks:
                 content_file.write(chunk)
-        content_type = resp.headers.get('Content-Type', '')
+        content_type = resp.headers.get("Content-Type", "")
         return filepath, content_type
 
 
 class BatchDownloader:
-
     def __init__(
         self,
         session: PipSession,
@@ -173,16 +168,17 @@ def __call__(
                 assert e.response is not None
                 logger.critical(
                     "HTTP error %s while getting %s",
-                    e.response.status_code, link,
+                    e.response.status_code,
+                    link,
                 )
                 raise
 
             filename = _get_http_response_filename(resp, link)
             filepath = os.path.join(location, filename)
 
             chunks = _prepare_download(resp, link, self._progress_bar)
-            with open(filepath, 'wb') as content_file:
+            with open(filepath, "wb") as content_file:
                 for chunk in chunks:
                     content_file.write(chunk)
-            content_type = resp.headers.get('Content-Type', '')
+            content_type = resp.headers.get("Content-Type", "")
             yield link, (filepath, content_type)

src/pip/_internal/network/lazy_wheel.py (+13 -13)

@@ -1,6 +1,6 @@
 """Lazy ZIP over HTTP"""
 
-__all__ = ['HTTPRangeRequestUnsupported', 'dist_from_wheel_url']
+__all__ = ["HTTPRangeRequestUnsupported", "dist_from_wheel_url"]
 
 from bisect import bisect_left, bisect_right
 from contextlib import contextmanager
@@ -53,19 +53,19 @@ def __init__(
         raise_for_status(head)
         assert head.status_code == 200
         self._session, self._url, self._chunk_size = session, url, chunk_size
-        self._length = int(head.headers['Content-Length'])
+        self._length = int(head.headers["Content-Length"])
         self._file = NamedTemporaryFile()
         self.truncate(self._length)
         self._left: List[int] = []
         self._right: List[int] = []
-        if 'bytes' not in head.headers.get('Accept-Ranges', 'none'):
-            raise HTTPRangeRequestUnsupported('range request is not supported')
+        if "bytes" not in head.headers.get("Accept-Ranges", "none"):
+            raise HTTPRangeRequestUnsupported("range request is not supported")
         self._check_zip()
 
     @property
     def mode(self) -> str:
         """Opening mode, which is always rb."""
-        return 'rb'
+        return "rb"
 
     @property
     def name(self) -> str:
@@ -94,9 +94,9 @@ def read(self, size: int = -1) -> bytes:
         """
         download_size = max(size, self._chunk_size)
         start, length = self.tell(), self._length
-        stop = length if size < 0 else min(start+download_size, length)
-        start = max(0, stop-download_size)
-        self._download(start, stop-1)
+        stop = length if size < 0 else min(start + download_size, length)
+        start = max(0, stop - download_size)
+        self._download(start, stop - 1)
         return self._file.read(size)
 
     def readable(self) -> bool:
@@ -170,9 +170,9 @@ def _stream_response(
     ) -> Response:
         """Return HTTP response to a range request from start to end."""
         headers = base_headers.copy()
-        headers['Range'] = f'bytes={start}-{end}'
+        headers["Range"] = f"bytes={start}-{end}"
         # TODO: Get range requests to be correctly cached
-        headers['Cache-Control'] = 'no-cache'
+        headers["Cache-Control"] = "no-cache"
         return self._session.get(self._url, headers=headers, stream=True)
 
     def _merge(
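
The Range and Cache-Control headers reformatted in the hunk above are what let the lazy wheel file object fetch only slices of the remote file. A standalone sketch of such a range request using the requests library directly, with a placeholder URL and byte range:

import requests

# Placeholder URL and range; a real lazy-wheel request targets a .whl on an index.
url = "https://example.com/sample-1.0-py3-none-any.whl"
resp = requests.get(
    url,
    headers={"Range": "bytes=0-1023", "Cache-Control": "no-cache"},
    stream=True,
)
# A server that supports range requests answers 206 Partial Content.
print(resp.status_code)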
def _merge(
@@ -187,11 +187,11 @@ def _merge(
187187
right (int): Index after last overlapping downloaded data
188188
"""
189189
lslice, rslice = self._left[left:right], self._right[left:right]
190-
i = start = min([start]+lslice[:1])
191-
end = max([end]+rslice[-1:])
190+
i = start = min([start] + lslice[:1])
191+
end = max([end] + rslice[-1:])
192192
for j, k in zip(lslice, rslice):
193193
if j > i:
194-
yield i, j-1
194+
yield i, j - 1
195195
i = k + 1
196196
if i <= end:
197197
yield i, end
