
Commit 145baa4

Upgrade pinned version of black. (#5853)
This upgrades the pinned version of black to 22.6.0.

I encountered the following error when running `black .` on my tensorboard repo:

```
Traceback (most recent call last):
  File "/usr/local/google/home/bdubois/virtualenv/tensorboard-git/bin/black", line 8, in <module>
    sys.exit(patched_main())
  File "/usr/local/google/home/bdubois/virtualenv/tensorboard-git/lib/python3.10/site-packages/black/__init__.py", line 1322, in patched_main
    patch_click()
  File "/usr/local/google/home/bdubois/virtualenv/tensorboard-git/lib/python3.10/site-packages/black/__init__.py", line 1308, in patch_click
    from click import _unicodefun
ImportError: cannot import name '_unicodefun' from 'click' (/usr/local/google/home/bdubois/virtualenv/tensorboard-git/lib/python3.10/site-packages/click/__init__.py)
```

According to https://stackoverflow.com/questions/71673404/importerror-cannot-import-name-unicodefun-from-click, this was fixed in black 22.3.0. I chose to upgrade to the slightly more recent 22.6.0.

To test:

```
$ pip install -r tensorboard/pip_package/requirements.txt -r tensorboard/pip_package/requirements_dev.txt
$ black .
```

Examine the diff using `git diff` to check that the formatting changes seem reasonable, and ensure that the Python tests pass and that TensorBoard builds and runs.
1 parent c770732 commit 145baa4
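Nearly all of the hunks below come from one formatting rule in the black 22.x series: spaces around the `**` power operator are dropped when both operands are simple (names and numeric literals). As a minimal sketch, assuming `black==22.6.0` is installed, the change can be previewed on a snippet with black's `format_str`/`Mode` API:

```python
# Hedged sketch: preview black 22.x's power-operator formatting on a snippet.
# Assumes `pip install black==22.6.0`; `format_str` and `Mode` are part of
# black's public Python API. The example lines mirror ones changed in this diff.
import black

src = "epsilon = 2 * 2.0 / (2 ** 16)\n_MAX_INT64 = 2 ** 63 - 1\n"
print(black.format_str(src, mode=black.Mode()))
# Expected output (spaces around ** removed for simple operands):
#   epsilon = 2 * 2.0 / (2**16)
#   _MAX_INT64 = 2**63 - 1
```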

16 files changed, +39 -42 lines changed


tensorboard/backend/event_processing/data_provider_test.py (+2 -2)

```diff
@@ -51,9 +51,9 @@ def setUp(self):
         with tf.summary.create_file_writer(logdir).as_default():
             for i in range(10):
                 scalar_summary.scalar(
-                    "square", i ** 2, step=2 * i, description="boxen"
+                    "square", i**2, step=2 * i, description="boxen"
                 )
-                scalar_summary.scalar("cube", i ** 3, step=3 * i)
+                scalar_summary.scalar("cube", i**3, step=3 * i)
 
         logdir = os.path.join(self.logdir, "waves")
         with tf.summary.create_file_writer(logdir).as_default():
```

tensorboard/compat/tensorflow_stub/dtypes.py (+4 -4)

```diff
@@ -328,10 +328,10 @@ def size(self):
     np.uint16: (0, 65535),
     np.int8: (-128, 127),
     np.int16: (-32768, 32767),
-    np.int64: (-(2 ** 63), 2 ** 63 - 1),
-    np.uint64: (0, 2 ** 64 - 1),
-    np.int32: (-(2 ** 31), 2 ** 31 - 1),
-    np.uint32: (0, 2 ** 32 - 1),
+    np.int64: (-(2**63), 2**63 - 1),
+    np.uint64: (0, 2**64 - 1),
+    np.int32: (-(2**31), 2**31 - 1),
+    np.uint32: (0, 2**32 - 1),
     np.float32: (-1, 1),
     np.float64: (-1, 1),
 }
```

tensorboard/pip_package/requirements_dev.txt (+1 -1)

```diff
@@ -25,7 +25,7 @@ moto==1.3.7
 fsspec==0.7.4
 
 # For linting
-black==21.10b0
+black==22.6.0
 flake8==3.7.8
 yamllint==1.17.0
 
```

tensorboard/plugins/audio/summary_test.py (+1 -1)

```diff
@@ -87,7 +87,7 @@ def test_wav_format_roundtrip(self):
         # WAV roundtrip goes from float32 to int16 and back, so expect some
         # precision loss, but not more than 2 applications of rounding error from
         # mapping the range [-1.0, 1.0] to 2^16.
-        epsilon = 2 * 2.0 / (2 ** 16)
+        epsilon = 2 * 2.0 / (2**16)
         self.assertAllClose(audio[0], decoded, atol=epsilon)
         self.assertEqual(44100, sample_rate.numpy())
 
```

tensorboard/plugins/debugger_v2/debug_data_provider.py (+12 -15)

```diff
@@ -557,22 +557,19 @@ def read_blob_sequences(
                 continue
             output[run] = dict()
             for tag in run_tag_filter.tags:
-                if (
-                    tag.startswith(
-                        (
-                            ALERTS_BLOB_TAG_PREFIX,
-                            EXECUTION_DIGESTS_BLOB_TAG_PREFIX,
-                            EXECUTION_DATA_BLOB_TAG_PREFIX,
-                            GRAPH_EXECUTION_DIGESTS_BLOB_TAG_PREFIX,
-                            GRAPH_EXECUTION_DATA_BLOB_TAG_PREFIX,
-                            GRAPH_INFO_BLOB_TAG_PREFIX,
-                            GRAPH_OP_INFO_BLOB_TAG_PREFIX,
-                            SOURCE_FILE_BLOB_TAG_PREFIX,
-                            STACK_FRAMES_BLOB_TAG_PREFIX,
-                        )
+                if tag.startswith(
+                    (
+                        ALERTS_BLOB_TAG_PREFIX,
+                        EXECUTION_DIGESTS_BLOB_TAG_PREFIX,
+                        EXECUTION_DATA_BLOB_TAG_PREFIX,
+                        GRAPH_EXECUTION_DIGESTS_BLOB_TAG_PREFIX,
+                        GRAPH_EXECUTION_DATA_BLOB_TAG_PREFIX,
+                        GRAPH_INFO_BLOB_TAG_PREFIX,
+                        GRAPH_OP_INFO_BLOB_TAG_PREFIX,
+                        SOURCE_FILE_BLOB_TAG_PREFIX,
+                        STACK_FRAMES_BLOB_TAG_PREFIX,
                     )
-                    or tag in (SOURCE_FILE_LIST_BLOB_TAG,)
-                ):
+                ) or tag in (SOURCE_FILE_LIST_BLOB_TAG,):
                     output[run][tag] = [
                         provider.BlobReference(blob_key="%s.%s" % (tag, run))
                     ]
```

tensorboard/plugins/graph/graphs_demo.py (+1 -1)

```diff
@@ -53,7 +53,7 @@ def write_graph():
     def f():
         x = tf.constant(2)
         y = tf.constant(3)
-        return x ** y
+        return x**y
 
     with tf.summary.create_file_writer(logdir).as_default():
         if hasattr(tf.summary, "graph"):
```

tensorboard/plugins/graph/graphs_plugin_test.py (+1 -1)

```diff
@@ -112,7 +112,7 @@ def generate_run(
         tf.compat.v1.reset_default_graph()
         k1 = tf.constant(math.pi, name="k1")
         k2 = tf.constant(math.e, name="k2")
-        result = (k1 ** k2) - k1
+        result = (k1**k2) - k1
         expected = tf.constant(20.0, name="expected")
         error = tf.abs(result - expected, name="error")
         message_prefix_value = "error " * 1000
```

tensorboard/plugins/histogram/summary_test.py (+1 -1)

```diff
@@ -92,7 +92,7 @@ def test_normal_distribution_input(self):
         self.assertEqual(buckets[:, 0].min(), self.gaussian.min())
         # Assert near, not equal, since TF's linspace op introduces floating point
         # error in the upper bound of the result.
-        self.assertNear(buckets[:, 1].max(), self.gaussian.max(), 1.0 ** -10)
+        self.assertNear(buckets[:, 1].max(), self.gaussian.max(), 1.0**-10)
         self.assertEqual(buckets[:, 2].sum(), self.gaussian.size)
         np.testing.assert_allclose(buckets[1:, 0], buckets[:-1, 1])
 
```

tensorboard/plugins/text/text_plugin_test.py (+1 -1)

```diff
@@ -336,7 +336,7 @@ def make_range_array(dim):
 
             Example: rangeArray(2) results in [[0,1],[2,3]].
             """
-            return np.array(range(2 ** dim)).reshape([2] * dim)
+            return np.array(range(2**dim)).reshape([2] * dim)
 
         for i in range(2, 5):
             actual = text_plugin.reduce_to_2d(make_range_array(i))
```

tensorboard/plugins/text_v2/text_v2_plugin_test.py (+1 -1)

```diff
@@ -132,7 +132,7 @@ def make_range_array(dim):
 
             Example: rangeArray(2) results in [[0,1],[2,3]].
            """
-            return np.array(range(2 ** dim)).reshape([2] * dim)
+            return np.array(range(2**dim)).reshape([2] * dim)
 
         for i in range(2, 5):
             actual = text_v2_plugin.reduce_to_2d(make_range_array(i))
```

tensorboard/uploader/exporter.py (+1 -1)

```diff
@@ -43,7 +43,7 @@
 _FILENAME_SAFE_CHARS = frozenset(string.ascii_letters + string.digits + "-_")
 
 # Maximum value of a signed 64-bit integer.
-_MAX_INT64 = 2 ** 63 - 1
+_MAX_INT64 = 2**63 - 1
 
 # Output filename for experiment metadata (creation time, description,
 # etc.) within an experiment directory.
```

tensorboard/uploader/exporter_test.py (+2 -2)

```diff
@@ -144,7 +144,7 @@ def stream_experiment_data(request, **kwargs):
 
         expected_eids_request = export_service_pb2.StreamExperimentsRequest()
        expected_eids_request.read_timestamp.CopyFrom(start_time_pb)
-        expected_eids_request.limit = 2 ** 63 - 1
+        expected_eids_request.limit = 2**63 - 1
         expected_eids_request.experiments_mask.create_time = True
         expected_eids_request.experiments_mask.update_time = True
         expected_eids_request.experiments_mask.name = True
@@ -357,7 +357,7 @@ def stream_experiment_data(request, **kwargs):
 
         expected_eids_request = export_service_pb2.StreamExperimentsRequest()
         expected_eids_request.read_timestamp.CopyFrom(start_time_pb)
-        expected_eids_request.limit = 2 ** 63 - 1
+        expected_eids_request.limit = 2**63 - 1
         expected_eids_request.experiments_mask.create_time = True
         expected_eids_request.experiments_mask.update_time = True
         expected_eids_request.experiments_mask.name = True
```

tensorboard/uploader/server_info.py (+5 -5)

```diff
@@ -48,27 +48,27 @@
 # slow and we would otherwise risk Deadline Exceeded errors.
 #
 # [1]: https://github.com/grpc/grpc/blob/e70d8582b4b0eedc45e3d25a57b58a08b94a9f4a/include/grpc/impl/codegen/grpc_types.h#L447 # pylint: disable=line-too-long
-_DEFAULT_MAX_SCALAR_REQUEST_SIZE = 128 * (2 ** 10)  # 128KiB
+_DEFAULT_MAX_SCALAR_REQUEST_SIZE = 128 * (2**10)  # 128KiB
 
 # Maximum WriteTensor request size, if not specified by server_info, in bytes.
 # The server-side limit is 4 MiB [1]; we should pad a bit to mitigate any errors
 # in our bookkeeping. Currently, we pad a lot.
 #
 # [1]: https://github.com/grpc/grpc/blob/e70d8582b4b0eedc45e3d25a57b58a08b94a9f4a/include/grpc/impl/codegen/grpc_types.h#L447 # pylint: disable=line-too-long
-_DEFAULT_MAX_TENSOR_REQUEST_SIZE = 512 * (2 ** 10)  # 512KiB
+_DEFAULT_MAX_TENSOR_REQUEST_SIZE = 512 * (2**10)  # 512KiB
 
 # Maximum WriteBlob request size, if not specified by server_info, in bytes.
 # The server-side limit is 4 MiB [1]; we pad with a 256 KiB chunk to mitigate
 # any errors in our bookkeeping.
 #
 # [1]: https://github.com/grpc/grpc/blob/e70d8582b4b0eedc45e3d25a57b58a08b94a9f4a/include/grpc/impl/codegen/grpc_types.h#L447 # pylint: disable=line-too-long
-_DEFAULT_MAX_BLOB_REQUEST_SIZE = 4 * (2 ** 20) - 256 * (2 ** 10)  # 4MiB-256KiB
+_DEFAULT_MAX_BLOB_REQUEST_SIZE = 4 * (2**20) - 256 * (2**10)  # 4MiB-256KiB
 
 # Maximum blob size, if not specified by server_info, in bytes.
-_DEFAULT_MAX_BLOB_SIZE = 10 * (2 ** 20)  # 10MiB
+_DEFAULT_MAX_BLOB_SIZE = 10 * (2**20)  # 10MiB
 
 # Maximum tensor point size, if not specified by server_info, in bytes.
-_DEFAULT_MAX_TENSOR_POINT_SIZE = 16 * (2 ** 10)  # 16KiB
+_DEFAULT_MAX_TENSOR_POINT_SIZE = 16 * (2**10)  # 16KiB
 
 
 def _server_info_request(upload_plugins):
```

tensorboard/uploader/upload_tracker.py (+4 -4)

```diff
@@ -28,10 +28,10 @@ def readable_time_string():
 
 def readable_bytes_string(bytes):
     """Get a human-readable string for number of bytes."""
-    if bytes >= 2 ** 20:
-        return "%.1f MB" % (float(bytes) / 2 ** 20)
-    elif bytes >= 2 ** 10:
-        return "%.1f kB" % (float(bytes) / 2 ** 10)
+    if bytes >= 2**20:
+        return "%.1f MB" % (float(bytes) / 2**20)
+    elif bytes >= 2**10:
+        return "%.1f kB" % (float(bytes) / 2**10)
     else:
         return "%d B" % bytes
 
```

tensorboard/uploader/util.py (+1 -1)

```diff
@@ -113,7 +113,7 @@ def set_timestamp(pb, seconds_since_epoch):
       seconds_since_epoch: A `float`, as returned by `time.time`.
     """
     pb.seconds = int(seconds_since_epoch)
-    pb.nanos = int(round((seconds_since_epoch % 1) * 10 ** 9))
+    pb.nanos = int(round((seconds_since_epoch % 1) * 10**9))
 
 
 def format_time(timestamp_pb, now=None):
```

tensorboard/util/grpc_util.py (+1 -1)

```diff
@@ -190,7 +190,7 @@ def _compute_backoff_seconds(num_attempts):
         _GRPC_RETRY_JITTER_FACTOR_MIN, _GRPC_RETRY_JITTER_FACTOR_MAX
     )
     backoff_secs = (
-        _GRPC_RETRY_EXPONENTIAL_BASE ** num_attempts
+        _GRPC_RETRY_EXPONENTIAL_BASE**num_attempts
     ) * jitter_factor
     return backoff_secs
 
```
