Commit 0590be6

Add nbQA for notebook and docs linting (#361)
* Add nbQA for notebook and docs linting. This is now possible after nbQA-dev/nbQA#745, which solved the issue (nbQA-dev/nbQA#668) I opened a year ago.
* Run pre-commit filters on all files
* Lint
* bump
* [pre-commit.ci] auto fixes from pre-commit.com hooks; for more information, see https://pre-commit.ci
* update os
* Fix all nbqa issues

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
1 parent 81464a3 commit 0590be6

13 files changed: +128 -107 lines

.pre-commit-config.yaml (+9 -1)

````diff
@@ -11,9 +11,17 @@ repos:
   - repo: https://github.com/psf/black
     rev: 23.3.0
     hooks:
-      - id: black
+      - id: black-jupyter
   - repo: https://github.com/charliermarsh/ruff-pre-commit
     rev: "v0.0.261"
     hooks:
       - id: ruff
         args: ["--fix"]
+  - repo: https://github.com/nbQA-dev/nbQA
+    rev: 1.7.0
+    hooks:
+      - id: nbqa-black
+        additional_dependencies: [jupytext, black]
+      - id: nbqa
+        args: ["ruff", "--fix", "--ignore=E402,B018,F704"]
+        additional_dependencies: [jupytext, ruff]
````
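The `--ignore=E402,B018,F704` passed to the `nbqa` hook skips ruff rules that clash with common notebook idioms: E402 (import not at the top of the file), B018 (bare "useless" expression), and F704 (`yield`/`await` outside a function, i.e. top-level `await`). A small, hypothetical cell sequence, written the way jupytext/nbqa would present a notebook to ruff, illustrates the three patterns:

```python
# Hypothetical notebook cells flattened into one module (not code from this commit).

print("an early cell that runs some setup")

import numpy as np  # E402: import appears after other code, normal for a later cell

np.pi  # B018: bare expression, the notebook way of displaying a value

# F704: `await` outside a function, i.e. notebook top-level await, for example:
# await runner.task
```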

docs/source/algorithms_and_examples.md (+13 -17)

````diff
@@ -1,15 +1,13 @@
 ---
-kernelspec:
-  name: python3
-  display_name: python3
 jupytext:
   text_representation:
     extension: .md
     format_name: myst
-    format_version: '0.13'
-    jupytext_version: 1.13.8
-execution:
-  timeout: 300
+    format_version: 0.13
+    jupytext_version: 1.14.5
+kernelspec:
+  display_name: python3
+  name: python3
 ---

 ```{include} ../../README.md
@@ -101,16 +99,17 @@ def plot_loss_interval(learner):
     return hv.Scatter((x, y)).opts(size=6, color="r")


-def plot(learner, npoints):
-    adaptive.runner.simple(learner, npoints_goal= npoints)
+def plot_interval(learner, npoints):
+    adaptive.runner.simple(learner, npoints_goal=npoints)
     return (learner.plot() * plot_loss_interval(learner))[:, -1.1:1.1]


 def get_hm(loss_per_interval, N=101):
     learner = adaptive.Learner1D(f, bounds=(-1, 1), loss_per_interval=loss_per_interval)
-    plots = {n: plot(learner, n) for n in range(N)}
+    plots = {n: plot_interval(learner, n) for n in range(N)}
     return hv.HoloMap(plots, kdims=["npoints"])

+
 plot_homo = get_hm(uniform_loss).relabel("homogeneous sampling")
 plot_adaptive = get_hm(default_loss).relabel("with adaptive")
 layout = plot_homo + plot_adaptive
@@ -122,7 +121,6 @@ layout.opts(toolbar=None)
 ```{code-cell} ipython3
 :tags: [hide-input]

-
 def ring(xy):
     import numpy as np

@@ -131,7 +129,7 @@ def ring(xy):
     return x + np.exp(-((x**2 + y**2 - 0.75**2) ** 2) / a**4)


-def plot(learner, npoints):
+def plot_compare(learner, npoints):
     adaptive.runner.simple(learner, npoints_goal=npoints)
     learner2 = adaptive.Learner2D(ring, bounds=learner.bounds)
     xs = ys = np.linspace(*learner.bounds[0], int(learner.npoints**0.5))
@@ -146,7 +144,7 @@ def plot(learner, npoints):


 learner = adaptive.Learner2D(ring, bounds=[(-1, 1), (-1, 1)])
-plots = {n: plot(learner, n) for n in range(4, 1010, 20)}
+plots = {n: plot_compare(learner, n) for n in range(4, 1010, 20)}
 hv.HoloMap(plots, kdims=["npoints"]).collate()
 ```

@@ -155,7 +153,6 @@ hv.HoloMap(plots, kdims=["npoints"]).collate()
 ```{code-cell} ipython3
 :tags: [hide-input]

-
 def g(n):
     import random

@@ -167,12 +164,12 @@ def g(n):
 learner = adaptive.AverageLearner(g, atol=None, rtol=0.01)


-def plot(learner, npoints):
+def plot_avg(learner, npoints):
     adaptive.runner.simple(learner, npoints_goal=npoints)
     return learner.plot().relabel(f"loss={learner.loss():.2f}")


-plots = {n: plot(learner, n) for n in range(10, 10000, 200)}
+plots = {n: plot_avg(learner, n) for n in range(10, 10000, 200)}
 hv.HoloMap(plots, kdims=["npoints"])
 ```

@@ -181,7 +178,6 @@ hv.HoloMap(plots, kdims=["npoints"])
 ```{code-cell} ipython3
 :tags: [hide-input]

-
 def sphere(xyz):
     import numpy as np

````
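Most changes in this file rename the repeatedly defined `plot` helper to `plot_interval`, `plot_compare`, and `plot_avg`, so each section of the page keeps its own helper instead of rebinding one name; pyflakes/ruff reports such rebinding as F811 when the earlier definition goes unused. A hypothetical sketch of the shadowing the renames avoid:

```python
# Hypothetical sketch (not from the docs): the second `def plot` replaces the first.
def plot(npoints):
    return f"interval plot with {npoints} points"


def plot(npoints):  # rebinds the name; the first variant is unreachable from here on
    return f"averaging plot with {npoints} points"


print(plot(100))  # -> "averaging plot with 100 points"
```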

docs/source/logo.md (+2 -5)

````diff
@@ -4,19 +4,16 @@ jupytext:
     extension: .md
     format_name: myst
     format_version: 0.13
-    jupytext_version: 1.14.1
+    jupytext_version: 1.14.5
 kernelspec:
   display_name: Python 3 (ipykernel)
   language: python
   name: python3
-execution:
-  timeout: 300
 ---

 ```{code-cell} ipython3
 :tags: [remove-input]

-import os
 import functools
 import subprocess
 import tempfile
@@ -75,7 +72,7 @@ def remove_rounded_corners(fname):

 def learner_till(till, learner, data):
     new_learner = adaptive.Learner2D(None, bounds=learner.bounds)
-    new_learner.data = {k: v for k, v in data[:till]}
+    new_learner.data = dict(data[:till])
     for x, y in learner._bounds_points:
         # always include the bounds
         new_learner.tell((x, y), learner.data[x, y])
````
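The `{k: v for k, v in data[:till]}` to `dict(data[:till])` change is behaviour-preserving for any sequence of `(key, value)` pairs; it is the pattern flake8-comprehensions calls an unnecessary dict comprehension (C416), presumably what the lint run flagged here. A quick check with made-up data:

```python
# Made-up (point, value) pairs standing in for the learner data used in logo.md.
data = [((0.0, 0.0), 1.0), ((0.5, 0.5), 2.0), ((1.0, 1.0), 3.0)]
till = 2

old_style = {k: v for k, v in data[:till]}
new_style = dict(data[:till])
assert old_style == new_style == {(0.0, 0.0): 1.0, (0.5, 0.5): 2.0}
```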

docs/source/tutorial/tutorial.BalancingLearner.md (+11 -7)

````diff
@@ -1,14 +1,15 @@
 ---
-kernelspec:
-  name: python3
-  display_name: python3
 jupytext:
   text_representation:
     extension: .md
     format_name: myst
-    format_version: '0.13'
-    jupytext_version: 1.13.8
+    format_version: 0.13
+    jupytext_version: 1.14.5
+kernelspec:
+  display_name: python3
+  name: python3
 ---
+
 # Tutorial {class}`~adaptive.BalancingLearner`

 ```{note}
@@ -60,7 +61,10 @@ runner.live_info()
 ```

 ```{code-cell} ipython3
-plotter = lambda learner: hv.Overlay([L.plot() for L in learner.learners])
+def plotter(learner):
+    return hv.Overlay([L.plot() for L in learner.learners])
+
+
 runner.live_plot(plotter=plotter, update_interval=0.1)
 ```

@@ -83,7 +87,7 @@ combos = {
 }

 learner = adaptive.BalancingLearner.from_product(
-    jacobi, adaptive.Learner1D, dict(bounds=(0, 1)), combos
+    jacobi, adaptive.Learner1D, {"bounds": (0, 1)}, combos
 )

 runner = adaptive.BlockingRunner(learner, loss_goal=0.01)
````
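Both edits in this file keep behaviour identical: the assigned lambda becomes a `def` (the style pycodestyle E731 asks for) and `dict(bounds=(0, 1))` becomes the literal `{"bounds": (0, 1)}`. A stand-alone comparison with a stand-in function, not the actual HoloViews plotter:

```python
# Stand-in function, only to compare the two spellings.
plotter_lambda = lambda xs: [x * 2 for x in xs]  # the form E731 discourages


def plotter(xs):
    """Named equivalent: same behaviour, plus a real __name__ and room for a docstring."""
    return [x * 2 for x in xs]


assert plotter_lambda([1, 2]) == plotter([1, 2]) == [2, 4]
assert dict(bounds=(0, 1)) == {"bounds": (0, 1)}  # both dict spellings build the same mapping
```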

docs/source/tutorial/tutorial.DataSaver.md (+1 -1)

````diff
@@ -69,7 +69,7 @@ runner.live_info()
 ```

 ```{code-cell} ipython3
-runner.live_plot(plotter=lambda l: l.learner.plot(), update_interval=0.1)
+runner.live_plot(plotter=lambda lrn: lrn.learner.plot(), update_interval=0.1)
 ```

 Now the `DataSavingLearner` will have an dictionary attribute `extra_data` that has `x` as key and the data that was returned by `learner.function` as values.
````
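The only change here renames the lambda argument from `l` to `lrn`; pycodestyle's E741 flags the names `l`, `O`, and `I` because they are easy to confuse with the digits `1` and `0`. Behaviour is unchanged, as a trivial stand-in shows:

```python
# Trivial stand-in, not the real live_plot call: only the argument name differs.
before = lambda l: l * 10  # E741: ambiguous variable name "l"
after = lambda lrn: lrn * 10

assert before(3) == after(3) == 30
```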

docs/source/tutorial/tutorial.IntegratorLearner.md (+7 -8)

````diff
@@ -1,14 +1,15 @@
 ---
-kernelspec:
-  name: python3
-  display_name: python3
 jupytext:
   text_representation:
     extension: .md
     format_name: myst
-    format_version: '0.13'
-    jupytext_version: 1.13.8
+    format_version: 0.13
+    jupytext_version: 1.14.5
+kernelspec:
+  display_name: python3
+  name: python3
 ---
+
 # Tutorial {class}`~adaptive.IntegratorLearner`

 ```{note}
@@ -60,9 +61,7 @@ learner = adaptive.IntegratorLearner(f24, bounds=(0, 3), tol=1e-8)
 # We use a SequentialExecutor, which runs the function to be learned in
 # *this* process only. This means we don't pay
 # the overhead of evaluating the function in another process.
-runner = adaptive.Runner(
-    learner, executor=SequentialExecutor()
-)
+runner = adaptive.Runner(learner, executor=SequentialExecutor())
 ```

 ```{code-cell} ipython3
````
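The comments kept in this hunk explain the executor choice: a SequentialExecutor evaluates the learner's function in the calling process, so a cheap function does not pay inter-process overhead. A sketch of that behaviour, assuming the tutorial's earlier `from adaptive.runner import SequentialExecutor` import and the usual `concurrent.futures.Executor` interface (`submit` returning a future):

```python
from adaptive.runner import SequentialExecutor  # assumed import, as used in the tutorial

ex = SequentialExecutor()
future = ex.submit(pow, 2, 10)  # evaluated immediately, in this very process
assert future.result() == 1024
```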

docs/source/tutorial/tutorial.Learner1D.md (+13 -6)

````diff
@@ -1,14 +1,15 @@
 ---
-kernelspec:
-  name: python3
-  display_name: python3
 jupytext:
   text_representation:
     extension: .md
     format_name: myst
-    format_version: '0.13'
-    jupytext_version: 1.13.8
+    format_version: 0.13
+    jupytext_version: 1.14.5
+kernelspec:
+  display_name: python3
+  name: python3
 ---
+
 (TutorialLearner1D)=
 # Tutorial {class}`~adaptive.Learner1D`

@@ -112,6 +113,8 @@ random.seed(0)
 offsets = [random.uniform(-0.8, 0.8) for _ in range(3)]

 # sharp peaks at random locations in the domain
+
+
 def f_levels(x, offsets=offsets):
     a = 0.01
     return np.array(
@@ -124,7 +127,9 @@ The `Learner1D` can be used for such functions:

 ```{code-cell} ipython3
 learner = adaptive.Learner1D(f_levels, bounds=(-1, 1))
-runner = adaptive.Runner(learner, loss_goal=0.01)  # continue until `learner.loss()<=0.01`
+runner = adaptive.Runner(
+    learner, loss_goal=0.01
+)  # continue until `learner.loss()<=0.01`
 ```

 ```{code-cell} ipython3
@@ -211,12 +216,14 @@ learner.to_numpy()
 ```

 If Pandas is installed (optional dependency), you can also run
+
 ```{code-cell} ipython3
 df = learner.to_dataframe()
 df
 ```

 and load that data into a new learner with
+
 ```{code-cell} ipython3
 new_learner = adaptive.Learner1D(learner.function, (-1, 1))  # create an empty learner
 new_learner.load_dataframe(df)  # load the pandas.DataFrame's data
````

docs/source/tutorial/tutorial.Learner2D.md (+7 -5)

````diff
@@ -1,14 +1,15 @@
 ---
-kernelspec:
-  name: python3
-  display_name: python3
 jupytext:
   text_representation:
     extension: .md
     format_name: myst
-    format_version: '0.13'
-    jupytext_version: 1.13.8
+    format_version: 0.13
+    jupytext_version: 1.14.5
+kernelspec:
+  display_name: python3
+  name: python3
 ---
+
 # Tutorial {class}`~adaptive.Learner2D`

 ```{note}
@@ -24,6 +25,7 @@ import holoviews as hv
 import numpy as np

 from functools import partial
+
 adaptive.notebook_extension()
 ```

````

docs/source/tutorial/tutorial.LearnerND.md (+7 -7)

````diff
@@ -1,16 +1,15 @@
 ---
-kernelspec:
-  name: python3
-  display_name: python3
 jupytext:
   text_representation:
     extension: .md
     format_name: myst
-    format_version: '0.13'
-    jupytext_version: 1.13.8
-execution:
-  timeout: 300
+    format_version: 0.13
+    jupytext_version: 1.14.5
+kernelspec:
+  display_name: python3
+  name: python3
 ---
+
 # Tutorial {class}`~adaptive.LearnerND`

 ```{note}
@@ -111,6 +110,7 @@ You could use the following code as an example:
 ```{code-cell} ipython3
 import scipy

+
 def f(xyz):
     x, y, z = xyz
     return x**4 + y**4 + z**4 - (x**2 + y**2 + z**2) ** 2
````
