diff --git a/.circleci/config.yml b/.circleci/config.yml
index 8fa08fc3..88ecbb4e 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -11,6 +11,10 @@ jobs:
     steps:
       - checkout
 
+      - run:
+          name: Install deps for building atari-py
+          command: sudo apt-get install -y cmake ffmpeg
+
       - run:
           name: Install Python dependencies
           command: |
@@ -25,6 +29,7 @@ jobs:
 
       - run:
           name: Build site
+          no_output_timeout: 30m
           command: |
             source venv/bin/activate
             # n = nitpicky (broken links), W = warnings as errors,
diff --git a/content/pairing.md b/content/pairing.md
index 7e45a031..2d226de9 100644
--- a/content/pairing.md
+++ b/content/pairing.md
@@ -59,7 +59,7 @@ output.
 > supports a variety of restructured text directives. These Sphinx
 > markdown directives will render when NumPy tutorials are built into a
 > static website, but they will show up as raw code when you open in
-> Jupyter locally or on [Binder](mybinder.org).
+> Jupyter locally or on [Binder](https://mybinder.org).
 
 Consider these two versions of the same __Simple notebook example__. You
 have three things in the notebooks:
diff --git a/content/tutorial-deep-learning-on-mnist.md b/content/tutorial-deep-learning-on-mnist.md
index d7070e44..18041dbd 100644
--- a/content/tutorial-deep-learning-on-mnist.md
+++ b/content/tutorial-deep-learning-on-mnist.md
@@ -201,12 +201,18 @@ print('The data type of training images: {}'.format(x_train.dtype))
 print('The data type of test images: {}'.format(x_test.dtype))
 ```
 
-**2.** Normalize the arrays by dividing them by 255 (and thus promoting the data type from `uint8` to `float64`) and then assign the train and test image data variables — `x_train` and `x_test` — to `training_images` and `train_labels`, respectively. To make the neural network model train faster in this example, `training_images` contains only 1,000 samples out of 60,000. To learn from the entire sample size, change the `sample` variable to `60000`.
+**2.** Normalize the arrays by dividing them by 255 (and thus promoting the data type from `uint8` to `float64`) and then assign the train and test image data variables — `x_train` and `x_test` — to `training_images` and `test_images`, respectively.
+To reduce the model training and evaluation time in this example, only a subset
+of the training and test images will be used.
+Both `training_images` and `test_images` will contain only 1,000 samples each out
+of the complete datasets of 60,000 and 10,000 images, respectively.
+These values can be controlled by changing the `training_sample` and
+`test_sample` variables below, up to their maximum values of 60,000 and 10,000.
 
 ```{code-cell} ipython3
-sample = 1000
-training_images = x_train[0:sample] / 255
-test_images = x_test / 255
+training_sample, test_sample = 1000, 1000
+training_images = x_train[0:training_sample] / 255
+test_images = x_test[0:test_sample] / 255
 ```
 
 **3.** Confirm that the image data has changed to the floating-point format:
@@ -257,8 +263,8 @@ def one_hot_encoding(labels, dimension=10):
 **3.** Encode the labels and assign the values to new variables:
 
 ```{code-cell} ipython3
-training_labels = one_hot_encoding(y_train)
-test_labels = one_hot_encoding(y_test)
+training_labels = one_hot_encoding(y_train[:training_sample])
+test_labels = one_hot_encoding(y_test[:test_sample])
 ```
 
 **4.** Check that the data type has changed to floating point:
@@ -405,6 +411,8 @@ weights_2 = 0.2 * np.random.random((hidden_size, num_labels)) - 0.1
 ```
 
 **5.** Set up the neural network's learning experiment with a training loop and start the training process.
+Note that the model is evaluated against the test set at each epoch to track
+its performance as training progresses.
 
 Start the training process:
 
@@ -419,6 +427,11 @@ store_test_accurate_pred = []
 # This is a training loop.
 # Run the learning experiment for a defined number of epochs (iterations).
 for j in range(epochs):
+
+    #################
+    # Training step #
+    #################
+
     # Set the initial loss/error and the number of accurate predictions to zero.
     training_loss = 0.0
     training_accurate_predictions = 0
@@ -467,32 +480,32 @@ for j in range(epochs):
     store_training_loss.append(training_loss)
     store_training_accurate_pred.append(training_accurate_predictions)
 
-    # Evaluate on the test set:
-    # 1. Set the initial error and the number of accurate predictions to zero.
-    test_loss = 0.0
-    test_accurate_predictions = 0
-
-    # 2. Start testing the model by evaluating on the test image dataset.
-    for i in range(len(test_images)):
-        # 1. Pass the test images through the input layer.
-        layer_0 = test_images[i]
-        # 2. Compute the weighted sum of the test image inputs in and
-        # pass the hidden layer's output through ReLU.
-        layer_1 = relu(np.dot(layer_0, weights_1))
-        # 3. Compute the weighted sum of the hidden layer's inputs.
-        # Produce a 10-dimensional vector with 10 scores.
-        layer_2 = np.dot(layer_1, weights_2)
+    ###################
+    # Evaluation step #
+    ###################
+
+    # Evaluate model performance on the test set at each epoch.
+
+    # Unlike the training step, the weights are not modified for each image
+    # (or batch). Therefore, the model can be applied to the test images in a
+    # vectorized manner, eliminating the need to loop over each image
+    # individually:
+
+    results = relu(test_images @ weights_1) @ weights_2
+
+    # Measure the error between the actual label (truth) and prediction values.
+    test_loss = np.sum((test_labels - results)**2)
 
-        # 4. Measure the error between the actual label (truth) and prediction values.
-        test_loss += np.sum((test_labels[i] - layer_2) ** 2)
-        # 5. Increment the accurate prediction count.
-        test_accurate_predictions += int(np.argmax(layer_2) == np.argmax(test_labels[i]))
+    # Measure prediction accuracy on the test set.
+    test_accurate_predictions = np.sum(
+        np.argmax(results, axis=1) == np.argmax(test_labels, axis=1)
+    )
 
     # Store test set losses and accurate predictions.
     store_test_loss.append(test_loss)
     store_test_accurate_pred.append(test_accurate_predictions)
 
-    # 3. Display the error and accuracy metrics in the output.
+    # Summarize the error and accuracy metrics at each epoch.
     print("\n" + \
           "Epoch: " + str(j) + \
           " Training set error:" + str(training_loss/ float(len(training_images)))[0:5] +\
diff --git a/content/tutorial-deep-reinforcement-learning-with-pong-from-pixels.md b/content/tutorial-deep-reinforcement-learning-with-pong-from-pixels.md
index 0fd4b02b..381943d7 100644
--- a/content/tutorial-deep-reinforcement-learning-with-pong-from-pixels.md
+++ b/content/tutorial-deep-reinforcement-learning-with-pong-from-pixels.md
@@ -22,7 +22,7 @@ This tutorial demonstrates how to implement a deep reinforcement learning (RL) a
 
 Pong is a 2D game from 1972 where two players use "rackets" to play a form of table tennis. Each player moves the racket up and down the screen and tries to hit a ball in their opponent's direction by touching it. The goal is to hit the ball such that it goes past the opponent's racket (they miss their shot). According to the rules, if a player reaches 21 points, they win.
 
 In Pong, the RL agent that learns to play against an opponent is displayed on the right.
 
-
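The key behavioral claim in the MNIST hunk above is that the new vectorized evaluation, `relu(test_images @ weights_1) @ weights_2`, computes exactly what the removed per-image loop did. The standalone sketch below checks that equivalence; it is not part of the patch. The `relu` definition and the array shapes (784 pixels, a 100-unit hidden layer, 10 labels) follow the tutorial, while the random stand-in data, the seed, and the 1,000-image subset size are illustrative assumptions.

```python
import numpy as np

# Standalone equivalence check (not part of the patch): random stand-ins for
# the MNIST arrays, with shapes matching the tutorial's conventions.
rng = np.random.default_rng(seed=0)
test_images = rng.random((1000, 784))                      # stand-in test images
test_labels = np.eye(10)[rng.integers(0, 10, size=1000)]   # stand-in one-hot labels
weights_1 = 0.2 * rng.random((784, 100)) - 0.1
weights_2 = 0.2 * rng.random((100, 10)) - 0.1

def relu(x):
    # Set all negative values to zero, as in the tutorial.
    return (x >= 0) * x

# Removed approach: evaluate one test image at a time.
loop_loss = 0.0
loop_correct = 0
for i in range(len(test_images)):
    layer_2 = np.dot(relu(np.dot(test_images[i], weights_1)), weights_2)
    loop_loss += np.sum((test_labels[i] - layer_2) ** 2)
    loop_correct += int(np.argmax(layer_2) == np.argmax(test_labels[i]))

# Added approach: evaluate the whole test set with two matrix products.
results = relu(test_images @ weights_1) @ weights_2
vec_loss = np.sum((test_labels - results) ** 2)
vec_correct = np.sum(np.argmax(results, axis=1) == np.argmax(test_labels, axis=1))

assert np.isclose(loop_loss, vec_loss)   # same loss (up to float rounding)
assert loop_correct == vec_correct       # same accuracy count
print("Per-image loop and vectorized evaluation agree.")
```

Because the weights are frozen during evaluation, stacking the images into a single `(n_images, 784)` matrix turns the per-image dot products into two matrix multiplications that NumPy dispatches to optimized BLAS routines, which is why the per-epoch evaluation in the patch runs substantially faster without changing its results.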