From d96c1644bb21a2ac47c580f31a82cf0dd1e59615 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Aur=C3=A9lien=20Geron?= Date: Thu, 5 Oct 2017 13:22:06 +0200 Subject: [PATCH] Add solution to exercise 7 in chapter 14 --- 14_recurrent_neural_networks.ipynb | 1169 ++++++++++++++-------------- 1 file changed, 576 insertions(+), 593 deletions(-) diff --git a/14_recurrent_neural_networks.ipynb b/14_recurrent_neural_networks.ipynb index 5096441..c9eadd3 100644 --- a/14_recurrent_neural_networks.ipynb +++ b/14_recurrent_neural_networks.ipynb @@ -2,40 +2,28 @@ "cells": [ { "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, + "metadata": {}, "source": [ "**Chapter 14 – Recurrent Neural Networks**" ] }, { "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, + "metadata": {}, "source": [ "_This notebook contains all the sample code and solutions to the exercises in chapter 14._" ] }, { "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, + "metadata": {}, "source": [ "# Setup" ] }, { "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, + "metadata": {}, "source": [ "First, let's make sure this notebook works well in both python 2 and 3, import a few common modules, ensure MatplotLib plots figures inline and prepare a function to save the figures:" ] @@ -44,9 +32,7 @@ "cell_type": "code", "execution_count": 1, "metadata": { - "collapsed": true, - "deletable": true, - "editable": true + "collapsed": true }, "outputs": [], "source": [ @@ -85,10 +71,7 @@ }, { "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, + "metadata": {}, "source": [ "Then of course we will need TensorFlow:" ] @@ -97,9 +80,7 @@ "cell_type": "code", "execution_count": 2, "metadata": { - "collapsed": true, - "deletable": true, - "editable": true + "collapsed": true }, "outputs": [], "source": [ @@ -108,20 +89,14 @@ }, { "cell_type": "markdown", - 
"metadata": { - "deletable": true, - "editable": true - }, + "metadata": {}, "source": [ "# Basic RNNs" ] }, { "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, + "metadata": {}, "source": [ "## Manual RNN" ] @@ -130,9 +105,7 @@ "cell_type": "code", "execution_count": 3, "metadata": { - "collapsed": false, - "deletable": true, - "editable": true + "collapsed": true }, "outputs": [], "source": [ @@ -158,9 +131,7 @@ "cell_type": "code", "execution_count": 4, "metadata": { - "collapsed": false, - "deletable": true, - "editable": true + "collapsed": true }, "outputs": [], "source": [ @@ -177,11 +148,7 @@ { "cell_type": "code", "execution_count": 5, - "metadata": { - "collapsed": false, - "deletable": true, - "editable": true - }, + "metadata": {}, "outputs": [], "source": [ "print(Y0_val)" @@ -190,11 +157,7 @@ { "cell_type": "code", "execution_count": 6, - "metadata": { - "collapsed": false, - "deletable": true, - "editable": true - }, + "metadata": {}, "outputs": [], "source": [ "print(Y1_val)" @@ -202,10 +165,7 @@ }, { "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, + "metadata": {}, "source": [ "## Using `static_rnn()`" ] @@ -214,9 +174,7 @@ "cell_type": "code", "execution_count": 7, "metadata": { - "collapsed": true, - "deletable": true, - "editable": true + "collapsed": true }, "outputs": [], "source": [ @@ -228,9 +186,7 @@ "cell_type": "code", "execution_count": 8, "metadata": { - "collapsed": false, - "deletable": true, - "editable": true + "collapsed": true }, "outputs": [], "source": [ @@ -249,9 +205,7 @@ "cell_type": "code", "execution_count": 9, "metadata": { - "collapsed": true, - "deletable": true, - "editable": true + "collapsed": true }, "outputs": [], "source": [ @@ -262,9 +216,7 @@ "cell_type": "code", "execution_count": 10, "metadata": { - "collapsed": false, - "deletable": true, - "editable": true + "collapsed": true }, "outputs": [], "source": [ @@ -279,11 +231,7 @@ { 
"cell_type": "code", "execution_count": 11, - "metadata": { - "collapsed": false, - "deletable": true, - "editable": true - }, + "metadata": {}, "outputs": [], "source": [ "Y0_val" @@ -292,11 +240,7 @@ { "cell_type": "code", "execution_count": 12, - "metadata": { - "collapsed": false, - "deletable": true, - "editable": true - }, + "metadata": {}, "outputs": [], "source": [ "Y1_val" @@ -306,9 +250,7 @@ "cell_type": "code", "execution_count": 13, "metadata": { - "collapsed": true, - "deletable": true, - "editable": true + "collapsed": true }, "outputs": [], "source": [ @@ -353,11 +295,7 @@ { "cell_type": "code", "execution_count": 14, - "metadata": { - "collapsed": false, - "deletable": true, - "editable": true - }, + "metadata": {}, "outputs": [], "source": [ "show_graph(tf.get_default_graph())" @@ -365,10 +303,7 @@ }, { "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, + "metadata": {}, "source": [ "## Packing sequences" ] @@ -377,9 +312,7 @@ "cell_type": "code", "execution_count": 15, "metadata": { - "collapsed": true, - "deletable": true, - "editable": true + "collapsed": true }, "outputs": [], "source": [ @@ -392,9 +325,7 @@ "cell_type": "code", "execution_count": 16, "metadata": { - "collapsed": false, - "deletable": true, - "editable": true + "collapsed": true }, "outputs": [], "source": [ @@ -413,9 +344,7 @@ "cell_type": "code", "execution_count": 17, "metadata": { - "collapsed": true, - "deletable": true, - "editable": true + "collapsed": true }, "outputs": [], "source": [ @@ -426,9 +355,7 @@ "cell_type": "code", "execution_count": 18, "metadata": { - "collapsed": false, - "deletable": true, - "editable": true + "collapsed": true }, "outputs": [], "source": [ @@ -448,9 +375,7 @@ { "cell_type": "code", "execution_count": 19, - "metadata": { - "collapsed": false - }, + "metadata": {}, "outputs": [], "source": [ "print(outputs_val)" @@ -459,11 +384,7 @@ { "cell_type": "code", "execution_count": 20, - "metadata": { - 
"collapsed": false, - "deletable": true, - "editable": true - }, + "metadata": {}, "outputs": [], "source": [ "print(np.transpose(outputs_val, axes=[1, 0, 2])[1])" @@ -471,10 +392,7 @@ }, { "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, + "metadata": {}, "source": [ "## Using `dynamic_rnn()`" ] @@ -483,9 +401,7 @@ "cell_type": "code", "execution_count": 21, "metadata": { - "collapsed": true, - "deletable": true, - "editable": true + "collapsed": true }, "outputs": [], "source": [ @@ -498,9 +414,7 @@ "cell_type": "code", "execution_count": 22, "metadata": { - "collapsed": false, - "deletable": true, - "editable": true + "collapsed": true }, "outputs": [], "source": [ @@ -516,9 +430,7 @@ "cell_type": "code", "execution_count": 23, "metadata": { - "collapsed": true, - "deletable": true, - "editable": true + "collapsed": true }, "outputs": [], "source": [ @@ -529,9 +441,7 @@ "cell_type": "code", "execution_count": 24, "metadata": { - "collapsed": false, - "deletable": true, - "editable": true + "collapsed": true }, "outputs": [], "source": [ @@ -550,9 +460,7 @@ { "cell_type": "code", "execution_count": 25, - "metadata": { - "collapsed": false - }, + "metadata": {}, "outputs": [], "source": [ "print(outputs_val)" @@ -561,11 +469,7 @@ { "cell_type": "code", "execution_count": 26, - "metadata": { - "collapsed": false, - "deletable": true, - "editable": true - }, + "metadata": {}, "outputs": [], "source": [ "show_graph(tf.get_default_graph())" @@ -573,10 +477,7 @@ }, { "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, + "metadata": {}, "source": [ "## Setting the sequence lengths" ] @@ -585,9 +486,7 @@ "cell_type": "code", "execution_count": 27, "metadata": { - "collapsed": true, - "deletable": true, - "editable": true + "collapsed": true }, "outputs": [], "source": [ @@ -605,9 +504,7 @@ "cell_type": "code", "execution_count": 28, "metadata": { - "collapsed": true, - "deletable": true, - "editable": 
true + "collapsed": true }, "outputs": [], "source": [ @@ -620,9 +517,7 @@ "cell_type": "code", "execution_count": 29, "metadata": { - "collapsed": true, - "deletable": true, - "editable": true + "collapsed": true }, "outputs": [], "source": [ @@ -633,9 +528,7 @@ "cell_type": "code", "execution_count": 30, "metadata": { - "collapsed": false, - "deletable": true, - "editable": true + "collapsed": true }, "outputs": [], "source": [ @@ -653,9 +546,7 @@ "cell_type": "code", "execution_count": 31, "metadata": { - "collapsed": true, - "deletable": true, - "editable": true + "collapsed": true }, "outputs": [], "source": [ @@ -668,11 +559,7 @@ { "cell_type": "code", "execution_count": 32, - "metadata": { - "collapsed": false, - "deletable": true, - "editable": true - }, + "metadata": {}, "outputs": [], "source": [ "print(outputs_val)" @@ -681,11 +568,7 @@ { "cell_type": "code", "execution_count": 33, - "metadata": { - "collapsed": false, - "deletable": true, - "editable": true - }, + "metadata": {}, "outputs": [], "source": [ "print(states_val)" @@ -693,20 +576,14 @@ }, { "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, + "metadata": {}, "source": [ "## Training a sequence classifier" ] }, { "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, + "metadata": {}, "source": [ "Note: the book uses `tensorflow.contrib.layers.fully_connected()` rather than `tf.layers.dense()` (which did not exist when this chapter was written). It is now preferable to use `tf.layers.dense()`, because anything in the contrib module may change or be deleted without notice. The `dense()` function is almost identical to the `fully_connected()` function. 
The main differences relevant to this chapter are:\n", "* several parameters are renamed: `scope` becomes `name`, `activation_fn` becomes `activation` (and similarly the `_fn` suffix is removed from other parameters such as `normalizer_fn`), `weights_initializer` becomes `kernel_initializer`, etc.\n", @@ -717,9 +594,7 @@ "cell_type": "code", "execution_count": 34, "metadata": { - "collapsed": false, - "deletable": true, - "editable": true + "collapsed": true }, "outputs": [], "source": [ @@ -753,11 +628,7 @@ { "cell_type": "code", "execution_count": 35, - "metadata": { - "collapsed": false, - "deletable": true, - "editable": true - }, + "metadata": {}, "outputs": [], "source": [ "from tensorflow.examples.tutorials.mnist import input_data\n", @@ -769,11 +640,7 @@ { "cell_type": "code", "execution_count": 36, - "metadata": { - "collapsed": false, - "deletable": true, - "editable": true - }, + "metadata": {}, "outputs": [], "source": [ "n_epochs = 100\n", @@ -793,10 +660,7 @@ }, { "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, + "metadata": {}, "source": [ "# Multi-layer RNN" ] @@ -805,9 +669,7 @@ "cell_type": "code", "execution_count": 37, "metadata": { - "collapsed": true, - "deletable": true, - "editable": true + "collapsed": true }, "outputs": [], "source": [ @@ -827,9 +689,7 @@ "cell_type": "code", "execution_count": 38, "metadata": { - "collapsed": false, - "deletable": true, - "editable": true + "collapsed": true }, "outputs": [], "source": [ @@ -847,9 +707,7 @@ "cell_type": "code", "execution_count": 39, "metadata": { - "collapsed": true, - "deletable": true, - "editable": true + "collapsed": true }, "outputs": [], "source": [ @@ -868,11 +726,7 @@ { "cell_type": "code", "execution_count": 40, - "metadata": { - "collapsed": false, - "deletable": true, - "editable": true - }, + "metadata": {}, "outputs": [], "source": [ "n_epochs = 10\n", @@ -892,10 +746,7 @@ }, { "cell_type": "markdown", - "metadata": { - "deletable": 
true, - "editable": true - }, + "metadata": {}, "source": [ "# Time series" ] @@ -904,9 +755,7 @@ "cell_type": "code", "execution_count": 41, "metadata": { - "collapsed": false, - "deletable": true, - "editable": true + "collapsed": true }, "outputs": [], "source": [ @@ -926,11 +775,7 @@ { "cell_type": "code", "execution_count": 42, - "metadata": { - "collapsed": false, - "deletable": true, - "editable": true - }, + "metadata": {}, "outputs": [], "source": [ "t = np.linspace(t_min, t_max, int((t_max - t_min) / resolution))\n", @@ -964,9 +809,7 @@ "cell_type": "code", "execution_count": 43, "metadata": { - "collapsed": false, - "deletable": true, - "editable": true + "collapsed": true }, "outputs": [], "source": [ @@ -976,11 +819,7 @@ { "cell_type": "code", "execution_count": 44, - "metadata": { - "collapsed": false, - "deletable": true, - "editable": true - }, + "metadata": {}, "outputs": [], "source": [ "np.c_[X_batch[0], y_batch[0]]" @@ -988,20 +827,14 @@ }, { "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, + "metadata": {}, "source": [ "## Using an `OuputProjectionWrapper`" ] }, { "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, + "metadata": {}, "source": [ "Let's create the RNN. It will contain 100 recurrent neurons and we will unroll it over 20 time steps since each traiing instance will be 20 inputs long. Each input will contain only one feature (the value at that time). The targets are also sequences of 20 inputs, each containing a sigle value:" ] @@ -1010,9 +843,7 @@ "cell_type": "code", "execution_count": 45, "metadata": { - "collapsed": true, - "deletable": true, - "editable": true + "collapsed": true }, "outputs": [], "source": [ @@ -1032,10 +863,7 @@ }, { "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, + "metadata": {}, "source": [ "At each time step we now have an output vector of size 100. 
But what we actually want is a single output value at each time step. The simplest solution is to wrap the cell in an `OutputProjectionWrapper`." ] @@ -1044,9 +872,7 @@ "cell_type": "code", "execution_count": 46, "metadata": { - "collapsed": true, - "deletable": true, - "editable": true + "collapsed": true }, "outputs": [], "source": [ @@ -1065,9 +891,7 @@ "cell_type": "code", "execution_count": 47, "metadata": { - "collapsed": false, - "deletable": true, - "editable": true + "collapsed": true }, "outputs": [], "source": [ @@ -1080,9 +904,7 @@ "cell_type": "code", "execution_count": 48, "metadata": { - "collapsed": true, - "deletable": true, - "editable": true + "collapsed": true }, "outputs": [], "source": [ @@ -1093,9 +915,7 @@ "cell_type": "code", "execution_count": 49, "metadata": { - "collapsed": true, - "deletable": true, - "editable": true + "collapsed": true }, "outputs": [], "source": [ @@ -1112,9 +932,7 @@ "cell_type": "code", "execution_count": 50, "metadata": { - "collapsed": true, - "deletable": true, - "editable": true + "collapsed": true }, "outputs": [], "source": [ @@ -1124,11 +942,7 @@ { "cell_type": "code", "execution_count": 51, - "metadata": { - "collapsed": false, - "deletable": true, - "editable": true - }, + "metadata": {}, "outputs": [], "source": [ "n_iterations = 1500\n", @@ -1149,11 +963,7 @@ { "cell_type": "code", "execution_count": 52, - "metadata": { - "collapsed": false, - "deletable": true, - "editable": true - }, + "metadata": {}, "outputs": [], "source": [ "with tf.Session() as sess: # not shown in the book\n", @@ -1166,11 +976,7 @@ { "cell_type": "code", "execution_count": 53, - "metadata": { - "collapsed": false, - "deletable": true, - "editable": true - }, + "metadata": {}, "outputs": [], "source": [ "y_pred" @@ -1179,11 +985,7 @@ { "cell_type": "code", "execution_count": 54, - "metadata": { - "collapsed": false, - "deletable": true, - "editable": true - }, + "metadata": {}, "outputs": [], "source": [ "plt.title(\"Testing the 
model\", fontsize=14)\n", @@ -1199,10 +1001,7 @@ }, { "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, + "metadata": {}, "source": [ "## Without using an `OutputProjectionWrapper`" ] @@ -1211,9 +1010,7 @@ "cell_type": "code", "execution_count": 55, "metadata": { - "collapsed": true, - "deletable": true, - "editable": true + "collapsed": true }, "outputs": [], "source": [ @@ -1231,9 +1028,7 @@ "cell_type": "code", "execution_count": 56, "metadata": { - "collapsed": false, - "deletable": true, - "editable": true + "collapsed": true }, "outputs": [], "source": [ @@ -1245,9 +1040,7 @@ "cell_type": "code", "execution_count": 57, "metadata": { - "collapsed": true, - "deletable": true, - "editable": true + "collapsed": true }, "outputs": [], "source": [ @@ -1259,9 +1052,7 @@ "cell_type": "code", "execution_count": 58, "metadata": { - "collapsed": true, - "deletable": true, - "editable": true + "collapsed": true }, "outputs": [], "source": [ @@ -1274,9 +1065,7 @@ "cell_type": "code", "execution_count": 59, "metadata": { - "collapsed": true, - "deletable": true, - "editable": true + "collapsed": true }, "outputs": [], "source": [ @@ -1291,11 +1080,7 @@ { "cell_type": "code", "execution_count": 60, - "metadata": { - "collapsed": false, - "deletable": true, - "editable": true - }, + "metadata": {}, "outputs": [], "source": [ "n_iterations = 1500\n", @@ -1319,11 +1104,7 @@ { "cell_type": "code", "execution_count": 61, - "metadata": { - "collapsed": false, - "deletable": true, - "editable": true - }, + "metadata": {}, "outputs": [], "source": [ "y_pred" @@ -1332,11 +1113,7 @@ { "cell_type": "code", "execution_count": 62, - "metadata": { - "collapsed": false, - "deletable": true, - "editable": true - }, + "metadata": {}, "outputs": [], "source": [ "plt.title(\"Testing the model\", fontsize=14)\n", @@ -1351,10 +1128,7 @@ }, { "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, + "metadata": {}, "source": [ "## 
Generating a creative new sequence" ] @@ -1362,11 +1136,7 @@ { "cell_type": "code", "execution_count": 63, - "metadata": { - "collapsed": false, - "deletable": true, - "editable": true - }, + "metadata": {}, "outputs": [], "source": [ "with tf.Session() as sess: # not shown in the book\n", @@ -1382,11 +1152,7 @@ { "cell_type": "code", "execution_count": 64, - "metadata": { - "collapsed": false, - "deletable": true, - "editable": true - }, + "metadata": {}, "outputs": [], "source": [ "plt.figure(figsize=(8,4))\n", @@ -1400,11 +1166,7 @@ { "cell_type": "code", "execution_count": 65, - "metadata": { - "collapsed": false, - "deletable": true, - "editable": true - }, + "metadata": {}, "outputs": [], "source": [ "with tf.Session() as sess:\n", @@ -1439,20 +1201,14 @@ }, { "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, + "metadata": {}, "source": [ "# Deep RNN" ] }, { "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, + "metadata": {}, "source": [ "## MultiRNNCell" ] @@ -1461,9 +1217,7 @@ "cell_type": "code", "execution_count": 66, "metadata": { - "collapsed": true, - "deletable": true, - "editable": true + "collapsed": true }, "outputs": [], "source": [ @@ -1479,9 +1233,7 @@ "cell_type": "code", "execution_count": 67, "metadata": { - "collapsed": true, - "deletable": true, - "editable": true + "collapsed": true }, "outputs": [], "source": [ @@ -1498,9 +1250,7 @@ "cell_type": "code", "execution_count": 68, "metadata": { - "collapsed": true, - "deletable": true, - "editable": true + "collapsed": true }, "outputs": [], "source": [ @@ -1511,9 +1261,7 @@ "cell_type": "code", "execution_count": 69, "metadata": { - "collapsed": true, - "deletable": true, - "editable": true + "collapsed": true }, "outputs": [], "source": [ @@ -1524,9 +1272,7 @@ "cell_type": "code", "execution_count": 70, "metadata": { - "collapsed": true, - "deletable": true, - "editable": true + "collapsed": true }, "outputs": [], 
"source": [ @@ -1538,11 +1284,7 @@ { "cell_type": "code", "execution_count": 71, - "metadata": { - "collapsed": false, - "deletable": true, - "editable": true - }, + "metadata": {}, "outputs": [], "source": [ "outputs_val.shape" @@ -1550,20 +1292,14 @@ }, { "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, + "metadata": {}, "source": [ "## Distributing a Deep RNN Across Multiple GPUs" ] }, { "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, + "metadata": {}, "source": [ "Do **NOT** do this:" ] @@ -1572,9 +1308,7 @@ "cell_type": "code", "execution_count": 72, "metadata": { - "collapsed": true, - "deletable": true, - "editable": true + "collapsed": true }, "outputs": [], "source": [ @@ -1587,10 +1321,7 @@ }, { "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, + "metadata": {}, "source": [ "Instead, you need a `DeviceCellWrapper`:" ] @@ -1599,9 +1330,7 @@ "cell_type": "code", "execution_count": 73, "metadata": { - "collapsed": false, - "deletable": true, - "editable": true + "collapsed": true }, "outputs": [], "source": [ @@ -1629,9 +1358,7 @@ "cell_type": "code", "execution_count": 74, "metadata": { - "collapsed": true, - "deletable": true, - "editable": true + "collapsed": true }, "outputs": [], "source": [ @@ -1648,9 +1375,7 @@ "cell_type": "code", "execution_count": 75, "metadata": { - "collapsed": false, - "deletable": true, - "editable": true + "collapsed": true }, "outputs": [], "source": [ @@ -1665,9 +1390,7 @@ "cell_type": "code", "execution_count": 76, "metadata": { - "collapsed": true, - "deletable": true, - "editable": true + "collapsed": true }, "outputs": [], "source": [ @@ -1678,9 +1401,6 @@ "cell_type": "code", "execution_count": 77, "metadata": { - "collapsed": false, - "deletable": true, - "editable": true, "scrolled": true }, "outputs": [], @@ -1692,10 +1412,7 @@ }, { "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": 
true - }, + "metadata": {}, "source": [ "## Dropout" ] @@ -1704,9 +1421,7 @@ "cell_type": "code", "execution_count": 78, "metadata": { - "collapsed": true, - "deletable": true, - "editable": true + "collapsed": true }, "outputs": [], "source": [ @@ -1726,9 +1441,7 @@ "cell_type": "code", "execution_count": 79, "metadata": { - "collapsed": false, - "deletable": true, - "editable": true + "collapsed": true }, "outputs": [], "source": [ @@ -1746,9 +1459,7 @@ "cell_type": "code", "execution_count": 80, "metadata": { - "collapsed": true, - "deletable": true, - "editable": true + "collapsed": true }, "outputs": [], "source": [ @@ -1768,10 +1479,7 @@ }, { "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, + "metadata": {}, "source": [ "Unfortunately, this code is only usable for training, because the `DropoutWrapper` class has no `training` parameter, so it always applies dropout, even when the model is not being trained, so we must first train the model, then create a different model for testing, without the `DropoutWrapper`." 
] @@ -1779,11 +1487,7 @@ { "cell_type": "code", "execution_count": 81, - "metadata": { - "collapsed": false, - "deletable": true, - "editable": true - }, + "metadata": {}, "outputs": [], "source": [ "n_iterations = 1000\n", @@ -1802,10 +1506,7 @@ }, { "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, + "metadata": {}, "source": [ "Now that the model is trained, we need to create the model again, but without the `DropoutWrapper` for testing:" ] @@ -1814,9 +1515,7 @@ "cell_type": "code", "execution_count": 82, "metadata": { - "collapsed": true, - "deletable": true, - "editable": true + "collapsed": true }, "outputs": [], "source": [ @@ -1853,11 +1552,7 @@ { "cell_type": "code", "execution_count": 83, - "metadata": { - "collapsed": false, - "deletable": true, - "editable": true - }, + "metadata": {}, "outputs": [], "source": [ "with tf.Session() as sess:\n", @@ -1878,20 +1573,14 @@ }, { "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, + "metadata": {}, "source": [ "Oops, it seems that Dropout does not help at all in this particular case. 
:/" ] }, { "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, + "metadata": {}, "source": [ "Another option is to write a script with a command line argument to specify whether you want to train the mode or use it for making predictions:" ] @@ -1899,11 +1588,7 @@ { "cell_type": "code", "execution_count": 84, - "metadata": { - "collapsed": false, - "deletable": true, - "editable": true - }, + "metadata": {}, "outputs": [], "source": [ "reset_graph()\n", @@ -1948,10 +1633,7 @@ }, { "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, + "metadata": {}, "source": [ "# LSTM" ] @@ -1960,9 +1642,7 @@ "cell_type": "code", "execution_count": 85, "metadata": { - "collapsed": true, - "deletable": true, - "editable": true + "collapsed": true }, "outputs": [], "source": [ @@ -1975,9 +1655,7 @@ "cell_type": "code", "execution_count": 86, "metadata": { - "collapsed": true, - "deletable": true, - "editable": true + "collapsed": true }, "outputs": [], "source": [ @@ -2011,11 +1689,7 @@ { "cell_type": "code", "execution_count": 87, - "metadata": { - "collapsed": false, - "deletable": true, - "editable": true - }, + "metadata": {}, "outputs": [], "source": [ "states" @@ -2024,11 +1698,7 @@ { "cell_type": "code", "execution_count": 88, - "metadata": { - "collapsed": false, - "deletable": true, - "editable": true - }, + "metadata": {}, "outputs": [], "source": [ "top_layer_h_state" @@ -2038,9 +1708,6 @@ "cell_type": "code", "execution_count": 89, "metadata": { - "collapsed": false, - "deletable": true, - "editable": true, "scrolled": true }, "outputs": [], @@ -2064,9 +1731,7 @@ "cell_type": "code", "execution_count": 90, "metadata": { - "collapsed": true, - "deletable": true, - "editable": true + "collapsed": true }, "outputs": [], "source": [ @@ -2077,9 +1742,7 @@ "cell_type": "code", "execution_count": 91, "metadata": { - "collapsed": true, - "deletable": true, - "editable": true + "collapsed": true }, "outputs": [], 
"source": [ @@ -2088,30 +1751,21 @@ }, { "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, + "metadata": {}, "source": [ "# Embeddings" ] }, { "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, + "metadata": {}, "source": [ "This section is based on TensorFlow's [Word2Vec tutorial](https://www.tensorflow.org/versions/r0.11/tutorials/word2vec/index.html)." ] }, { "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, + "metadata": {}, "source": [ "## Fetch the data" ] @@ -2120,9 +1774,7 @@ "cell_type": "code", "execution_count": 92, "metadata": { - "collapsed": true, - "deletable": true, - "editable": true + "collapsed": true }, "outputs": [], "source": [ @@ -2163,9 +1815,7 @@ "cell_type": "code", "execution_count": 93, "metadata": { - "collapsed": false, - "deletable": true, - "editable": true + "collapsed": true }, "outputs": [], "source": [ @@ -2175,11 +1825,7 @@ { "cell_type": "code", "execution_count": 94, - "metadata": { - "collapsed": false, - "deletable": true, - "editable": true - }, + "metadata": {}, "outputs": [], "source": [ "words[:5]" @@ -2187,10 +1833,7 @@ }, { "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, + "metadata": {}, "source": [ "## Build the dictionary" ] @@ -2199,9 +1842,7 @@ "cell_type": "code", "execution_count": 95, "metadata": { - "collapsed": false, - "deletable": true, - "editable": true + "collapsed": true }, "outputs": [], "source": [ @@ -2218,11 +1859,7 @@ { "cell_type": "code", "execution_count": 96, - "metadata": { - "collapsed": false, - "deletable": true, - "editable": true - }, + "metadata": {}, "outputs": [], "source": [ "\" \".join(words[:9]), data[:9]" @@ -2231,11 +1868,7 @@ { "cell_type": "code", "execution_count": 97, - "metadata": { - "collapsed": false, - "deletable": true, - "editable": true - }, + "metadata": {}, "outputs": [], "source": [ "\" \".join([vocabulary[word_index] for 
word_index in [5241, 3081, 12, 6, 195, 2, 3134, 46, 59]])" @@ -2244,11 +1877,7 @@ { "cell_type": "code", "execution_count": 98, - "metadata": { - "collapsed": false, - "deletable": true, - "editable": true - }, + "metadata": {}, "outputs": [], "source": [ "words[24], data[24]" @@ -2256,10 +1885,7 @@ }, { "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, + "metadata": {}, "source": [ "## Generate batches" ] @@ -2268,9 +1894,7 @@ "cell_type": "code", "execution_count": 99, "metadata": { - "collapsed": true, - "deletable": true, - "editable": true + "collapsed": true }, "outputs": [], "source": [ @@ -2306,9 +1930,7 @@ "cell_type": "code", "execution_count": 100, "metadata": { - "collapsed": false, - "deletable": true, - "editable": true + "collapsed": true }, "outputs": [], "source": [ @@ -2319,11 +1941,7 @@ { "cell_type": "code", "execution_count": 101, - "metadata": { - "collapsed": false, - "deletable": true, - "editable": true - }, + "metadata": {}, "outputs": [], "source": [ "batch, [vocabulary[word] for word in batch]" @@ -2332,11 +1950,7 @@ { "cell_type": "code", "execution_count": 102, - "metadata": { - "collapsed": false, - "deletable": true, - "editable": true - }, + "metadata": {}, "outputs": [], "source": [ "labels, [vocabulary[word] for word in labels[:, 0]]" @@ -2344,10 +1958,7 @@ }, { "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, + "metadata": {}, "source": [ "## Build the model" ] @@ -2356,9 +1967,7 @@ "cell_type": "code", "execution_count": 103, "metadata": { - "collapsed": true, - "deletable": true, - "editable": true + "collapsed": true }, "outputs": [], "source": [ @@ -2382,9 +1991,7 @@ "cell_type": "code", "execution_count": 104, "metadata": { - "collapsed": true, - "deletable": true, - "editable": true + "collapsed": true }, "outputs": [], "source": [ @@ -2399,9 +2006,7 @@ "cell_type": "code", "execution_count": 105, "metadata": { - "collapsed": false, - "deletable": 
true, - "editable": true + "collapsed": true }, "outputs": [], "source": [ @@ -2417,9 +2022,7 @@ "cell_type": "code", "execution_count": 106, "metadata": { - "collapsed": true, - "deletable": true, - "editable": true + "collapsed": true }, "outputs": [], "source": [ @@ -2431,9 +2034,7 @@ "cell_type": "code", "execution_count": 107, "metadata": { - "collapsed": true, - "deletable": true, - "editable": true + "collapsed": true }, "outputs": [], "source": [ @@ -2466,10 +2067,7 @@ }, { "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, + "metadata": {}, "source": [ "## Train the model" ] @@ -2477,11 +2075,7 @@ { "cell_type": "code", "execution_count": 108, - "metadata": { - "collapsed": false, - "deletable": true, - "editable": true - }, + "metadata": {}, "outputs": [], "source": [ "num_steps = 10001\n", @@ -2525,10 +2119,7 @@ }, { "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, + "metadata": {}, "source": [ "Let's save the final embeddings (of course you can use a TensorFlow `Saver` if you prefer):" ] @@ -2537,9 +2128,7 @@ "cell_type": "code", "execution_count": 109, "metadata": { - "collapsed": false, - "deletable": true, - "editable": true + "collapsed": true }, "outputs": [], "source": [ @@ -2548,10 +2137,7 @@ }, { "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, + "metadata": {}, "source": [ "## Plot the embeddings" ] @@ -2560,9 +2146,7 @@ "cell_type": "code", "execution_count": 110, "metadata": { - "collapsed": true, - "deletable": true, - "editable": true + "collapsed": true }, "outputs": [], "source": [ @@ -2583,11 +2167,7 @@ { "cell_type": "code", "execution_count": 111, - "metadata": { - "collapsed": false, - "deletable": true, - "editable": true - }, + "metadata": {}, "outputs": [], "source": [ "from sklearn.manifold import TSNE\n", @@ -2601,20 +2181,14 @@ }, { "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, + 
"metadata": {}, "source": [ "# Machine Translation" ] }, { "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, + "metadata": {}, "source": [ "The `basic_rnn_seq2seq()` function creates a simple Encoder/Decoder model: it first runs an RNN to encode `encoder_inputs` into a state vector, then runs a decoder initialized with the last encoder state on `decoder_inputs`. Encoder and decoder use the same RNN cell type but they don't share parameters." ] @@ -2623,9 +2197,7 @@ "cell_type": "code", "execution_count": 112, "metadata": { - "collapsed": false, - "deletable": true, - "editable": true + "collapsed": true }, "outputs": [], "source": [ @@ -2668,9 +2240,7 @@ "cell_type": "code", "execution_count": 113, "metadata": { - "collapsed": false, - "deletable": true, - "editable": true + "collapsed": true }, "outputs": [], "source": [ @@ -2688,9 +2258,7 @@ { "cell_type": "markdown", "metadata": { - "collapsed": true, - "deletable": true, - "editable": true + "collapsed": true }, "source": [ "# Exercise solutions" @@ -2698,13 +2266,428 @@ }, { "cell_type": "markdown", - "metadata": { - "deletable": true, - "editable": true - }, + "metadata": {}, "source": [ - "**Coming soon**" + "## 1. to 6." ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "See Appendix A." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 7. Embedded Reber Grammars" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "First we need to build a function that generates strings based on a grammar. The grammar will be represented as a list of possible transitions for each state. A transition specifies the string to output (or a grammar to generate it) and the next state." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 114, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "from random import choice, seed\n", + "\n", + "# to make this notebook's output stable across runs\n", + "seed(42)\n", + "np.random.seed(42)\n", + "\n", + "default_reber_grammar = [\n", + " [(\"B\", 1)], # (state 0) =B=>(state 1)\n", + " [(\"T\", 2), (\"P\", 3)], # (state 1) =T=>(state 2) or =P=>(state 3)\n", + " [(\"S\", 2), (\"X\", 4)], # (state 2) =S=>(state 2) or =X=>(state 4)\n", + " [(\"T\", 3), (\"V\", 5)], # and so on...\n", + " [(\"X\", 3), (\"S\", 6)],\n", + " [(\"P\", 4), (\"V\", 6)],\n", + " [(\"E\", None)]] # (state 6) =E=>(terminal state)\n", + "\n", + "embedded_reber_grammar = [\n", + " [(\"B\", 1)],\n", + " [(\"T\", 2), (\"P\", 3)],\n", + " [(default_reber_grammar, 4)],\n", + " [(default_reber_grammar, 5)],\n", + " [(\"T\", 6)],\n", + " [(\"P\", 6)],\n", + " [(\"E\", None)]]\n", + "\n", + "def generate_string(grammar):\n", + " state = 0\n", + " output = []\n", + " while state is not None:\n", + " production, state = choice(grammar[state])\n", + " if isinstance(production, list):\n", + " production = generate_string(grammar=production)\n", + " output.append(production)\n", + " return \"\".join(output)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's generate a few strings based on the default Reber grammar:" + ] + }, + { + "cell_type": "code", + "execution_count": 115, + "metadata": {}, + "outputs": [], + "source": [ + "for _ in range(25):\n", + " print(generate_string(default_reber_grammar), end=\" \")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Looks good. 
Now let's generate a few strings based on the embedded Reber grammar:" + ] + }, + { + "cell_type": "code", + "execution_count": 116, + "metadata": {}, + "outputs": [], + "source": [ + "for _ in range(25):\n", + " print(generate_string(embedded_reber_grammar), end=\" \")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Okay, now we need a function to generate strings that do not respect the grammar. We could generate a random string, but the task would be a bit too easy, so instead we will generate a string that respects the grammar, and we will corrupt it by changing just one character:" + ] + }, + { + "cell_type": "code", + "execution_count": 117, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "def generate_corrupted_string(grammar, chars=\"BEPSTVX\"):\n", + " good_string = generate_string(grammar)\n", + " index = np.random.randint(len(good_string))\n", + " good_char = good_string[index]\n", + " bad_char = choice(list(set(chars) - set(good_char)))\n", + " return good_string[:index] + bad_char + good_string[index + 1:]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's look at a few corrupted strings:" + ] + }, + { + "cell_type": "code", + "execution_count": 118, + "metadata": {}, + "outputs": [], + "source": [ + "for _ in range(25):\n", + " print(generate_corrupted_string(embedded_reber_grammar), end=\" \")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "It's not possible to feed a string directly to an RNN: we need to convert it to a sequence of vectors, first. Each vector will represent a single letter, using a one-hot encoding. For example, the letter \"B\" will be represented as the vector `[1, 0, 0, 0, 0, 0, 0]`, the letter E will be represented as `[0, 1, 0, 0, 0, 0, 0]` and so on. Let's write a function that converts a string to a sequence of such one-hot vectors. 
Note that if the string is shorter than `n_steps`, it will be padded with zero vectors (later, we will tell TensorFlow how long each string actually is using the `sequence_length` parameter)." + ] + }, + { + "cell_type": "code", + "execution_count": 119, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "def string_to_one_hot_vectors(string, n_steps, chars=\"BEPSTVX\"):\n", + " char_to_index = {char: index for index, char in enumerate(chars)}\n", + " output = np.zeros((n_steps, len(chars)), dtype=np.int32)\n", + " for index, char in enumerate(string):\n", + " output[index, char_to_index[char]] = 1.\n", + " return output" + ] + }, + { + "cell_type": "code", + "execution_count": 120, + "metadata": {}, + "outputs": [], + "source": [ + "string_to_one_hot_vectors(\"BTBTXSETE\", 12)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can now generate the dataset, with 50% good strings, and 50% bad strings:" + ] + }, + { + "cell_type": "code", + "execution_count": 121, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "def generate_dataset(size):\n", + " good_strings = [generate_string(embedded_reber_grammar)\n", + " for _ in range(size // 2)]\n", + " bad_strings = [generate_corrupted_string(embedded_reber_grammar)\n", + " for _ in range(size - size // 2)]\n", + " all_strings = good_strings + bad_strings\n", + " n_steps = max([len(string) for string in all_strings])\n", + " X = np.array([string_to_one_hot_vectors(string, n_steps)\n", + " for string in all_strings])\n", + " seq_length = np.array([len(string) for string in all_strings])\n", + " y = np.array([[1] for _ in range(len(good_strings))] +\n", + " [[0] for _ in range(len(bad_strings))])\n", + " rnd_idx = np.random.permutation(size)\n", + " return X[rnd_idx], seq_length[rnd_idx], y[rnd_idx]" + ] + }, + { + "cell_type": "code", + "execution_count": 122, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "X_train, 
l_train, y_train = generate_dataset(10000)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's take a look at the first training instances:" + ] + }, + { + "cell_type": "code", + "execution_count": 123, + "metadata": {}, + "outputs": [], + "source": [ + "X_train[0]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "It's padded with a lot of zeros because the longest string in the dataset is that long. How long is this particular string?" + ] + }, + { + "cell_type": "code", + "execution_count": 124, + "metadata": {}, + "outputs": [], + "source": [ + "l_train[0]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "What class is it?" + ] + }, + { + "cell_type": "code", + "execution_count": 125, + "metadata": {}, + "outputs": [], + "source": [ + "y_train[0]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Perfect! We are ready to create the RNN to identify good strings. We build a sequence classifier very similar to the one we built earlier to classify MNIST images, with two main differences:\n", + "* First, the input strings have variable length, so we need to specify the `sequence_length` when calling the `dynamic_rnn()` function.\n", + "* Second, this is a binary classifier, so we only need one output neuron that will output, for each input string, the estimated log probability that it is a good string. 
For multiclass classification, we used `sparse_softmax_cross_entropy_with_logits()` but for binary classification we use `sigmoid_cross_entropy_with_logits()`.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 126, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "reset_graph()\n", + "\n", + "possible_chars = \"BEPSTVX\"\n", + "n_inputs = len(possible_chars)\n", + "n_neurons = 30\n", + "n_outputs = 1\n", + "\n", + "learning_rate = 0.02\n", + "momentum = 0.95\n", + "\n", + "X = tf.placeholder(tf.float32, [None, None, n_inputs], name=\"X\")\n", + "seq_length = tf.placeholder(tf.int32, [None], name=\"seq_length\")\n", + "y = tf.placeholder(tf.float32, [None, 1], name=\"y\")\n", + "\n", + "gru_cell = tf.contrib.rnn.GRUCell(num_units=n_neurons)\n", + "outputs, states = tf.nn.dynamic_rnn(gru_cell, X, dtype=tf.float32,\n", + " sequence_length=seq_length)\n", + "\n", + "logits = tf.layers.dense(states, n_outputs, name=\"logits\")\n", + "y_pred = tf.cast(tf.greater(logits, 0.), tf.float32, name=\"y_pred\")\n", + "y_proba = tf.nn.sigmoid(logits, name=\"y_proba\")\n", + "\n", + "xentropy = tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=logits)\n", + "loss = tf.reduce_mean(xentropy, name=\"loss\")\n", + "optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate,\n", + " momentum=momentum,\n", + " use_nesterov=True)\n", + "training_op = optimizer.minimize(loss)\n", + "\n", + "correct = tf.equal(y_pred, y, name=\"correct\")\n", + "accuracy = tf.reduce_mean(tf.cast(correct, tf.float32), name=\"accuracy\")\n", + "\n", + "init = tf.global_variables_initializer()\n", + "saver = tf.train.Saver()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now let's generate a validation set so we can track progress during training:" + ] + }, + { + "cell_type": "code", + "execution_count": 127, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "X_val, l_val, y_val = generate_dataset(5000)" + ] 
+ }, + { + "cell_type": "code", + "execution_count": 128, + "metadata": {}, + "outputs": [], + "source": [ + "n_epochs = 50\n", + "batch_size = 50\n", + "\n", + "with tf.Session() as sess:\n", + " init.run()\n", + " for epoch in range(n_epochs):\n", + " X_batches = np.array_split(X_train, len(X_train) // batch_size)\n", + " l_batches = np.array_split(l_train, len(l_train) // batch_size)\n", + " y_batches = np.array_split(y_train, len(y_train) // batch_size)\n", + " for X_batch, l_batch, y_batch in zip(X_batches, l_batches, y_batches):\n", + " loss_val, _ = sess.run(\n", + " [loss, training_op],\n", + " feed_dict={X: X_batch, seq_length: l_batch, y: y_batch})\n", + " acc_train = accuracy.eval(feed_dict={X: X_batch, seq_length: l_batch, y: y_batch})\n", + " acc_val = accuracy.eval(feed_dict={X: X_val, seq_length: l_val, y: y_val})\n", + " print(\"{:4d} Train loss: {:.4f}, accuracy: {:.2f}% Validation accuracy: {:.2f}%\".format(\n", + " epoch, loss_val, 100 * acc_train, 100 * acc_val))\n", + " saver.save(sess, \"my_reber_classifier\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now let's test our RNN on two tricky strings: the first one is bad while the second one is good. They only differ by the second to last character. If the RNN gets this right, it shows that it managed to notice the pattern that the second letter should always be equal to the second to last letter. That requires a fairly long short-term memory (which is the reason why we used a GRU cell)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 129, + "metadata": {}, + "outputs": [], + "source": [ + "test_strings = [\n", + " \"BPBTSSSSSSSSSSSSXXTTTTTVPXTTVPXTTTTTTTVPXVPXVPXTTTVVETE\",\n", + " \"BPBTSSSSSSSSSSSSXXTTTTTVPXTTVPXTTTTTTTVPXVPXVPXTTTVVEPE\"]\n", + "l_test = np.array([len(s) for s in test_strings])\n", + "max_length = l_test.max()\n", + "X_test = [string_to_one_hot_vectors(s, n_steps=max_length)\n", + " for s in test_strings]\n", + "\n", + "with tf.Session() as sess:\n", + " saver.restore(sess, \"my_reber_classifier\")\n", + " y_proba_val = y_proba.eval(feed_dict={X: X_test, seq_length: l_test})\n", + "\n", + "print()\n", + "print(\"Estimated probability that these are Reber strings:\")\n", + "for index, string in enumerate(test_strings):\n", + " print(\"{}: {:.2f}%\".format(string, 100 * y_proba_val[index][0]))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Ta-da! It worked fine. The RNN found the correct answers with absolute confidence. :)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 8. and 9." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Coming soon..." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [] + } ], "metadata": { @@ -2723,7 +2706,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.5.3" + "version": "3.5.2" }, "nav_menu": {}, "toc": { @@ -2737,5 +2720,5 @@ } }, "nbformat": 4, - "nbformat_minor": 0 + "nbformat_minor": 1 }