diff --git a/04_training_linear_models.ipynb b/04_training_linear_models.ipynb index b9bc7b2..4b084d6 100644 --- a/04_training_linear_models.ipynb +++ b/04_training_linear_models.ipynb @@ -101,8 +101,10 @@ }, "outputs": [], "source": [ - "X = 2 * rnd.rand(100, 1)\n", - "y = 4 + 3 * X + rnd.randn(100, 1)" + "import numpy as np\n", + "\n", + "X = 2 * np.random.rand(100, 1)\n", + "y = 4 + 3 * X + np.random.randn(100, 1)" ] }, { @@ -133,10 +135,8 @@ }, "outputs": [], "source": [ - "import numpy.linalg as LA\n", - "\n", "X_b = np.c_[np.ones((100, 1)), X] # add x0 = 1 to each instance\n", - "theta_best = LA.inv(X_b.T.dot(X_b)).dot(X_b.T).dot(y)" + "theta_best = np.linalg.inv(X_b.T.dot(X_b)).dot(X_b.T).dot(y)" ] }, { @@ -177,6 +177,32 @@ "editable": true }, "outputs": [], + "source": [ + "plt.plot(X_new, y_predict, \"r-\")\n", + "plt.plot(X, y, \"b.\")\n", + "plt.axis([0, 2, 0, 15])\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "deletable": true, + "editable": true + }, + "source": [ + "The figure in the book actually corresponds to the following code, with a legend and axis labels:" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": { + "collapsed": false, + "deletable": true, + "editable": true + }, + "outputs": [], "source": [ "plt.plot(X_new, y_predict, \"r-\", linewidth=2, label=\"Predictions\")\n", "plt.plot(X, y, \"b.\")\n", @@ -190,7 +216,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 9, "metadata": { "collapsed": false, "deletable": true, @@ -206,7 +232,7 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 10, "metadata": { "collapsed": false, "deletable": true, @@ -229,7 +255,47 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 11, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "eta = 0.1\n", + "n_iterations = 1000\n", + "m = 100\n", + "theta = np.random.randn(2,1)\n", + "\n", + "for iteration in range(n_iterations):\n", + " gradients = 2/m * X_b.T.dot(X_b.dot(theta) - y)\n", + " theta = theta - eta * gradients" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "theta" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "X_new_b.dot(theta)" + ] + }, + { + "cell_type": "code", + "execution_count": 14, "metadata": { "collapsed": false, "deletable": true, @@ -269,6 +335,27 @@ "plt.show()" ] }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "rnd.seed(42)\n", + "theta = rnd.randn(2,1) # random initialization\n", + "\n", + "plt.figure(figsize=(10,4))\n", + "plt.subplot(131); plot_gradient_descent(theta, eta=0.02)\n", + "plt.ylabel(\"$y$\", rotation=0, fontsize=18)\n", + "plt.subplot(132); plot_gradient_descent(theta, eta=0.1, theta_path=theta_path_bgd)\n", + "plt.subplot(133); plot_gradient_descent(theta, eta=0.5)\n", + "\n", + "save_fig(\"gradient_descent_plot\")\n", + "plt.show()" + ] + }, { "cell_type": "markdown", "metadata": { @@ -281,7 +368,20 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 16, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "theta_path_sgd = []\n", + "m = len(X_b)\n", + "rnd.seed(42)" + ] + }, + { + "cell_type": "code", + "execution_count": 17, "metadata": { "collapsed": false, "deletable": true, @@ -289,44 +389,39 @@ 
}, "outputs": [], "source": [ - "theta_path_sgd = []\n", - "\n", - "n_iterations = 50\n", + "n_epochs = 50\n", "t0, t1 = 5, 50 # learning schedule hyperparameters\n", "\n", - "rnd.seed(42)\n", - "theta = rnd.randn(2,1) # random initialization\n", - "\n", "def learning_schedule(t):\n", " return t0 / (t + t1)\n", "\n", - "m = len(X_b)\n", + "theta = np.random.randn(2,1) # random initialization\n", "\n", - "for epoch in range(n_iterations):\n", + "for epoch in range(n_epochs):\n", " for i in range(m):\n", - " if epoch == 0 and i < 20:\n", - " y_predict = X_new_b.dot(theta)\n", - " style = \"b-\" if i > 0 else \"r--\"\n", - " plt.plot(X_new, y_predict, style)\n", - " random_index = rnd.randint(m)\n", + " if epoch == 0 and i < 20: # not shown in the book\n", + " y_predict = X_new_b.dot(theta) # not shown\n", + " style = \"b-\" if i > 0 else \"r--\" # not shown\n", + " plt.plot(X_new, y_predict, style) # not shown\n", + " random_index = np.random.randint(m)\n", " xi = X_b[random_index:random_index+1]\n", " yi = y[random_index:random_index+1]\n", " gradients = 2 * xi.T.dot(xi.dot(theta) - yi)\n", " eta = learning_schedule(epoch * m + i)\n", " theta = theta - eta * gradients\n", - " theta_path_sgd.append(theta)\n", + " theta_path_sgd.append(theta) # not shown\n", "\n", - "plt.plot(X, y, \"b.\")\n", - "plt.xlabel(\"$x_1$\", fontsize=18)\n", - "plt.ylabel(\"$y$\", rotation=0, fontsize=18)\n", - "plt.axis([0, 2, 0, 15])\n", - "save_fig(\"sgd_plot\")\n", - "plt.show()" + "plt.plot(X, y, \"b.\") # not shown\n", + "plt.xlabel(\"$x_1$\", fontsize=18) # not shown\n", + "plt.ylabel(\"$y$\", rotation=0, fontsize=18) # not shown\n", + "plt.axis([0, 2, 0, 15]) # not shown\n", + "save_fig(\"sgd_plot\") # not shown\n", + "plt.show() # not shown" ] }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 18, "metadata": { "collapsed": false, "deletable": true, @@ -339,7 +434,7 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": 19, "metadata": { "collapsed": false, "deletable": true, @@ -354,7 +449,7 @@ }, { "cell_type": "code", - "execution_count": 14, + "execution_count": 20, "metadata": { "collapsed": false, "deletable": true, @@ -377,7 +472,7 @@ }, { "cell_type": "code", - "execution_count": 15, + "execution_count": 21, "metadata": { "collapsed": true, "deletable": true, @@ -414,7 +509,7 @@ }, { "cell_type": "code", - "execution_count": 16, + "execution_count": 22, "metadata": { "collapsed": false, "deletable": true, @@ -427,7 +522,7 @@ }, { "cell_type": "code", - "execution_count": 17, + "execution_count": 23, "metadata": { "collapsed": false, "deletable": true, @@ -442,7 +537,7 @@ }, { "cell_type": "code", - "execution_count": 18, + "execution_count": 24, "metadata": { "collapsed": false, "deletable": true, @@ -474,7 +569,21 @@ }, { "cell_type": "code", - "execution_count": 19, + "execution_count": 25, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "import numpy as np\n", + "import numpy.random as rnd\n", + "\n", + "rnd.seed(42)" + ] + }, + { + "cell_type": "code", + "execution_count": 26, "metadata": { "collapsed": true, "deletable": true, @@ -482,18 +591,14 @@ }, "outputs": [], "source": [ - "import numpy as np\n", - "import numpy.random as rnd\n", - "\n", - "rnd.seed(42)\n", "m = 100\n", - "X = 6 * rnd.rand(m, 1) - 3\n", - "y = 2 + X + 0.5 * X**2 + rnd.randn(m, 1)" + "X = 6 * np.random.rand(m, 1) - 3\n", + "y = 0.5 * X**2 + X + 2 + np.random.randn(m, 1)" ] }, { "cell_type": "code", - "execution_count": 20, + "execution_count": 27, 
"metadata": { "collapsed": false, "deletable": true, @@ -511,7 +616,7 @@ }, { "cell_type": "code", - "execution_count": 21, + "execution_count": 28, "metadata": { "collapsed": false, "deletable": true, @@ -527,7 +632,7 @@ }, { "cell_type": "code", - "execution_count": 22, + "execution_count": 29, "metadata": { "collapsed": false, "deletable": true, @@ -540,7 +645,7 @@ }, { "cell_type": "code", - "execution_count": 23, + "execution_count": 30, "metadata": { "collapsed": false, "deletable": true, @@ -555,7 +660,7 @@ }, { "cell_type": "code", - "execution_count": 24, + "execution_count": 31, "metadata": { "collapsed": false, "deletable": true, @@ -578,7 +683,7 @@ }, { "cell_type": "code", - "execution_count": 25, + "execution_count": 32, "metadata": { "collapsed": false, "deletable": true, @@ -613,7 +718,7 @@ }, { "cell_type": "code", - "execution_count": 26, + "execution_count": 33, "metadata": { "collapsed": false, "deletable": true, @@ -634,22 +739,31 @@ " train_errors.append(mean_squared_error(y_train_predict, y_train[:m]))\n", " val_errors.append(mean_squared_error(y_val_predict, y_val))\n", "\n", - " plt.plot(np.sqrt(train_errors), \"r-+\", linewidth=2, label=\"Training set\")\n", - " plt.plot(np.sqrt(val_errors), \"b-\", linewidth=3, label=\"Validation set\")\n", - " plt.legend(loc=\"upper right\", fontsize=14)\n", - " plt.xlabel(\"Training set size\", fontsize=14)\n", - " plt.ylabel(\"RMSE\", fontsize=14)\n", - "\n", - "lin_reg = LinearRegression()\n", - "plot_learning_curves(lin_reg, X, y)\n", - "plt.axis([0, 80, 0, 3])\n", - "save_fig(\"underfitting_learning_curves_plot\")\n", - "plt.show()" + " plt.plot(np.sqrt(train_errors), \"r-+\", linewidth=2, label=\"train\")\n", + " plt.plot(np.sqrt(val_errors), \"b-\", linewidth=3, label=\"val\")\n", + " plt.legend(loc=\"upper right\", fontsize=14) # not shown in the book\n", + " plt.xlabel(\"Training set size\", fontsize=14) # not shown\n", + " plt.ylabel(\"RMSE\", fontsize=14) # not shown" ] }, { "cell_type": "code", - "execution_count": 27, + "execution_count": 34, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "lin_reg = LinearRegression()\n", + "plot_learning_curves(lin_reg, X, y)\n", + "plt.axis([0, 80, 0, 3]) # not shown in the book\n", + "save_fig(\"underfitting_learning_curves_plot\") # not shown\n", + "plt.show() # not shown" + ] + }, + { + "cell_type": "code", + "execution_count": 35, "metadata": { "collapsed": false, "deletable": true, @@ -665,9 +779,9 @@ " ))\n", "\n", "plot_learning_curves(polynomial_regression, X, y)\n", - "plt.axis([0, 80, 0, 3])\n", - "save_fig(\"learning_curves_plot\")\n", - "plt.show()" + "plt.axis([0, 80, 0, 3]) # not shown\n", + "save_fig(\"learning_curves_plot\") # not shown\n", + "plt.show() # not shown" ] }, { @@ -682,7 +796,7 @@ }, { "cell_type": "code", - "execution_count": 28, + "execution_count": 36, "metadata": { "collapsed": false, "deletable": true, @@ -729,7 +843,7 @@ }, { "cell_type": "code", - "execution_count": 29, + "execution_count": 37, "metadata": { "collapsed": false, "deletable": true, @@ -745,7 +859,7 @@ }, { "cell_type": "code", - "execution_count": 30, + "execution_count": 38, "metadata": { "collapsed": false, "deletable": true, @@ -760,7 +874,7 @@ }, { "cell_type": "code", - "execution_count": 31, + "execution_count": 39, "metadata": { "collapsed": false, "deletable": true, @@ -775,7 +889,7 @@ }, { "cell_type": "code", - "execution_count": 32, + "execution_count": 40, "metadata": { "collapsed": false, "deletable": true, @@ -798,7 +912,7 @@ }, { 
"cell_type": "code", - "execution_count": 33, + "execution_count": 41, "metadata": { "collapsed": false, "deletable": true, @@ -814,7 +928,7 @@ }, { "cell_type": "code", - "execution_count": 34, + "execution_count": 42, "metadata": { "collapsed": false, "deletable": true, @@ -830,7 +944,7 @@ }, { "cell_type": "code", - "execution_count": 35, + "execution_count": 43, "metadata": { "collapsed": false, "deletable": true, @@ -894,7 +1008,7 @@ }, { "cell_type": "code", - "execution_count": 36, + "execution_count": 44, "metadata": { "collapsed": false, "deletable": true, @@ -904,8 +1018,7 @@ "source": [ "from sklearn.base import clone\n", "sgd_reg = SGDRegressor(n_iter=1, warm_start=True, penalty=None,\n", - " learning_rate=\"constant\", eta0=0.0005,\n", - " random_state=42)\n", + " learning_rate=\"constant\", eta0=0.0005, random_state=42)\n", "\n", "minimum_val_error = float(\"inf\")\n", "best_epoch = None\n", @@ -917,14 +1030,23 @@ " if val_error < minimum_val_error:\n", " minimum_val_error = val_error\n", " best_epoch = epoch\n", - " best_model = clone(sgd_reg)\n", - "\n", + " best_model = clone(sgd_reg)" + ] + }, + { + "cell_type": "code", + "execution_count": 45, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ "best_epoch, best_model" ] }, { "cell_type": "code", - "execution_count": 37, + "execution_count": 46, "metadata": { "collapsed": false, "deletable": true, @@ -934,12 +1056,12 @@ "source": [ "%matplotlib inline\n", "import matplotlib.pyplot as plt\n", - "import numpy as np\n" + "import numpy as np" ] }, { "cell_type": "code", - "execution_count": 38, + "execution_count": 47, "metadata": { "collapsed": false, "deletable": true, @@ -970,7 +1092,7 @@ }, { "cell_type": "code", - "execution_count": 39, + "execution_count": 48, "metadata": { "collapsed": false, "deletable": true, @@ -1048,7 +1170,7 @@ }, { "cell_type": "code", - "execution_count": 40, + "execution_count": 49, "metadata": { "collapsed": false, "deletable": true, @@ -1073,7 +1195,7 @@ }, { "cell_type": "code", - "execution_count": 41, + "execution_count": 50, "metadata": { "collapsed": false, "deletable": true, @@ -1088,7 +1210,7 @@ }, { "cell_type": "code", - "execution_count": 42, + "execution_count": 51, "metadata": { "collapsed": false, "deletable": true, @@ -1101,7 +1223,54 @@ }, { "cell_type": "code", - "execution_count": 43, + "execution_count": 52, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "X = iris[\"data\"][:, 3:] # petal width\n", + "y = (iris[\"target\"] == 2).astype(np.int) # 1 if Iris-Virginica, else 0" + ] + }, + { + "cell_type": "code", + "execution_count": 53, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "from sklearn.linear_model import LogisticRegression\n", + "log_reg = LogisticRegression()\n", + "log_reg.fit(X, y)" + ] + }, + { + "cell_type": "code", + "execution_count": 54, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "X_new = np.linspace(0, 3, 1000).reshape(-1, 1)\n", + "y_proba = log_reg.predict_proba(X_new)\n", + "\n", + "plt.plot(X_new, y_proba[:, 1], \"g-\", linewidth=2, label=\"Iris-Virginica\")\n", + "plt.plot(X_new, y_proba[:, 0], \"b--\", linewidth=2, label=\"Not Iris-Virginica\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The figure in the book actually is actually a bit fancier:" + ] + }, + { + "cell_type": "code", + "execution_count": 55, "metadata": { "collapsed": false, "deletable": true, @@ -1109,14 +1278,6 @@ }, "outputs": [], "source": [ - 
"from sklearn.linear_model import LogisticRegression\n", - "\n", - "X = iris[\"data\"][:, 3:] # petal width\n", - "y = (iris[\"target\"] == 2).astype(np.int) # 1 if Iris-Virginica, else 0\n", - "\n", - "log_reg = LogisticRegression()\n", - "log_reg.fit(X, y)\n", - "\n", "X_new = np.linspace(0, 3, 1000).reshape(-1, 1)\n", "y_proba = log_reg.predict_proba(X_new)\n", "decision_boundary = X_new[y_proba[:, 1] >= 0.5][0]\n", @@ -1140,7 +1301,7 @@ }, { "cell_type": "code", - "execution_count": 44, + "execution_count": 56, "metadata": { "collapsed": false, "deletable": true, @@ -1153,7 +1314,7 @@ }, { "cell_type": "code", - "execution_count": 45, + "execution_count": 57, "metadata": { "collapsed": false, "deletable": true, @@ -1166,7 +1327,7 @@ }, { "cell_type": "code", - "execution_count": 46, + "execution_count": 58, "metadata": { "collapsed": false, "deletable": true, @@ -1214,7 +1375,7 @@ }, { "cell_type": "code", - "execution_count": 47, + "execution_count": 59, "metadata": { "collapsed": false, "deletable": true, @@ -1222,14 +1383,21 @@ }, "outputs": [], "source": [ - "from sklearn.linear_model import LogisticRegression\n", - "\n", "X = iris[\"data\"][:, (2, 3)] # petal length, petal width\n", "y = iris[\"target\"]\n", "\n", - "softmax_reg = LogisticRegression(multi_class=\"multinomial\", solver=\"lbfgs\", C=10)\n", - "softmax_reg.fit(X, y)\n", - "\n", + "softmax_reg = LogisticRegression(multi_class=\"multinomial\",solver=\"lbfgs\", C=10)\n", + "softmax_reg.fit(X, y)" + ] + }, + { + "cell_type": "code", + "execution_count": 60, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ "x0, x1 = np.meshgrid(\n", " np.linspace(0, 8, 500).reshape(-1, 1),\n", " np.linspace(0, 3.5, 200).reshape(-1, 1),\n", @@ -1264,7 +1432,7 @@ }, { "cell_type": "code", - "execution_count": 48, + "execution_count": 61, "metadata": { "collapsed": false, "deletable": true, @@ -1277,7 +1445,7 @@ }, { "cell_type": "code", - "execution_count": 49, + "execution_count": 62, "metadata": { "collapsed": false, "deletable": true, @@ -1300,21 +1468,535 @@ }, { "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 1. to 11." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "See appendix A." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 12. Batch Gradient Descent with early stopping for Softmax Regression\n", + "(without using Scikit-Learn)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's start by loading the data. We will just reuse the Iris dataset we loaded earlier." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 63, "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "X = iris[\"data\"][:, (2, 3)] # petal length, petal width\n", + "y = iris[\"target\"]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We need to add the bias term for every instance ($x_0 = 1$):" + ] + }, + { + "cell_type": "code", + "execution_count": 64, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "X_with_bias = np.c_[np.ones([len(X), 1]), X]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "And let's set the random seed so the output of this exercise solution is reproducible:" + ] + }, + { + "cell_type": "code", + "execution_count": 65, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "np.random.seed(2042)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The easiest option to split the dataset into a training set, a validation set and a test set would be to use Scikit-Learn's `train_test_split()` function, but the point of this exercise is to try to understand the algorithms by implementing them manually. So here is one possible implementation:" + ] + }, + { + "cell_type": "code", + "execution_count": 66, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "test_ratio = 0.2\n", + "validation_ratio = 0.2\n", + "total_size = len(X_with_bias)\n", + "\n", + "test_size = int(total_size * test_ratio)\n", + "validation_size = int(total_size * validation_ratio)\n", + "train_size = total_size - test_size - validation_size\n", + "\n", + "rnd_indices = np.random.permutation(total_size)\n", + "\n", + "X_train = X_with_bias[rnd_indices[:train_size]]\n", + "y_train = y[rnd_indices[:train_size]]\n", + "X_valid = X_with_bias[rnd_indices[train_size:-test_size]]\n", + "y_valid = y[rnd_indices[train_size:-test_size]]\n", + "X_test = X_with_bias[rnd_indices[-test_size:]]\n", + "y_test = y[rnd_indices[-test_size:]]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The targets are currently class indices (0, 1 or 2), but we need target class probabilities to train the Softmax Regression model. Each instance will have target class probabilities equal to 0.0 for all classes except for the target class, which will have a probability of 1.0 (in other words, the vector of class probabilities for any given instance is a one-hot vector).
Let's write a small function to convert the vector of class indices into a matrix containing a one-hot vector for each instance:" + ] + }, + { + "cell_type": "code", + "execution_count": 67, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "def to_one_hot(y):\n", + " n_classes = y.max() + 1\n", + " m = len(y)\n", + " Y_one_hot = np.zeros((m, n_classes))\n", + " Y_one_hot[np.arange(m), y] = 1\n", + " return Y_one_hot" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's test this function on the first 10 instances:" + ] + }, + { + "cell_type": "code", + "execution_count": 68, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "y_train[:10]" + ] + }, + { + "cell_type": "code", + "execution_count": 69, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "to_one_hot(y_train[:10])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Looks good, so let's create the target class probabilities matrix for the training set and the test set:" + ] + }, + { + "cell_type": "code", + "execution_count": 70, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "Y_train_one_hot = to_one_hot(y_train)\n", + "Y_valid_one_hot = to_one_hot(y_valid)\n", + "Y_test_one_hot = to_one_hot(y_test)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now let's implement the Softmax function. Recall that it is defined by the following equation:\n", + "\n", + "$\\sigma\\left(\\mathbf{s}(\\mathbf{x})\\right)_k = \\dfrac{\\exp\\left(s_k(\\mathbf{x})\\right)}{\\sum\\limits_{j=1}^{K}{\\exp\\left(s_j(\\mathbf{x})\\right)}}$" + ] + }, + { + "cell_type": "code", + "execution_count": 71, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "def softmax(logits):\n", + " exps = np.exp(logits)\n", + " exp_sums = np.sum(exps, axis=1, keepdims=True)\n", + " return exps / exp_sums" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We are almost ready to start training. Let's define the number of inputs and outputs:" + ] + }, + { + "cell_type": "code", + "execution_count": 72, + "metadata": { + "collapsed": true, "deletable": true, "editable": true }, + "outputs": [], "source": [ - "**Coming soon**" + "n_inputs = X_train.shape[1] # == 3 (2 features plus the bias term)\n", + "n_outputs = len(np.unique(y_train)) # == 3 (3 iris classes)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now here comes the hardest part: training! Theoretically, it's simple: it's just a matter of translating the math equations into Python code. But in practice, it can be quite tricky: in particular, it's easy to mix up the order of the terms, or the indices. You can even end up with code that looks like it's working but is actually not computing exactly the right thing. When unsure, you should write down the shape of each term in the equation and make sure the corresponding terms in your code match closely. It can also help to evaluate each term independently and print them out. 
The good news is that you won't have to do this every day, since all this is well implemented by Scikit-Learn, but it will help you understand what's going on under the hood.\n", + "\n", + "So the equations we will need are the cost function:\n", + "\n", + "$J(\mathbf{\Theta}) =\n", + "- \dfrac{1}{m}\sum\limits_{i=1}^{m}\sum\limits_{k=1}^{K}{y_k^{(i)}\log\left(\hat{p}_k^{(i)}\right)}$\n", + "\n", + "And the equation for the gradients:\n", + "\n", + "$\nabla_{\mathbf{\theta}^{(k)}} \, J(\mathbf{\Theta}) = \dfrac{1}{m} \sum\limits_{i=1}^{m}{ \left ( \hat{p}^{(i)}_k - y_k^{(i)} \right ) \mathbf{x}^{(i)}}$\n", + "\n", + "Note that $\log\left(\hat{p}_k^{(i)}\right)$ may not be computable if $\hat{p}_k^{(i)} = 0$. So we will add a tiny value $\epsilon$ to $\hat{p}_k^{(i)}$ inside the log to avoid getting `nan` values." + ] + }, + { + "cell_type": "code", + "execution_count": 73, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "eta = 0.01\n", + "n_iterations = 5001\n", + "m = len(X_train)\n", + "epsilon = 1e-7\n", + "\n", + "Theta = np.random.randn(n_inputs, n_outputs)\n", + "\n", + "for iteration in range(n_iterations):\n", + " logits = X_train.dot(Theta)\n", + " Y_proba = softmax(logits)\n", + " loss = -np.mean(np.sum(Y_train_one_hot * np.log(Y_proba + epsilon), axis=1))\n", + " error = Y_proba - Y_train_one_hot\n", + " if iteration % 500 == 0:\n", + " print(iteration, loss)\n", + " gradients = 1/m * X_train.T.dot(error)\n", + " Theta = Theta - eta * gradients" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "And that's it! The Softmax model is trained. Let's look at the model parameters:" + ] + }, + { + "cell_type": "code", + "execution_count": 74, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "Theta" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's make predictions for the validation set and check the accuracy score:" + ] + }, + { + "cell_type": "code", + "execution_count": 75, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "logits = X_valid.dot(Theta)\n", + "Y_proba = softmax(logits)\n", + "y_predict = np.argmax(Y_proba, axis=1)\n", + "\n", + "accuracy_score = np.mean(y_predict == y_valid)\n", + "accuracy_score" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Well, this model looks pretty good. For the sake of the exercise, let's add a bit of $\ell_2$ regularization. The following training code is similar to the one above, but the loss now has an additional $\ell_2$ penalty, and the gradients have the proper additional term (note that we don't regularize the first element of `Theta` since this corresponds to the bias term). Also, let's try increasing the learning rate `eta`."
+ ] + }, + { + "cell_type": "code", + "execution_count": 76, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "eta = 0.1\n", + "n_iterations = 5001\n", + "m = len(X_train)\n", + "epsilon = 1e-7\n", + "alpha = 0.1 # regularization hyperparameter\n", + "\n", + "Theta = np.random.randn(n_inputs, n_outputs)\n", + "\n", + "for iteration in range(n_iterations):\n", + " logits = X_train.dot(Theta)\n", + " Y_proba = softmax(logits)\n", + " xentropy_loss = -np.mean(np.sum(Y_train_one_hot * np.log(Y_proba + epsilon), axis=1))\n", + " l2_loss = 1/2 * np.sum(np.square(Theta[1:]))\n", + " loss = xentropy_loss + alpha * l2_loss\n", + " error = Y_proba - Y_train_one_hot\n", + " if iteration % 500 == 0:\n", + " print(iteration, loss)\n", + " gradients = 1/m * X_train.T.dot(error) + np.r_[np.zeros([1, n_inputs]), alpha * Theta[1:]]\n", + " Theta = Theta - eta * gradients" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Because of the additional $\\ell_2$ penalty, the loss seems greater than earlier, but perhaps this model will perform better? Let's find out:" + ] + }, + { + "cell_type": "code", + "execution_count": 77, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "logits = X_valid.dot(Theta)\n", + "Y_proba = softmax(logits)\n", + "y_predict = np.argmax(Y_proba, axis=1)\n", + "\n", + "accuracy_score = np.mean(y_predict == y_valid)\n", + "accuracy_score" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Cool, perfect accuracy! We probably just got lucky with this validation set, but still, it's pleasant." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now let's add early stopping. For this we just need to measure the loss on the validation set at every iteration and stop when the error starts growing." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 78, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "eta = 0.1 \n", + "n_iterations = 5001\n", + "m = len(X_train)\n", + "epsilon = 1e-7\n", + "alpha = 0.1 # regularization hyperparameter\n", + "best_loss = np.infty\n", + "\n", + "Theta = np.random.randn(n_inputs, n_outputs)\n", + "\n", + "for iteration in range(n_iterations):\n", + " logits = X_train.dot(Theta)\n", + " Y_proba = softmax(logits)\n", + " xentropy_loss = -np.mean(np.sum(Y_train_one_hot * np.log(Y_proba + epsilon), axis=1))\n", + " l2_loss = 1/2 * np.sum(np.square(Theta[1:]))\n", + " loss = xentropy_loss + alpha * l2_loss\n", + " error = Y_proba - Y_train_one_hot\n", + " gradients = 1/m * X_train.T.dot(error) + np.r_[np.zeros([1, n_inputs]), alpha * Theta[1:]]\n", + " Theta = Theta - eta * gradients\n", + "\n", + " logits = X_valid.dot(Theta)\n", + " Y_proba = softmax(logits)\n", + " xentropy_loss = -np.mean(np.sum(Y_valid_one_hot * np.log(Y_proba + epsilon), axis=1))\n", + " l2_loss = 1/2 * np.sum(np.square(Theta[1:]))\n", + " loss = xentropy_loss + alpha * l2_loss\n", + " if iteration % 500 == 0:\n", + " print(iteration, loss)\n", + " if loss < best_loss:\n", + " best_loss = loss\n", + " else:\n", + " print(iteration - 1, best_loss)\n", + " print(iteration, loss, \"early stopping!\")\n", + " break" + ] + }, + { + "cell_type": "code", + "execution_count": 79, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "logits = X_valid.dot(Theta)\n", + "Y_proba = softmax(logits)\n", + "y_predict = np.argmax(Y_proba, axis=1)\n", + "\n", + "accuracy_score = np.mean(y_predict == y_valid)\n", + "accuracy_score" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Still perfect, but faster." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now let's plot the model's predictions on the whole dataset:" + ] + }, + { + "cell_type": "code", + "execution_count": 80, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "x0, x1 = np.meshgrid(\n", + " np.linspace(0, 8, 500).reshape(-1, 1),\n", + " np.linspace(0, 3.5, 200).reshape(-1, 1),\n", + " )\n", + "X_new = np.c_[x0.ravel(), x1.ravel()]\n", + "X_new_with_bias = np.c_[np.ones([len(X_new), 1]), X_new]\n", + "\n", + "logits = X_new_with_bias.dot(Theta)\n", + "Y_proba = softmax(logits)\n", + "y_predict = np.argmax(Y_proba, axis=1)\n", + "\n", + "zz1 = Y_proba[:, 1].reshape(x0.shape)\n", + "zz = y_predict.reshape(x0.shape)\n", + "\n", + "plt.figure(figsize=(10, 4))\n", + "plt.plot(X[y==2, 0], X[y==2, 1], \"g^\", label=\"Iris-Virginica\")\n", + "plt.plot(X[y==1, 0], X[y==1, 1], \"bs\", label=\"Iris-Versicolor\")\n", + "plt.plot(X[y==0, 0], X[y==0, 1], \"yo\", label=\"Iris-Setosa\")\n", + "\n", + "from matplotlib.colors import ListedColormap\n", + "custom_cmap = ListedColormap(['#fafab0','#9898ff','#a0faa0'])\n", + "\n", + "plt.contourf(x0, x1, zz, cmap=custom_cmap, linewidth=5)\n", + "contour = plt.contour(x0, x1, zz1, cmap=plt.cm.brg)\n", + "plt.clabel(contour, inline=1, fontsize=12)\n", + "plt.xlabel(\"Petal length\", fontsize=14)\n", + "plt.ylabel(\"Petal width\", fontsize=14)\n", + "plt.legend(loc=\"upper left\", fontsize=14)\n", + "plt.axis([0, 7, 0, 3.5])\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "And now let's measure the final model's accuracy on the test set:" + ] + }, + { + "cell_type": "code", + "execution_count": 81, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "logits = X_test.dot(Theta)\n", + "Y_proba = softmax(logits)\n", + "y_predict = np.argmax(Y_proba, axis=1)\n", + "\n", + "accuracy_score = np.mean(y_predict == y_test)\n", + "accuracy_score" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Our perfect model turns out to have slight imperfections. This variability is likely due to the very small size of the dataset: depending on how you sample the training set, validation set and the test set, you can get quite different results. Try changing the random seed and running the code again a few times; you will see that the results vary." ] }, { "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": true, - "deletable": true, - "editable": true + "collapsed": true }, "outputs": [], "source": [] @@ -1336,7 +2018,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.5.2+" + "version": "3.5.3" }, "nav_menu": {}, "toc": {