diff --git a/01_the_machine_learning_landscape.ipynb b/01_the_machine_learning_landscape.ipynb index 24f8aaa..622a80a 100644 --- a/01_the_machine_learning_landscape.ipynb +++ b/01_the_machine_learning_landscape.ipynb @@ -124,7 +124,7 @@ "outputs": [], "source": [ "# Download the data\n", - "import urllib\n", + "import urllib.request\n", "DOWNLOAD_ROOT = \"https://raw.githubusercontent.com/ageron/handson-ml2/master/\"\n", "os.makedirs(datapath, exist_ok=True)\n", "for filename in (\"oecd_bli_2015.csv\", \"gdp_per_capita.csv\"):\n", @@ -785,7 +785,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.6" + "version": "3.7.9" }, "nav_menu": {}, "toc": { diff --git a/02_end_to_end_machine_learning_project.ipynb b/02_end_to_end_machine_learning_project.ipynb index 0aafdf6..b8619d6 100644 --- a/02_end_to_end_machine_learning_project.ipynb +++ b/02_end_to_end_machine_learning_project.ipynb @@ -73,11 +73,7 @@ " print(\"Saving figure\", fig_id)\n", " if tight_layout:\n", " plt.tight_layout()\n", - " plt.savefig(path, format=fig_extension, dpi=resolution)\n", - "\n", - "# Ignore useless warnings (see SciPy issue #5998)\n", - "import warnings\n", - "warnings.filterwarnings(action=\"ignore\", message=\"^internal gelsd\")" + " plt.savefig(path, format=fig_extension, dpi=resolution)" ] }, { @@ -95,7 +91,7 @@ "source": [ "import os\n", "import tarfile\n", - "import urllib\n", + "import urllib.request\n", "\n", "DOWNLOAD_ROOT = \"https://raw.githubusercontent.com/ageron/handson-ml2/master/\"\n", "HOUSING_PATH = os.path.join(\"datasets\", \"housing\")\n", @@ -490,9 +486,9 @@ "outputs": [], "source": [ "housing.plot(kind=\"scatter\", x=\"longitude\", y=\"latitude\", alpha=0.4,\n", - " s=housing[\"population\"]/100, label=\"population\", figsize=(10,7),\n", - " c=\"median_house_value\", cmap=plt.get_cmap(\"jet\"), colorbar=True,\n", - " sharex=False)\n", + " s=housing[\"population\"]/100, label=\"population\", figsize=(10,7),\n", + " c=\"median_house_value\", cmap=plt.get_cmap(\"jet\"), colorbar=True,\n", + " sharex=False)\n", "plt.legend()\n", "save_fig(\"housing_prices_scatterplot\")" ] @@ -522,10 +518,9 @@ "import matplotlib.image as mpimg\n", "california_img=mpimg.imread(os.path.join(images_path, filename))\n", "ax = housing.plot(kind=\"scatter\", x=\"longitude\", y=\"latitude\", figsize=(10,7),\n", - " s=housing['population']/100, label=\"Population\",\n", - " c=\"median_house_value\", cmap=plt.get_cmap(\"jet\"),\n", - " colorbar=False, alpha=0.4,\n", - " )\n", + " s=housing['population']/100, label=\"Population\",\n", + " c=\"median_house_value\", cmap=plt.get_cmap(\"jet\"),\n", + " colorbar=False, alpha=0.4)\n", "plt.imshow(california_img, extent=[-124.55, -113.80, 32.45, 42.05], alpha=0.5,\n", " cmap=plt.get_cmap(\"jet\"))\n", "plt.ylabel(\"Latitude\", fontsize=14)\n", @@ -1694,6 +1689,13 @@ "Question: Try a Support Vector Machine regressor (`sklearn.svm.SVR`), with various hyperparameters such as `kernel=\"linear\"` (with various values for the `C` hyperparameter) or `kernel=\"rbf\"` (with various values for the `C` and `gamma` hyperparameters). Don't worry about what these hyperparameters mean for now. How does the best `SVR` predictor perform?" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Warning**: the following cell may take close to 30 minutes to run, or more depending on your hardware." 
+ ] + }, { "cell_type": "code", "execution_count": 117, @@ -1768,6 +1770,13 @@ "Question: Try replacing `GridSearchCV` with `RandomizedSearchCV`." ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Warning**: the following cell may take close to 45 minutes to run, or more depending on your hardware." + ] + }, { "cell_type": "code", "execution_count": 120, @@ -2137,6 +2146,13 @@ "Question: Automatically explore some preparation options using `GridSearchCV`." ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Warning**: the following cell may take close to 45 minutes to run, or more depending on your hardware." + ] + }, { "cell_type": "code", "execution_count": 137, @@ -2193,7 +2209,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.8" + "version": "3.7.9" }, "nav_menu": { "height": "279px", diff --git a/03_classification.ipynb b/03_classification.ipynb index 5d76ee1..cd5ac1e 100644 --- a/03_classification.ipynb +++ b/03_classification.ipynb @@ -345,7 +345,7 @@ "* first, Scikit-Learn and other libraries evolve, and algorithms get tweaked a bit, which may change the exact result you get. If you use the latest Scikit-Learn version (and in general, you really should), you probably won't be using the exact same version I used when I wrote the book or this notebook, hence the difference. I try to keep this notebook reasonably up to date, but I can't change the numbers on the pages in your copy of the book.\n", "* second, many training algorithms are stochastic, meaning they rely on randomness. In principle, it's possible to get consistent outputs from a random number generator by setting the seed from which it generates the pseudo-random numbers (which is why you will see `random_state=42` or `np.random.seed(42)` pretty often). However, sometimes this does not suffice due to the other factors listed here.\n", "* third, if the training algorithm runs across multiple threads (as do some algorithms implemented in C) or across multiple processes (e.g., when using the `n_jobs` argument), then the precise order in which operations will run is not always guaranteed, and thus the exact result may vary slightly.\n", - "* lastly, other things may prevent perfect reproducibility, such as Python maps and sets whose order is not guaranteed to be stable across sessions, or the order of files in a directory which is also not guaranteed." + "* lastly, other things may prevent perfect reproducibility, such as Python dicts and sets whose order is not guaranteed to be stable across sessions, or the order of files in a directory which is also not guaranteed." 
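For context on the second bullet above, a minimal sketch (toy data and `SGDClassifier` chosen purely for illustration, not taken from the notebook) of the two seeding mechanisms it mentions, `np.random.seed(42)` for NumPy's global generator and `random_state=42` for the estimator:

import numpy as np
from sklearn.linear_model import SGDClassifier

np.random.seed(42)                      # seeds NumPy's global pseudo-random generator
X = np.random.rand(100, 2)              # toy data, reproducible thanks to the seed above
y = (X[:, 0] > 0.5).astype(int)

# Seeding the estimator makes its own shuffling repeatable; as noted above,
# multi-threaded or multi-process execution can still cause small variations.
sgd_clf = SGDClassifier(random_state=42)
sgd_clf.fit(X, y)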
] }, { @@ -393,7 +393,7 @@ }, { "cell_type": "code", - "execution_count": 27, + "execution_count": 25, "metadata": {}, "outputs": [], "source": [ @@ -412,7 +412,7 @@ }, { "cell_type": "code", - "execution_count": 28, + "execution_count": 27, "metadata": {}, "outputs": [], "source": [ @@ -481,7 +481,7 @@ }, { "cell_type": "code", - "execution_count": 30, + "execution_count": 34, "metadata": {}, "outputs": [], "source": [ @@ -491,7 +491,7 @@ }, { "cell_type": "code", - "execution_count": 31, + "execution_count": 35, "metadata": {}, "outputs": [], "source": [ @@ -502,7 +502,7 @@ }, { "cell_type": "code", - "execution_count": 32, + "execution_count": 36, "metadata": {}, "outputs": [], "source": [ @@ -533,7 +533,7 @@ }, { "cell_type": "code", - "execution_count": 33, + "execution_count": 37, "metadata": {}, "outputs": [], "source": [ @@ -542,7 +542,7 @@ }, { "cell_type": "code", - "execution_count": 35, + "execution_count": 38, "metadata": {}, "outputs": [], "source": [ @@ -564,7 +564,7 @@ }, { "cell_type": "code", - "execution_count": 42, + "execution_count": 39, "metadata": {}, "outputs": [], "source": [ @@ -573,7 +573,7 @@ }, { "cell_type": "code", - "execution_count": 43, + "execution_count": 40, "metadata": {}, "outputs": [], "source": [ @@ -582,7 +582,7 @@ }, { "cell_type": "code", - "execution_count": 44, + "execution_count": 41, "metadata": {}, "outputs": [], "source": [ @@ -591,7 +591,7 @@ }, { "cell_type": "code", - "execution_count": 45, + "execution_count": 42, "metadata": {}, "outputs": [], "source": [ @@ -600,7 +600,7 @@ }, { "cell_type": "code", - "execution_count": 46, + "execution_count": 43, "metadata": {}, "outputs": [], "source": [ @@ -616,7 +616,7 @@ }, { "cell_type": "code", - "execution_count": 47, + "execution_count": 44, "metadata": {}, "outputs": [], "source": [ @@ -627,7 +627,7 @@ }, { "cell_type": "code", - "execution_count": 50, + "execution_count": 45, "metadata": {}, "outputs": [], "source": [ @@ -651,7 +651,7 @@ }, { "cell_type": "code", - "execution_count": 53, + "execution_count": 46, "metadata": {}, "outputs": [], "source": [ @@ -669,7 +669,7 @@ }, { "cell_type": "code", - "execution_count": 54, + "execution_count": 47, "metadata": {}, "outputs": [], "source": [ @@ -681,7 +681,7 @@ }, { "cell_type": "code", - "execution_count": 55, + "execution_count": 48, "metadata": {}, "outputs": [], "source": [ @@ -691,7 +691,7 @@ }, { "cell_type": "code", - "execution_count": 57, + "execution_count": 49, "metadata": {}, "outputs": [], "source": [ @@ -713,7 +713,7 @@ }, { "cell_type": "code", - "execution_count": 58, + "execution_count": 50, "metadata": {}, "outputs": [], "source": [ @@ -722,7 +722,7 @@ }, { "cell_type": "code", - "execution_count": 59, + "execution_count": 51, "metadata": {}, "outputs": [], "source": [ @@ -732,7 +732,7 @@ }, { "cell_type": "code", - "execution_count": 60, + "execution_count": 52, "metadata": {}, "outputs": [], "source": [ @@ -836,6 +836,13 @@ "sgd_clf.decision_function([some_digit])" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Warning**: the following two cells may take close to 30 minutes to run, or more depending on your hardware." + ] + }, { "cell_type": "code", "execution_count": 62, @@ -1202,7 +1209,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "**Warning**: the next cell may take hours to run, depending on your hardware." + "**Warning**: the next cell may take close to 16 hours to run, or more depending on your hardware." 
] }, { @@ -1348,6 +1355,13 @@ "knn_clf.fit(X_train_augmented, y_train_augmented)" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Warning**: the following cell may take close to an hour to run, depending on your hardware." + ] + }, { "cell_type": "code", "execution_count": 99, @@ -1918,7 +1932,7 @@ "source": [ "import os\n", "import tarfile\n", - "import urllib\n", + "import urllib.request\n", "\n", "DOWNLOAD_ROOT = \"http://spamassassin.apache.org/old/publiccorpus/\"\n", "HAM_URL = DOWNLOAD_ROOT + \"20030228_easy_ham.tar.bz2\"\n", @@ -2149,7 +2163,7 @@ }, { "cell_type": "code", - "execution_count": 185, + "execution_count": 142, "metadata": {}, "outputs": [], "source": [ @@ -2510,7 +2524,7 @@ }, { "cell_type": "code", - "execution_count": 183, + "execution_count": 158, "metadata": {}, "outputs": [], "source": [ @@ -2533,7 +2547,7 @@ }, { "cell_type": "code", - "execution_count": 184, + "execution_count": 159, "metadata": {}, "outputs": [], "source": [ diff --git a/04_training_linear_models.ipynb b/04_training_linear_models.ipynb index 0ac4d1a..f91c9cc 100644 --- a/04_training_linear_models.ipynb +++ b/04_training_linear_models.ipynb @@ -79,11 +79,7 @@ " print(\"Saving figure\", fig_id)\n", " if tight_layout:\n", " plt.tight_layout()\n", - " plt.savefig(path, format=fig_extension, dpi=resolution)\n", - "\n", - "# Ignore useless warnings (see SciPy issue #5998)\n", - "import warnings\n", - "warnings.filterwarnings(action=\"ignore\", message=\"^internal gelsd\")" + " plt.savefig(path, format=fig_extension, dpi=resolution)" ] }, { @@ -1797,7 +1793,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.6" + "version": "3.7.8" }, "nav_menu": {}, "toc": { diff --git a/05_support_vector_machines.ipynb b/05_support_vector_machines.ipynb index 2fa5543..208cf18 100644 --- a/05_support_vector_machines.ipynb +++ b/05_support_vector_machines.ipynb @@ -1566,7 +1566,14 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "This looks pretty low but remember we only trained the model on 1,000 instances. Let's retrain the best estimator on the whole training set (run this at night, it will take hours):" + "This looks pretty low but remember we only trained the model on 1,000 instances. Let's retrain the best estimator on the whole training set:" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Warning**: the following cell may take hours to run, depending on your hardware." ] }, { @@ -1830,7 +1837,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.6" + "version": "3.7.8" }, "nav_menu": {}, "toc": { diff --git a/06_decision_trees.ipynb b/06_decision_trees.ipynb index a8237ac..504acaa 100644 --- a/06_decision_trees.ipynb +++ b/06_decision_trees.ipynb @@ -729,7 +729,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.6" + "version": "3.7.8" }, "nav_menu": { "height": "309px", diff --git a/07_ensemble_learning_and_random_forests.ipynb b/07_ensemble_learning_and_random_forests.ipynb index f4f135e..63a224e 100644 --- a/07_ensemble_learning_and_random_forests.ipynb +++ b/07_ensemble_learning_and_random_forests.ipynb @@ -181,6 +181,13 @@ " print(clf.__class__.__name__, accuracy_score(y_test, y_pred))" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Note**: the results in this notebook may differ slightly from the book, as Scikit-Learn algorithms sometimes get tweaked." 
+ ] + }, { "cell_type": "markdown", "metadata": {}, @@ -535,21 +542,26 @@ "\n", "fix, axes = plt.subplots(ncols=2, figsize=(10,4), sharey=True)\n", "for subplot, learning_rate in ((0, 1), (1, 0.5)):\n", - " sample_weights = np.ones(m)\n", + " sample_weights = np.ones(m) / m\n", " plt.sca(axes[subplot])\n", " for i in range(5):\n", - " svm_clf = SVC(kernel=\"rbf\", C=0.05, gamma=\"scale\", random_state=42)\n", - " svm_clf.fit(X_train, y_train, sample_weight=sample_weights)\n", + " svm_clf = SVC(kernel=\"rbf\", C=0.2, gamma=0.6, random_state=42)\n", + " svm_clf.fit(X_train, y_train, sample_weight=sample_weights * m)\n", " y_pred = svm_clf.predict(X_train)\n", - " sample_weights[y_pred != y_train] *= (1 + learning_rate)\n", + "\n", + " r = sample_weights[y_pred != y_train].sum() / sample_weights.sum() # equation 7-1\n", + " alpha = learning_rate * np.log((1 - r) / r) # equation 7-2\n", + " sample_weights[y_pred != y_train] *= np.exp(alpha) # equation 7-3\n", + " sample_weights /= sample_weights.sum() # normalization step\n", + "\n", " plot_decision_boundary(svm_clf, X, y, alpha=0.2)\n", " plt.title(\"learning_rate = {}\".format(learning_rate), fontsize=16)\n", " if subplot == 0:\n", - " plt.text(-0.7, -0.65, \"1\", fontsize=14)\n", - " plt.text(-0.6, -0.10, \"2\", fontsize=14)\n", - " plt.text(-0.5, 0.10, \"3\", fontsize=14)\n", - " plt.text(-0.4, 0.55, \"4\", fontsize=14)\n", - " plt.text(-0.3, 0.90, \"5\", fontsize=14)\n", + " plt.text(-0.75, -0.95, \"1\", fontsize=14)\n", + " plt.text(-1.05, -0.95, \"2\", fontsize=14)\n", + " plt.text(1.0, -0.95, \"3\", fontsize=14)\n", + " plt.text(-1.45, -0.5, \"4\", fontsize=14)\n", + " plt.text(1.36, -0.95, \"5\", fontsize=14)\n", " else:\n", " plt.ylabel(\"\")\n", "\n", @@ -557,15 +569,6 @@ "plt.show()" ] }, - { - "cell_type": "code", - "execution_count": 32, - "metadata": {}, - "outputs": [], - "source": [ - "list(m for m in dir(ada_clf) if not m.startswith(\"_\") and m.endswith(\"_\"))" - ] - }, { "cell_type": "markdown", "metadata": {}, @@ -575,7 +578,7 @@ }, { "cell_type": "code", - "execution_count": 33, + "execution_count": 32, "metadata": {}, "outputs": [], "source": [ @@ -586,7 +589,7 @@ }, { "cell_type": "code", - "execution_count": 34, + "execution_count": 33, "metadata": {}, "outputs": [], "source": [ @@ -598,7 +601,7 @@ }, { "cell_type": "code", - "execution_count": 35, + "execution_count": 34, "metadata": {}, "outputs": [], "source": [ @@ -609,7 +612,7 @@ }, { "cell_type": "code", - "execution_count": 36, + "execution_count": 35, "metadata": {}, "outputs": [], "source": [ @@ -620,7 +623,7 @@ }, { "cell_type": "code", - "execution_count": 37, + "execution_count": 36, "metadata": {}, "outputs": [], "source": [ @@ -629,7 +632,7 @@ }, { "cell_type": "code", - "execution_count": 38, + "execution_count": 37, "metadata": {}, "outputs": [], "source": [ @@ -638,7 +641,7 @@ }, { "cell_type": "code", - "execution_count": 39, + "execution_count": 38, "metadata": {}, "outputs": [], "source": [ @@ -647,7 +650,7 @@ }, { "cell_type": "code", - "execution_count": 40, + "execution_count": 39, "metadata": {}, "outputs": [], "source": [ @@ -663,7 +666,7 @@ }, { "cell_type": "code", - "execution_count": 41, + "execution_count": 40, "metadata": {}, "outputs": [], "source": [ @@ -703,7 +706,7 @@ }, { "cell_type": "code", - "execution_count": 42, + "execution_count": 41, "metadata": {}, "outputs": [], "source": [ @@ -715,7 +718,7 @@ }, { "cell_type": "code", - "execution_count": 43, + "execution_count": 42, "metadata": {}, "outputs": [], "source": [ @@ -725,7 
+728,7 @@ }, { "cell_type": "code", - "execution_count": 44, + "execution_count": 43, "metadata": {}, "outputs": [], "source": [ @@ -755,7 +758,7 @@ }, { "cell_type": "code", - "execution_count": 45, + "execution_count": 44, "metadata": {}, "outputs": [], "source": [ @@ -778,7 +781,7 @@ }, { "cell_type": "code", - "execution_count": 46, + "execution_count": 45, "metadata": {}, "outputs": [], "source": [ @@ -787,7 +790,7 @@ }, { "cell_type": "code", - "execution_count": 47, + "execution_count": 46, "metadata": {}, "outputs": [], "source": [ @@ -816,7 +819,7 @@ }, { "cell_type": "code", - "execution_count": 48, + "execution_count": 47, "metadata": {}, "outputs": [], "source": [ @@ -840,7 +843,7 @@ }, { "cell_type": "code", - "execution_count": 49, + "execution_count": 48, "metadata": {}, "outputs": [], "source": [ @@ -849,7 +852,7 @@ }, { "cell_type": "code", - "execution_count": 50, + "execution_count": 49, "metadata": {}, "outputs": [], "source": [ @@ -865,7 +868,7 @@ }, { "cell_type": "code", - "execution_count": 51, + "execution_count": 50, "metadata": {}, "outputs": [], "source": [ @@ -878,7 +881,7 @@ }, { "cell_type": "code", - "execution_count": 52, + "execution_count": 51, "metadata": {}, "outputs": [], "source": [ @@ -892,7 +895,7 @@ }, { "cell_type": "code", - "execution_count": 53, + "execution_count": 52, "metadata": {}, "outputs": [], "source": [ @@ -906,7 +909,7 @@ }, { "cell_type": "code", - "execution_count": 54, + "execution_count": 53, "metadata": {}, "outputs": [], "source": [ @@ -915,7 +918,7 @@ }, { "cell_type": "code", - "execution_count": 55, + "execution_count": 54, "metadata": {}, "outputs": [], "source": [ @@ -966,7 +969,7 @@ }, { "cell_type": "code", - "execution_count": 56, + "execution_count": 55, "metadata": {}, "outputs": [], "source": [ @@ -975,7 +978,7 @@ }, { "cell_type": "code", - "execution_count": 57, + "execution_count": 56, "metadata": {}, "outputs": [], "source": [ @@ -994,7 +997,7 @@ }, { "cell_type": "code", - "execution_count": 58, + "execution_count": 57, "metadata": {}, "outputs": [], "source": [ @@ -1005,19 +1008,19 @@ }, { "cell_type": "code", - "execution_count": 59, + "execution_count": 58, "metadata": {}, "outputs": [], "source": [ "random_forest_clf = RandomForestClassifier(n_estimators=100, random_state=42)\n", "extra_trees_clf = ExtraTreesClassifier(n_estimators=100, random_state=42)\n", - "svm_clf = LinearSVC(random_state=42)\n", + "svm_clf = LinearSVC(max_iter=100, tol=20, random_state=42)\n", "mlp_clf = MLPClassifier(random_state=42)" ] }, { "cell_type": "code", - "execution_count": 60, + "execution_count": 59, "metadata": {}, "outputs": [], "source": [ @@ -1029,7 +1032,7 @@ }, { "cell_type": "code", - "execution_count": 61, + "execution_count": 60, "metadata": {}, "outputs": [], "source": [ @@ -1052,7 +1055,7 @@ }, { "cell_type": "code", - "execution_count": 62, + "execution_count": 61, "metadata": {}, "outputs": [], "source": [ @@ -1061,7 +1064,7 @@ }, { "cell_type": "code", - "execution_count": 63, + "execution_count": 62, "metadata": {}, "outputs": [], "source": [ @@ -1075,7 +1078,7 @@ }, { "cell_type": "code", - "execution_count": 64, + "execution_count": 63, "metadata": {}, "outputs": [], "source": [ @@ -1084,7 +1087,7 @@ }, { "cell_type": "code", - "execution_count": 65, + "execution_count": 64, "metadata": {}, "outputs": [], "source": [ @@ -1093,7 +1096,7 @@ }, { "cell_type": "code", - "execution_count": 66, + "execution_count": 65, "metadata": {}, "outputs": [], "source": [ @@ -1102,7 +1105,7 @@ }, { "cell_type": "code", - 
"execution_count": 67, + "execution_count": 66, "metadata": {}, "outputs": [], "source": [ @@ -1118,7 +1121,7 @@ }, { "cell_type": "code", - "execution_count": 68, + "execution_count": 67, "metadata": {}, "outputs": [], "source": [ @@ -1134,7 +1137,7 @@ }, { "cell_type": "code", - "execution_count": 69, + "execution_count": 68, "metadata": {}, "outputs": [], "source": [ @@ -1150,7 +1153,7 @@ }, { "cell_type": "code", - "execution_count": 70, + "execution_count": 69, "metadata": {}, "outputs": [], "source": [ @@ -1166,7 +1169,7 @@ }, { "cell_type": "code", - "execution_count": 71, + "execution_count": 70, "metadata": {}, "outputs": [], "source": [ @@ -1182,7 +1185,7 @@ }, { "cell_type": "code", - "execution_count": 72, + "execution_count": 71, "metadata": {}, "outputs": [], "source": [ @@ -1198,7 +1201,7 @@ }, { "cell_type": "code", - "execution_count": 73, + "execution_count": 72, "metadata": {}, "outputs": [], "source": [ @@ -1207,7 +1210,7 @@ }, { "cell_type": "code", - "execution_count": 74, + "execution_count": 73, "metadata": {}, "outputs": [], "source": [ @@ -1230,7 +1233,7 @@ }, { "cell_type": "code", - "execution_count": 75, + "execution_count": 74, "metadata": {}, "outputs": [], "source": [ @@ -1240,7 +1243,7 @@ }, { "cell_type": "code", - "execution_count": 76, + "execution_count": 75, "metadata": {}, "outputs": [], "source": [ @@ -1270,7 +1273,7 @@ }, { "cell_type": "code", - "execution_count": 77, + "execution_count": 76, "metadata": {}, "outputs": [], "source": [ @@ -1282,7 +1285,7 @@ }, { "cell_type": "code", - "execution_count": 78, + "execution_count": 77, "metadata": {}, "outputs": [], "source": [ @@ -1291,7 +1294,7 @@ }, { "cell_type": "code", - "execution_count": 79, + "execution_count": 78, "metadata": {}, "outputs": [], "source": [ @@ -1301,7 +1304,7 @@ }, { "cell_type": "code", - "execution_count": 80, + "execution_count": 79, "metadata": {}, "outputs": [], "source": [ @@ -1324,7 +1327,7 @@ }, { "cell_type": "code", - "execution_count": 81, + "execution_count": 80, "metadata": {}, "outputs": [], "source": [ @@ -1336,7 +1339,7 @@ }, { "cell_type": "code", - "execution_count": 82, + "execution_count": 81, "metadata": {}, "outputs": [], "source": [ @@ -1345,7 +1348,7 @@ }, { "cell_type": "code", - "execution_count": 83, + "execution_count": 82, "metadata": {}, "outputs": [], "source": [ @@ -1354,7 +1357,7 @@ }, { "cell_type": "code", - "execution_count": 84, + "execution_count": 83, "metadata": {}, "outputs": [], "source": [ @@ -1392,7 +1395,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.6" + "version": "3.7.8" }, "nav_menu": { "height": "252px", diff --git a/08_dimensionality_reduction.ipynb b/08_dimensionality_reduction.ipynb index 1be0a56..c7f1797 100644 --- a/08_dimensionality_reduction.ipynb +++ b/08_dimensionality_reduction.ipynb @@ -74,11 +74,7 @@ " print(\"Saving figure\", fig_id)\n", " if tight_layout:\n", " plt.tight_layout()\n", - " plt.savefig(path, format=fig_extension, dpi=resolution)\n", - "\n", - "# Ignore useless warnings (see SciPy issue #5998)\n", - "import warnings\n", - "warnings.filterwarnings(action=\"ignore\", message=\"^internal gelsd\")" + " plt.savefig(path, format=fig_extension, dpi=resolution)" ] }, { @@ -1731,7 +1727,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Nice! Reducing dimensionality led to a 4× speedup. :) Let's check the model's accuracy:" + "Nice! Reducing dimensionality led to over 2× speedup. 
:) Let's check the model's accuracy:" ] }, { @@ -1748,7 +1744,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "A very slight drop in performance, which might be a reasonable price to pay for a 4× speedup, depending on the application." + "A very slight drop in performance, which might be a reasonable price to pay for a 2× speedup, depending on the application." ] }, { @@ -2229,7 +2225,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Yes, PCA roughly gave us a 25% speedup, without damaging the result. We have a winner!" + "Yes, PCA roughly gave us over 2x speedup, without damaging the result. We have a winner!" ] }, { @@ -2256,7 +2252,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.6" + "version": "3.7.8" } }, "nbformat": 4, diff --git a/09_unsupervised_learning.ipynb b/09_unsupervised_learning.ipynb index fc9197d..67283b1 100644 --- a/09_unsupervised_learning.ipynb +++ b/09_unsupervised_learning.ipynb @@ -74,11 +74,7 @@ " print(\"Saving figure\", fig_id)\n", " if tight_layout:\n", " plt.tight_layout()\n", - " plt.savefig(path, format=fig_extension, dpi=resolution)\n", - "\n", - "# Ignore useless warnings (see SciPy issue #5998)\n", - "import warnings\n", - "warnings.filterwarnings(action=\"ignore\", message=\"^internal gelsd\")" + " plt.savefig(path, format=fig_extension, dpi=resolution)" ] }, { @@ -163,9 +159,14 @@ "metadata": {}, "outputs": [], "source": [ - "y_pred = GaussianMixture(n_components=3, random_state=42).fit(X).predict(X)\n", - "mapping = np.array([2, 0, 1])\n", - "y_pred = np.array([mapping[cluster_id] for cluster_id in y_pred])" + "y_pred = GaussianMixture(n_components=3, random_state=42).fit(X).predict(X)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's map each cluster to a class. Instead of hard coding the mapping (as is done in the book, for simplicity), we will pick the most common class for each cluster (using the `scipy.stats.mode()` function):" ] }, { @@ -173,6 +174,31 @@ "execution_count": 7, "metadata": {}, "outputs": [], + "source": [ + "from scipy import stats\n", + "\n", + "mapping = {}\n", + "for class_id in np.unique(y):\n", + " mode, _ = stats.mode(y_pred[y==class_id])\n", + " mapping[mode[0]] = class_id\n", + "\n", + "mapping" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "y_pred = np.array([mapping[cluster_id] for cluster_id in y_pred])" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [], "source": [ "plt.plot(X[y_pred==0, 2], X[y_pred==0, 3], \"yo\", label=\"Cluster 1\")\n", "plt.plot(X[y_pred==1, 2], X[y_pred==1, 3], \"bs\", label=\"Cluster 2\")\n", @@ -185,7 +211,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 10, "metadata": {}, "outputs": [], "source": [ @@ -194,13 +220,20 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 11, "metadata": {}, "outputs": [], "source": [ "np.sum(y_pred==y) / len(y_pred)" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Note**: the results in this notebook may differ slightly from the book. This is because algorithms can sometimes be tweaked a bit between Scikit-Learn versions." 
+ ] + }, { "cell_type": "markdown", "metadata": {}, @@ -217,7 +250,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 12, "metadata": {}, "outputs": [], "source": [ @@ -226,7 +259,7 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 13, "metadata": {}, "outputs": [], "source": [ @@ -241,7 +274,7 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 14, "metadata": {}, "outputs": [], "source": [ @@ -258,7 +291,7 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": 15, "metadata": {}, "outputs": [], "source": [ @@ -270,7 +303,7 @@ }, { "cell_type": "code", - "execution_count": 14, + "execution_count": 16, "metadata": {}, "outputs": [], "source": [ @@ -296,7 +329,7 @@ }, { "cell_type": "code", - "execution_count": 15, + "execution_count": 17, "metadata": {}, "outputs": [], "source": [ @@ -305,7 +338,7 @@ }, { "cell_type": "code", - "execution_count": 16, + "execution_count": 18, "metadata": {}, "outputs": [], "source": [ @@ -323,7 +356,7 @@ }, { "cell_type": "code", - "execution_count": 17, + "execution_count": 19, "metadata": {}, "outputs": [], "source": [ @@ -332,7 +365,7 @@ }, { "cell_type": "code", - "execution_count": 18, + "execution_count": 20, "metadata": {}, "outputs": [], "source": [ @@ -348,7 +381,7 @@ }, { "cell_type": "code", - "execution_count": 19, + "execution_count": 21, "metadata": {}, "outputs": [], "source": [ @@ -364,7 +397,7 @@ }, { "cell_type": "code", - "execution_count": 20, + "execution_count": 22, "metadata": {}, "outputs": [], "source": [ @@ -380,7 +413,7 @@ }, { "cell_type": "code", - "execution_count": 21, + "execution_count": 23, "metadata": {}, "outputs": [], "source": [ @@ -404,7 +437,7 @@ }, { "cell_type": "code", - "execution_count": 22, + "execution_count": 24, "metadata": {}, "outputs": [], "source": [ @@ -415,10 +448,10 @@ " if weights is not None:\n", " centroids = centroids[weights > weights.max() / 10]\n", " plt.scatter(centroids[:, 0], centroids[:, 1],\n", - " marker='o', s=30, linewidths=8,\n", + " marker='o', s=35, linewidths=8,\n", " color=circle_color, zorder=10, alpha=0.9)\n", " plt.scatter(centroids[:, 0], centroids[:, 1],\n", - " marker='x', s=50, linewidths=50,\n", + " marker='x', s=2, linewidths=12,\n", " color=cross_color, zorder=11, alpha=1)\n", "\n", "def plot_decision_boundaries(clusterer, X, resolution=1000, show_centroids=True,\n", @@ -450,7 +483,7 @@ }, { "cell_type": "code", - "execution_count": 23, + "execution_count": 25, "metadata": {}, "outputs": [], "source": [ @@ -483,7 +516,7 @@ }, { "cell_type": "code", - "execution_count": 24, + "execution_count": 26, "metadata": {}, "outputs": [], "source": [ @@ -499,7 +532,7 @@ }, { "cell_type": "code", - "execution_count": 25, + "execution_count": 27, "metadata": {}, "outputs": [], "source": [ @@ -517,7 +550,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "The K-Means algorithm is one of the fastest clustering algorithms, but also one of the simplest:\n", + "The K-Means algorithm is one of the fastest clustering algorithms, and also one of the simplest:\n", "* First initialize $k$ centroids randomly: $k$ distinct instances are chosen randomly from the dataset and the centroids are placed at their locations.\n", "* Repeat until convergence (i.e., until the centroids stop moving):\n", " * Assign each instance to the closest centroid.\n", @@ -540,16 +573,16 @@ }, { "cell_type": "code", - "execution_count": 26, + "execution_count": 28, "metadata": {}, "outputs": [], "source": [ 
"kmeans_iter1 = KMeans(n_clusters=5, init=\"random\", n_init=1,\n", - " algorithm=\"full\", max_iter=1, random_state=1)\n", + " algorithm=\"full\", max_iter=1, random_state=0)\n", "kmeans_iter2 = KMeans(n_clusters=5, init=\"random\", n_init=1,\n", - " algorithm=\"full\", max_iter=2, random_state=1)\n", + " algorithm=\"full\", max_iter=2, random_state=0)\n", "kmeans_iter3 = KMeans(n_clusters=5, init=\"random\", n_init=1,\n", - " algorithm=\"full\", max_iter=3, random_state=1)\n", + " algorithm=\"full\", max_iter=3, random_state=0)\n", "kmeans_iter1.fit(X)\n", "kmeans_iter2.fit(X)\n", "kmeans_iter3.fit(X)" @@ -564,7 +597,7 @@ }, { "cell_type": "code", - "execution_count": 27, + "execution_count": 29, "metadata": {}, "outputs": [], "source": [ @@ -617,7 +650,7 @@ }, { "cell_type": "code", - "execution_count": 28, + "execution_count": 30, "metadata": {}, "outputs": [], "source": [ @@ -640,14 +673,14 @@ }, { "cell_type": "code", - "execution_count": 29, + "execution_count": 31, "metadata": {}, "outputs": [], "source": [ "kmeans_rnd_init1 = KMeans(n_clusters=5, init=\"random\", n_init=1,\n", - " algorithm=\"full\", random_state=11)\n", + " algorithm=\"full\", random_state=2)\n", "kmeans_rnd_init2 = KMeans(n_clusters=5, init=\"random\", n_init=1,\n", - " algorithm=\"full\", random_state=19)\n", + " algorithm=\"full\", random_state=5)\n", "\n", "plot_clusterer_comparison(kmeans_rnd_init1, kmeans_rnd_init2, X,\n", " \"Solution 1\", \"Solution 2 (with a different random init)\")\n", @@ -672,7 +705,7 @@ }, { "cell_type": "code", - "execution_count": 30, + "execution_count": 32, "metadata": {}, "outputs": [], "source": [ @@ -688,7 +721,7 @@ }, { "cell_type": "code", - "execution_count": 31, + "execution_count": 33, "metadata": {}, "outputs": [], "source": [ @@ -700,12 +733,12 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "The `score()` method returns the negative inertia. Why negative? Well, it is because a predictor's `score()` method must always respect the \"_great is better_\" rule." + "The `score()` method returns the negative inertia. Why negative? Well, it is because a predictor's `score()` method must always respect the \"_greater is better_\" rule." 
] }, { "cell_type": "code", - "execution_count": 32, + "execution_count": 34, "metadata": {}, "outputs": [], "source": [ @@ -728,7 +761,7 @@ }, { "cell_type": "code", - "execution_count": 33, + "execution_count": 35, "metadata": {}, "outputs": [], "source": [ @@ -737,7 +770,7 @@ }, { "cell_type": "code", - "execution_count": 34, + "execution_count": 36, "metadata": {}, "outputs": [], "source": [ @@ -760,12 +793,12 @@ }, { "cell_type": "code", - "execution_count": 35, + "execution_count": 37, "metadata": {}, "outputs": [], "source": [ "kmeans_rnd_10_inits = KMeans(n_clusters=5, init=\"random\", n_init=10,\n", - " algorithm=\"full\", random_state=11)\n", + " algorithm=\"full\", random_state=2)\n", "kmeans_rnd_10_inits.fit(X)" ] }, @@ -778,7 +811,7 @@ }, { "cell_type": "code", - "execution_count": 36, + "execution_count": 38, "metadata": {}, "outputs": [], "source": [ @@ -820,7 +853,7 @@ }, { "cell_type": "code", - "execution_count": 37, + "execution_count": 39, "metadata": {}, "outputs": [], "source": [ @@ -829,7 +862,7 @@ }, { "cell_type": "code", - "execution_count": 38, + "execution_count": 40, "metadata": {}, "outputs": [], "source": [ @@ -862,22 +895,29 @@ }, { "cell_type": "code", - "execution_count": 39, + "execution_count": 41, "metadata": {}, "outputs": [], "source": [ - "%timeit -n 50 KMeans(algorithm=\"elkan\").fit(X)" + "%timeit -n 50 KMeans(algorithm=\"elkan\", random_state=42).fit(X)" ] }, { "cell_type": "code", - "execution_count": 40, + "execution_count": 42, "metadata": { "scrolled": true }, "outputs": [], "source": [ - "%timeit -n 50 KMeans(algorithm=\"full\").fit(X)" + "%timeit -n 50 KMeans(algorithm=\"full\", random_state=42).fit(X)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "There's no big difference in this case, as the dataset is fairly small." 
] }, { @@ -896,7 +936,7 @@ }, { "cell_type": "code", - "execution_count": 41, + "execution_count": 43, "metadata": {}, "outputs": [], "source": [ @@ -905,7 +945,7 @@ }, { "cell_type": "code", - "execution_count": 42, + "execution_count": 44, "metadata": {}, "outputs": [], "source": [ @@ -915,7 +955,7 @@ }, { "cell_type": "code", - "execution_count": 43, + "execution_count": 45, "metadata": {}, "outputs": [], "source": [ @@ -931,11 +971,11 @@ }, { "cell_type": "code", - "execution_count": 44, + "execution_count": 46, "metadata": {}, "outputs": [], "source": [ - "import urllib\n", + "import urllib.request\n", "from sklearn.datasets import fetch_openml\n", "\n", "mnist = fetch_openml('mnist_784', version=1)\n", @@ -944,7 +984,7 @@ }, { "cell_type": "code", - "execution_count": 45, + "execution_count": 47, "metadata": {}, "outputs": [], "source": [ @@ -963,7 +1003,7 @@ }, { "cell_type": "code", - "execution_count": 46, + "execution_count": 48, "metadata": {}, "outputs": [], "source": [ @@ -974,7 +1014,7 @@ }, { "cell_type": "code", - "execution_count": 47, + "execution_count": 49, "metadata": {}, "outputs": [], "source": [ @@ -991,7 +1031,7 @@ }, { "cell_type": "code", - "execution_count": 48, + "execution_count": 50, "metadata": {}, "outputs": [], "source": [ @@ -1008,7 +1048,7 @@ }, { "cell_type": "code", - "execution_count": 49, + "execution_count": 51, "metadata": {}, "outputs": [], "source": [ @@ -1017,7 +1057,7 @@ }, { "cell_type": "code", - "execution_count": 50, + "execution_count": 52, "metadata": {}, "outputs": [], "source": [ @@ -1049,7 +1089,7 @@ }, { "cell_type": "code", - "execution_count": 51, + "execution_count": 53, "metadata": {}, "outputs": [], "source": [ @@ -1065,20 +1105,20 @@ }, { "cell_type": "code", - "execution_count": 52, + "execution_count": 54, "metadata": {}, "outputs": [], "source": [ - "%timeit KMeans(n_clusters=5).fit(X)" + "%timeit KMeans(n_clusters=5, random_state=42).fit(X)" ] }, { "cell_type": "code", - "execution_count": 53, + "execution_count": 55, "metadata": {}, "outputs": [], "source": [ - "%timeit MiniBatchKMeans(n_clusters=5).fit(X)" + "%timeit MiniBatchKMeans(n_clusters=5, random_state=42).fit(X)" ] }, { @@ -1090,7 +1130,7 @@ }, { "cell_type": "code", - "execution_count": 54, + "execution_count": 56, "metadata": {}, "outputs": [], "source": [ @@ -1099,7 +1139,7 @@ }, { "cell_type": "code", - "execution_count": 55, + "execution_count": 57, "metadata": {}, "outputs": [], "source": [ @@ -1117,7 +1157,7 @@ }, { "cell_type": "code", - "execution_count": 56, + "execution_count": 58, "metadata": {}, "outputs": [], "source": [ @@ -1158,7 +1198,7 @@ }, { "cell_type": "code", - "execution_count": 57, + "execution_count": 59, "metadata": {}, "outputs": [], "source": [ @@ -1179,7 +1219,7 @@ }, { "cell_type": "code", - "execution_count": 58, + "execution_count": 60, "metadata": {}, "outputs": [], "source": [ @@ -1188,7 +1228,7 @@ }, { "cell_type": "code", - "execution_count": 59, + "execution_count": 61, "metadata": {}, "outputs": [], "source": [ @@ -1204,7 +1244,7 @@ }, { "cell_type": "code", - "execution_count": 60, + "execution_count": 62, "metadata": {}, "outputs": [], "source": [ @@ -1215,7 +1255,7 @@ }, { "cell_type": "code", - "execution_count": 61, + "execution_count": 63, "metadata": {}, "outputs": [], "source": [ @@ -1244,7 +1284,7 @@ }, { "cell_type": "code", - "execution_count": 62, + "execution_count": 64, "metadata": {}, "outputs": [], "source": [ @@ -1268,7 +1308,7 @@ }, { "cell_type": "code", - "execution_count": 63, + "execution_count": 65, 
"metadata": {}, "outputs": [], "source": [ @@ -1277,7 +1317,7 @@ }, { "cell_type": "code", - "execution_count": 64, + "execution_count": 66, "metadata": {}, "outputs": [], "source": [ @@ -1286,7 +1326,7 @@ }, { "cell_type": "code", - "execution_count": 65, + "execution_count": 67, "metadata": {}, "outputs": [], "source": [ @@ -1296,7 +1336,7 @@ }, { "cell_type": "code", - "execution_count": 66, + "execution_count": 68, "metadata": {}, "outputs": [], "source": [ @@ -1325,7 +1365,7 @@ }, { "cell_type": "code", - "execution_count": 67, + "execution_count": 69, "metadata": {}, "outputs": [], "source": [ @@ -1371,6 +1411,13 @@ "plt.show()" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "As you can see, $k=5$ looks like the best option here, as all clusters are roughly the same size, and they all cross the dashed line, which represents the mean silhouette score." + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -1380,7 +1427,7 @@ }, { "cell_type": "code", - "execution_count": 68, + "execution_count": 70, "metadata": {}, "outputs": [], "source": [ @@ -1394,7 +1441,7 @@ }, { "cell_type": "code", - "execution_count": 69, + "execution_count": 71, "metadata": {}, "outputs": [], "source": [ @@ -1403,7 +1450,7 @@ }, { "cell_type": "code", - "execution_count": 70, + "execution_count": 72, "metadata": {}, "outputs": [], "source": [ @@ -1415,7 +1462,7 @@ }, { "cell_type": "code", - "execution_count": 71, + "execution_count": 73, "metadata": {}, "outputs": [], "source": [ @@ -1442,7 +1489,7 @@ }, { "cell_type": "code", - "execution_count": 72, + "execution_count": 74, "metadata": {}, "outputs": [], "source": [ @@ -1458,7 +1505,7 @@ }, { "cell_type": "code", - "execution_count": 73, + "execution_count": 75, "metadata": {}, "outputs": [], "source": [ @@ -1469,7 +1516,7 @@ }, { "cell_type": "code", - "execution_count": 74, + "execution_count": 76, "metadata": {}, "outputs": [], "source": [ @@ -1481,7 +1528,7 @@ }, { "cell_type": "code", - "execution_count": 75, + "execution_count": 77, "metadata": {}, "outputs": [], "source": [ @@ -1495,7 +1542,7 @@ }, { "cell_type": "code", - "execution_count": 76, + "execution_count": 78, "metadata": {}, "outputs": [], "source": [ @@ -1533,7 +1580,7 @@ }, { "cell_type": "code", - "execution_count": 77, + "execution_count": 79, "metadata": {}, "outputs": [], "source": [ @@ -1542,7 +1589,7 @@ }, { "cell_type": "code", - "execution_count": 78, + "execution_count": 80, "metadata": {}, "outputs": [], "source": [ @@ -1558,7 +1605,7 @@ }, { "cell_type": "code", - "execution_count": 79, + "execution_count": 81, "metadata": {}, "outputs": [], "source": [ @@ -1567,7 +1614,7 @@ }, { "cell_type": "code", - "execution_count": 80, + "execution_count": 82, "metadata": {}, "outputs": [], "source": [ @@ -1583,7 +1630,7 @@ }, { "cell_type": "code", - "execution_count": 81, + "execution_count": 83, "metadata": {}, "outputs": [], "source": [ @@ -1592,7 +1639,7 @@ }, { "cell_type": "code", - "execution_count": 82, + "execution_count": 84, "metadata": {}, "outputs": [], "source": [ @@ -1602,7 +1649,7 @@ }, { "cell_type": "code", - "execution_count": 83, + "execution_count": 85, "metadata": {}, "outputs": [], "source": [ @@ -1618,7 +1665,7 @@ }, { "cell_type": "code", - "execution_count": 84, + "execution_count": 86, "metadata": {}, "outputs": [], "source": [ @@ -1627,7 +1674,7 @@ }, { "cell_type": "code", - "execution_count": 85, + "execution_count": 87, "metadata": {}, "outputs": [], "source": [ @@ -1640,7 +1687,7 @@ }, { "cell_type": "code", - "execution_count": 
86, + "execution_count": 88, "metadata": {}, "outputs": [], "source": [ @@ -1649,7 +1696,7 @@ }, { "cell_type": "code", - "execution_count": 87, + "execution_count": 89, "metadata": {}, "outputs": [], "source": [ @@ -1665,16 +1712,23 @@ }, { "cell_type": "code", - "execution_count": 88, + "execution_count": 90, "metadata": {}, "outputs": [], "source": [ "from sklearn.model_selection import GridSearchCV" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Warning**: the following cell may take close to 20 minutes to run, or more depending on your hardware." + ] + }, { "cell_type": "code", - "execution_count": 89, + "execution_count": 91, "metadata": {}, "outputs": [], "source": [ @@ -1692,7 +1746,7 @@ }, { "cell_type": "code", - "execution_count": 90, + "execution_count": 92, "metadata": {}, "outputs": [], "source": [ @@ -1701,7 +1755,7 @@ }, { "cell_type": "code", - "execution_count": 91, + "execution_count": 93, "metadata": {}, "outputs": [], "source": [ @@ -1731,7 +1785,7 @@ }, { "cell_type": "code", - "execution_count": 92, + "execution_count": 94, "metadata": {}, "outputs": [], "source": [ @@ -1740,7 +1794,7 @@ }, { "cell_type": "code", - "execution_count": 93, + "execution_count": 95, "metadata": {}, "outputs": [], "source": [ @@ -1758,7 +1812,7 @@ }, { "cell_type": "code", - "execution_count": 94, + "execution_count": 96, "metadata": {}, "outputs": [], "source": [ @@ -1767,7 +1821,7 @@ }, { "cell_type": "code", - "execution_count": 95, + "execution_count": 97, "metadata": {}, "outputs": [], "source": [ @@ -1786,7 +1840,7 @@ }, { "cell_type": "code", - "execution_count": 96, + "execution_count": 98, "metadata": {}, "outputs": [], "source": [ @@ -1802,16 +1856,25 @@ }, { "cell_type": "code", - "execution_count": 97, + "execution_count": 99, + "metadata": {}, + "outputs": [], + "source": [ + "y_train[representative_digit_idx]" + ] + }, + { + "cell_type": "code", + "execution_count": 100, "metadata": {}, "outputs": [], "source": [ "y_representative_digits = np.array([\n", - " 4, 8, 0, 6, 8, 3, 7, 7, 9, 2,\n", - " 5, 5, 8, 5, 2, 1, 2, 9, 6, 1,\n", - " 1, 6, 9, 0, 8, 3, 0, 7, 4, 1,\n", - " 6, 5, 2, 4, 1, 8, 6, 3, 9, 2,\n", - " 4, 2, 9, 4, 7, 6, 2, 3, 1, 1])" + " 0, 1, 3, 2, 7, 6, 4, 6, 9, 5,\n", + " 1, 2, 9, 5, 2, 7, 8, 1, 8, 6,\n", + " 3, 2, 5, 4, 5, 4, 0, 3, 2, 6,\n", + " 1, 7, 7, 9, 1, 8, 6, 5, 4, 8,\n", + " 5, 3, 3, 6, 7, 9, 7, 8, 4, 9])" ] }, { @@ -1823,7 +1886,7 @@ }, { "cell_type": "code", - "execution_count": 98, + "execution_count": 101, "metadata": {}, "outputs": [], "source": [ @@ -1836,7 +1899,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Wow! We jumped from 83.3% accuracy to 92.2%, although we are still only training the model on 50 instances. Since it's often costly and painful to label instances, especially when it has to be done manually by experts, it's a good idea to make them label representative instances rather than just random instances." + "Wow! We jumped from 83.3% accuracy to 91.3%, although we are still only training the model on 50 instances. Since it's often costly and painful to label instances, especially when it has to be done manually by experts, it's a good idea to make them label representative instances rather than just random instances." 
] }, { @@ -1848,7 +1911,7 @@ }, { "cell_type": "code", - "execution_count": 99, + "execution_count": 102, "metadata": {}, "outputs": [], "source": [ @@ -1859,7 +1922,7 @@ }, { "cell_type": "code", - "execution_count": 100, + "execution_count": 103, "metadata": {}, "outputs": [], "source": [ @@ -1869,7 +1932,7 @@ }, { "cell_type": "code", - "execution_count": 101, + "execution_count": 104, "metadata": {}, "outputs": [], "source": [ @@ -1880,16 +1943,16 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "We got a tiny little accuracy boost. Better than nothing, but we should probably have propagated the labels only to the instances closest to the centroid, because by propagating to the full cluster, we have certainly included some outliers. Let's only propagate the labels to the 20th percentile closest to the centroid:" + "We got a tiny little accuracy boost. Better than nothing, but we should probably have propagated the labels only to the instances closest to the centroid, because by propagating to the full cluster, we have certainly included some outliers. Let's only propagate the labels to the 75th percentile closest to the centroid:" ] }, { "cell_type": "code", - "execution_count": 102, + "execution_count": 105, "metadata": {}, "outputs": [], "source": [ - "percentile_closest = 20\n", + "percentile_closest = 75\n", "\n", "X_cluster_dist = X_digits_dist[np.arange(len(X_train)), kmeans.labels_]\n", "for i in range(k):\n", @@ -1902,7 +1965,7 @@ }, { "cell_type": "code", - "execution_count": 103, + "execution_count": 106, "metadata": {}, "outputs": [], "source": [ @@ -1913,7 +1976,7 @@ }, { "cell_type": "code", - "execution_count": 104, + "execution_count": 107, "metadata": {}, "outputs": [], "source": [ @@ -1923,7 +1986,7 @@ }, { "cell_type": "code", - "execution_count": 105, + "execution_count": 108, "metadata": {}, "outputs": [], "source": [ @@ -1934,19 +1997,19 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Nice! With just 50 labeled instances (just 5 examples per class on average!), we got 94% performance, which is pretty close to the performance of logistic regression on the fully labeled _digits_ dataset (which was 96.9%)." + "A bit better. With just 50 labeled instances (just 5 examples per class on average!), we got 92.7% performance, which is getting closer to the performance of logistic regression on the fully labeled _digits_ dataset (which was 96.9%)." 
] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "This is because the propagated labels are actually pretty good: their accuracy is very close to 99%:" + "This is because the propagated labels are actually pretty good: their accuracy is close to 96%:" ] }, { "cell_type": "code", - "execution_count": 106, + "execution_count": 109, "metadata": {}, "outputs": [], "source": [ @@ -1971,7 +2034,7 @@ }, { "cell_type": "code", - "execution_count": 107, + "execution_count": 110, "metadata": {}, "outputs": [], "source": [ @@ -1980,7 +2043,7 @@ }, { "cell_type": "code", - "execution_count": 108, + "execution_count": 111, "metadata": {}, "outputs": [], "source": [ @@ -1989,7 +2052,7 @@ }, { "cell_type": "code", - "execution_count": 109, + "execution_count": 112, "metadata": {}, "outputs": [], "source": [ @@ -1998,7 +2061,7 @@ }, { "cell_type": "code", - "execution_count": 110, + "execution_count": 113, "metadata": {}, "outputs": [], "source": [ @@ -2008,7 +2071,7 @@ }, { "cell_type": "code", - "execution_count": 111, + "execution_count": 114, "metadata": {}, "outputs": [], "source": [ @@ -2017,7 +2080,7 @@ }, { "cell_type": "code", - "execution_count": 112, + "execution_count": 115, "metadata": {}, "outputs": [], "source": [ @@ -2026,7 +2089,7 @@ }, { "cell_type": "code", - "execution_count": 113, + "execution_count": 116, "metadata": {}, "outputs": [], "source": [ @@ -2035,7 +2098,7 @@ }, { "cell_type": "code", - "execution_count": 114, + "execution_count": 117, "metadata": {}, "outputs": [], "source": [ @@ -2044,7 +2107,7 @@ }, { "cell_type": "code", - "execution_count": 115, + "execution_count": 118, "metadata": {}, "outputs": [], "source": [ @@ -2053,7 +2116,7 @@ }, { "cell_type": "code", - "execution_count": 116, + "execution_count": 119, "metadata": {}, "outputs": [], "source": [ @@ -2063,7 +2126,7 @@ }, { "cell_type": "code", - "execution_count": 117, + "execution_count": 120, "metadata": {}, "outputs": [], "source": [ @@ -2096,7 +2159,7 @@ }, { "cell_type": "code", - "execution_count": 118, + "execution_count": 121, "metadata": {}, "outputs": [], "source": [ @@ -2114,7 +2177,7 @@ }, { "cell_type": "code", - "execution_count": 119, + "execution_count": 122, "metadata": {}, "outputs": [], "source": [ @@ -2123,7 +2186,7 @@ }, { "cell_type": "code", - "execution_count": 120, + "execution_count": 123, "metadata": {}, "outputs": [], "source": [ @@ -2132,7 +2195,7 @@ }, { "cell_type": "code", - "execution_count": 121, + "execution_count": 124, "metadata": {}, "outputs": [], "source": [ @@ -2142,7 +2205,7 @@ }, { "cell_type": "code", - "execution_count": 122, + "execution_count": 125, "metadata": {}, "outputs": [], "source": [ @@ -2152,7 +2215,7 @@ }, { "cell_type": "code", - "execution_count": 123, + "execution_count": 126, "metadata": {}, "outputs": [], "source": [ @@ -2161,7 +2224,7 @@ }, { "cell_type": "code", - "execution_count": 124, + "execution_count": 127, "metadata": {}, "outputs": [], "source": [ @@ -2174,7 +2237,7 @@ }, { "cell_type": "code", - "execution_count": 125, + "execution_count": 128, "metadata": {}, "outputs": [], "source": [ @@ -2200,7 +2263,7 @@ }, { "cell_type": "code", - "execution_count": 126, + "execution_count": 129, "metadata": {}, "outputs": [], "source": [ @@ -2209,7 +2272,7 @@ }, { "cell_type": "code", - "execution_count": 127, + "execution_count": 130, "metadata": {}, "outputs": [], "source": [ @@ -2219,7 +2282,7 @@ }, { "cell_type": "code", - "execution_count": 128, + "execution_count": 131, "metadata": {}, "outputs": [], "source": [ @@ -2229,7 +2292,7 
@@ }, { "cell_type": "code", - "execution_count": 129, + "execution_count": 132, "metadata": {}, "outputs": [], "source": [ @@ -2238,7 +2301,7 @@ }, { "cell_type": "code", - "execution_count": 130, + "execution_count": 133, "metadata": {}, "outputs": [], "source": [ @@ -2260,7 +2323,7 @@ }, { "cell_type": "code", - "execution_count": 131, + "execution_count": 134, "metadata": {}, "outputs": [], "source": [ @@ -2284,7 +2347,7 @@ }, { "cell_type": "code", - "execution_count": 132, + "execution_count": 135, "metadata": {}, "outputs": [], "source": [ @@ -2293,7 +2356,7 @@ }, { "cell_type": "code", - "execution_count": 133, + "execution_count": 136, "metadata": {}, "outputs": [], "source": [ @@ -2303,7 +2366,7 @@ }, { "cell_type": "code", - "execution_count": 134, + "execution_count": 137, "metadata": {}, "outputs": [], "source": [ @@ -2314,7 +2377,7 @@ }, { "cell_type": "code", - "execution_count": 135, + "execution_count": 138, "metadata": {}, "outputs": [], "source": [ @@ -2323,7 +2386,7 @@ }, { "cell_type": "code", - "execution_count": 136, + "execution_count": 139, "metadata": { "scrolled": true }, @@ -2341,7 +2404,7 @@ }, { "cell_type": "code", - "execution_count": 137, + "execution_count": 140, "metadata": {}, "outputs": [], "source": [ @@ -2362,7 +2425,7 @@ }, { "cell_type": "code", - "execution_count": 138, + "execution_count": 141, "metadata": {}, "outputs": [], "source": [ @@ -2371,7 +2434,7 @@ }, { "cell_type": "code", - "execution_count": 139, + "execution_count": 142, "metadata": {}, "outputs": [], "source": [ @@ -2388,7 +2451,7 @@ }, { "cell_type": "code", - "execution_count": 140, + "execution_count": 143, "metadata": {}, "outputs": [], "source": [ @@ -2397,7 +2460,7 @@ }, { "cell_type": "code", - "execution_count": 141, + "execution_count": 144, "metadata": {}, "outputs": [], "source": [ @@ -2406,7 +2469,7 @@ }, { "cell_type": "code", - "execution_count": 142, + "execution_count": 145, "metadata": {}, "outputs": [], "source": [ @@ -2422,7 +2485,7 @@ }, { "cell_type": "code", - "execution_count": 143, + "execution_count": 146, "metadata": {}, "outputs": [], "source": [ @@ -2438,7 +2501,7 @@ }, { "cell_type": "code", - "execution_count": 144, + "execution_count": 147, "metadata": {}, "outputs": [], "source": [ @@ -2454,7 +2517,7 @@ }, { "cell_type": "code", - "execution_count": 145, + "execution_count": 148, "metadata": {}, "outputs": [], "source": [ @@ -2463,7 +2526,7 @@ }, { "cell_type": "code", - "execution_count": 146, + "execution_count": 149, "metadata": {}, "outputs": [], "source": [ @@ -2479,7 +2542,7 @@ }, { "cell_type": "code", - "execution_count": 147, + "execution_count": 150, "metadata": {}, "outputs": [], "source": [ @@ -2489,7 +2552,7 @@ }, { "cell_type": "code", - "execution_count": 148, + "execution_count": 151, "metadata": {}, "outputs": [], "source": [ @@ -2512,7 +2575,7 @@ }, { "cell_type": "code", - "execution_count": 149, + "execution_count": 152, "metadata": {}, "outputs": [], "source": [ @@ -2528,7 +2591,7 @@ }, { "cell_type": "code", - "execution_count": 150, + "execution_count": 153, "metadata": {}, "outputs": [], "source": [ @@ -2551,7 +2614,7 @@ }, { "cell_type": "code", - "execution_count": 151, + "execution_count": 154, "metadata": {}, "outputs": [], "source": [ @@ -2590,7 +2653,7 @@ }, { "cell_type": "code", - "execution_count": 152, + "execution_count": 155, "metadata": {}, "outputs": [], "source": [ @@ -2615,7 +2678,7 @@ }, { "cell_type": "code", - "execution_count": 153, + "execution_count": 156, "metadata": {}, "outputs": [], "source": [ @@ 
-2631,7 +2694,7 @@ }, { "cell_type": "code", - "execution_count": 154, + "execution_count": 157, "metadata": {}, "outputs": [], "source": [ @@ -2649,7 +2712,7 @@ }, { "cell_type": "code", - "execution_count": 155, + "execution_count": 158, "metadata": {}, "outputs": [], "source": [ @@ -2661,7 +2724,7 @@ }, { "cell_type": "code", - "execution_count": 156, + "execution_count": 159, "metadata": {}, "outputs": [], "source": [ @@ -2686,7 +2749,7 @@ }, { "cell_type": "code", - "execution_count": 157, + "execution_count": 160, "metadata": {}, "outputs": [], "source": [ @@ -2697,7 +2760,7 @@ }, { "cell_type": "code", - "execution_count": 158, + "execution_count": 161, "metadata": {}, "outputs": [], "source": [ @@ -2737,7 +2800,7 @@ }, { "cell_type": "code", - "execution_count": 159, + "execution_count": 162, "metadata": {}, "outputs": [], "source": [ @@ -2746,7 +2809,7 @@ }, { "cell_type": "code", - "execution_count": 160, + "execution_count": 163, "metadata": {}, "outputs": [], "source": [ @@ -2762,7 +2825,7 @@ }, { "cell_type": "code", - "execution_count": 161, + "execution_count": 164, "metadata": {}, "outputs": [], "source": [ @@ -2779,7 +2842,7 @@ }, { "cell_type": "code", - "execution_count": 162, + "execution_count": 165, "metadata": {}, "outputs": [], "source": [ @@ -2788,7 +2851,7 @@ }, { "cell_type": "code", - "execution_count": 163, + "execution_count": 166, "metadata": {}, "outputs": [], "source": [ @@ -2811,7 +2874,7 @@ }, { "cell_type": "code", - "execution_count": 164, + "execution_count": 167, "metadata": {}, "outputs": [], "source": [ @@ -2821,7 +2884,7 @@ }, { "cell_type": "code", - "execution_count": 165, + "execution_count": 168, "metadata": {}, "outputs": [], "source": [ @@ -2831,7 +2894,7 @@ }, { "cell_type": "code", - "execution_count": 166, + "execution_count": 169, "metadata": {}, "outputs": [], "source": [ @@ -2862,7 +2925,7 @@ }, { "cell_type": "code", - "execution_count": 167, + "execution_count": 170, "metadata": {}, "outputs": [], "source": [ @@ -2881,7 +2944,7 @@ }, { "cell_type": "code", - "execution_count": 168, + "execution_count": 171, "metadata": {}, "outputs": [], "source": [ @@ -2890,7 +2953,7 @@ }, { "cell_type": "code", - "execution_count": 169, + "execution_count": 172, "metadata": {}, "outputs": [], "source": [ @@ -2913,7 +2976,7 @@ }, { "cell_type": "code", - "execution_count": 170, + "execution_count": 173, "metadata": {}, "outputs": [], "source": [ @@ -2922,7 +2985,7 @@ }, { "cell_type": "code", - "execution_count": 171, + "execution_count": 174, "metadata": {}, "outputs": [], "source": [ @@ -2939,7 +3002,7 @@ }, { "cell_type": "code", - "execution_count": 172, + "execution_count": 175, "metadata": {}, "outputs": [], "source": [ @@ -2948,7 +3011,7 @@ }, { "cell_type": "code", - "execution_count": 173, + "execution_count": 176, "metadata": {}, "outputs": [], "source": [ @@ -2959,7 +3022,7 @@ }, { "cell_type": "code", - "execution_count": 174, + "execution_count": 177, "metadata": {}, "outputs": [], "source": [ @@ -2974,7 +3037,7 @@ }, { "cell_type": "code", - "execution_count": 175, + "execution_count": 178, "metadata": {}, "outputs": [], "source": [ @@ -2983,7 +3046,7 @@ }, { "cell_type": "code", - "execution_count": 176, + "execution_count": 179, "metadata": {}, "outputs": [], "source": [ @@ -2992,7 +3055,7 @@ }, { "cell_type": "code", - "execution_count": 177, + "execution_count": 180, "metadata": {}, "outputs": [], "source": [ @@ -3019,7 +3082,7 @@ }, { "cell_type": "code", - "execution_count": 178, + "execution_count": 181, "metadata": {}, 
"outputs": [], "source": [ @@ -3028,7 +3091,7 @@ }, { "cell_type": "code", - "execution_count": 179, + "execution_count": 182, "metadata": { "scrolled": true }, @@ -3040,7 +3103,7 @@ }, { "cell_type": "code", - "execution_count": 180, + "execution_count": 183, "metadata": {}, "outputs": [], "source": [ @@ -3074,7 +3137,7 @@ }, { "cell_type": "code", - "execution_count": 181, + "execution_count": 184, "metadata": {}, "outputs": [], "source": [ @@ -3083,7 +3146,7 @@ }, { "cell_type": "code", - "execution_count": 182, + "execution_count": 185, "metadata": {}, "outputs": [], "source": [ @@ -3096,7 +3159,7 @@ }, { "cell_type": "code", - "execution_count": 183, + "execution_count": 186, "metadata": {}, "outputs": [], "source": [ @@ -3204,7 +3267,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 187, "metadata": {}, "outputs": [], "source": [ @@ -3215,7 +3278,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 188, "metadata": {}, "outputs": [], "source": [ @@ -3224,7 +3287,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 189, "metadata": {}, "outputs": [], "source": [ @@ -3240,7 +3303,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 190, "metadata": {}, "outputs": [], "source": [ @@ -3263,7 +3326,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 191, "metadata": {}, "outputs": [], "source": [ @@ -3281,7 +3344,7 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 192, "metadata": {}, "outputs": [], "source": [ @@ -3304,7 +3367,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 193, "metadata": {}, "outputs": [], "source": [ @@ -3320,7 +3383,7 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 194, "metadata": {}, "outputs": [], "source": [ @@ -3342,7 +3405,7 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 195, "metadata": {}, "outputs": [], "source": [ @@ -3353,12 +3416,12 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "It looks like the best number of clusters is quite high, at 120. You might have expected it to be 40, since there are 40 different people on the pictures. However, the same person may look quite different on different pictures (e.g., with or without glasses, or simply shifted left or right)." + "It looks like the best number of clusters is quite high, at 100. You might have expected it to be 40, since there are 40 different people on the pictures. However, the same person may look quite different on different pictures (e.g., with or without glasses, or simply shifted left or right)." ] }, { "cell_type": "code", - "execution_count": 13, + "execution_count": 196, "metadata": {}, "outputs": [], "source": [ @@ -3377,12 +3440,12 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "The optimal number of clusters is not clear on this inertia diagram, as there is no obvious elbow, so let's stick with k=120." + "The optimal number of clusters is not clear on this inertia diagram, as there is no obvious elbow, so let's stick with k=100." 
] }, { "cell_type": "code", - "execution_count": 14, + "execution_count": 197, "metadata": {}, "outputs": [], "source": [ @@ -3398,7 +3461,7 @@ }, { "cell_type": "code", - "execution_count": 15, + "execution_count": 198, "metadata": {}, "outputs": [], "source": [ @@ -3445,7 +3508,7 @@ }, { "cell_type": "code", - "execution_count": 16, + "execution_count": 199, "metadata": {}, "outputs": [], "source": [ @@ -3465,7 +3528,7 @@ }, { "cell_type": "code", - "execution_count": 17, + "execution_count": 200, "metadata": {}, "outputs": [], "source": [ @@ -3502,7 +3565,7 @@ }, { "cell_type": "code", - "execution_count": 18, + "execution_count": 201, "metadata": {}, "outputs": [], "source": [ @@ -3533,7 +3596,7 @@ }, { "cell_type": "code", - "execution_count": 19, + "execution_count": 202, "metadata": {}, "outputs": [], "source": [ @@ -3544,7 +3607,7 @@ }, { "cell_type": "code", - "execution_count": 20, + "execution_count": 203, "metadata": {}, "outputs": [], "source": [ @@ -3576,7 +3639,7 @@ }, { "cell_type": "code", - "execution_count": 21, + "execution_count": 204, "metadata": {}, "outputs": [], "source": [ @@ -3595,7 +3658,7 @@ }, { "cell_type": "code", - "execution_count": 22, + "execution_count": 205, "metadata": {}, "outputs": [], "source": [ @@ -3606,7 +3669,7 @@ }, { "cell_type": "code", - "execution_count": 23, + "execution_count": 206, "metadata": {}, "outputs": [], "source": [ @@ -3622,7 +3685,7 @@ }, { "cell_type": "code", - "execution_count": 24, + "execution_count": 207, "metadata": {}, "outputs": [], "source": [ @@ -3650,7 +3713,7 @@ }, { "cell_type": "code", - "execution_count": 25, + "execution_count": 208, "metadata": {}, "outputs": [], "source": [ @@ -3659,7 +3722,7 @@ }, { "cell_type": "code", - "execution_count": 26, + "execution_count": 209, "metadata": {}, "outputs": [], "source": [ @@ -3675,7 +3738,7 @@ }, { "cell_type": "code", - "execution_count": 27, + "execution_count": 210, "metadata": {}, "outputs": [], "source": [ @@ -3705,7 +3768,7 @@ }, { "cell_type": "code", - "execution_count": 28, + "execution_count": 211, "metadata": {}, "outputs": [], "source": [ @@ -3714,7 +3777,7 @@ }, { "cell_type": "code", - "execution_count": 29, + "execution_count": 212, "metadata": {}, "outputs": [], "source": [ @@ -3727,7 +3790,7 @@ }, { "cell_type": "code", - "execution_count": 30, + "execution_count": 213, "metadata": {}, "outputs": [], "source": [ @@ -3736,7 +3799,7 @@ }, { "cell_type": "code", - "execution_count": 31, + "execution_count": 214, "metadata": {}, "outputs": [], "source": [ @@ -3745,7 +3808,7 @@ }, { "cell_type": "code", - "execution_count": 32, + "execution_count": 215, "metadata": {}, "outputs": [], "source": [ @@ -3754,7 +3817,7 @@ }, { "cell_type": "code", - "execution_count": 33, + "execution_count": 216, "metadata": {}, "outputs": [], "source": [ @@ -3786,7 +3849,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.6" + "version": "3.7.9" } }, "nbformat": 4, diff --git a/10_neural_nets_with_keras.ipynb b/10_neural_nets_with_keras.ipynb index caeac36..70b806e 100644 --- a/10_neural_nets_with_keras.ipynb +++ b/10_neural_nets_with_keras.ipynb @@ -84,11 +84,7 @@ " print(\"Saving figure\", fig_id)\n", " if tight_layout:\n", " plt.tight_layout()\n", - " plt.savefig(path, format=fig_extension, dpi=resolution)\n", - "\n", - "# Ignore useless warnings (see SciPy issue #5998)\n", - "import warnings\n", - "warnings.filterwarnings(action=\"ignore\", message=\"^internal gelsd\")" + " plt.savefig(path, format=fig_extension, 
dpi=resolution)\n" ] }, { @@ -735,13 +731,21 @@ "y_proba.round(2)" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Warning**: `model.predict_classes(X_new)` is deprecated. It is replaced with `np.argmax(model.predict(X_new), axis=-1)`." + ] + }, { "cell_type": "code", "execution_count": 44, "metadata": {}, "outputs": [], "source": [ - "y_pred = model.predict_classes(X_new)\n", + "#y_pred = model.predict_classes(X_new) # deprecated\n", + "y_pred = np.argmax(model.predict(X_new), axis=-1)\n", "y_pred" ] }, @@ -1514,7 +1518,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "**Warning**: the following cell crashes at the end of training. This seems to be caused by [Keras issue #13586](https://github.com/keras-team/keras/issues/13586), which was triggered by a recent change in Scikit-Learn. [Pull Request #13598](https://github.com/keras-team/keras/pull/13598) seems to fix the issue, so this problem should be resolved soon." + "**Warning**: the following cell crashes at the end of training. This seems to be caused by [Keras issue #13586](https://github.com/keras-team/keras/issues/13586), which was triggered by a recent change in Scikit-Learn. [Pull Request #13598](https://github.com/keras-team/keras/pull/13598) seems to fix the issue, so this problem should be resolved soon. In the meantime, I've added `.tolist()` and `.rvs(1000).tolist()` as workarounds." ] }, { @@ -1528,8 +1532,8 @@ "\n", "param_distribs = {\n", " \"n_hidden\": [0, 1, 2, 3],\n", - " \"n_neurons\": np.arange(1, 100),\n", - " \"learning_rate\": reciprocal(3e-4, 3e-2),\n", + " \"n_neurons\": np.arange(1, 100) .tolist(),\n", + " \"learning_rate\": reciprocal(3e-4, 3e-2) .rvs(1000).tolist(),\n", "}\n", "\n", "rnd_search_cv = RandomizedSearchCV(keras_reg, param_distribs, n_iter=10, cv=3, verbose=2)\n", @@ -1888,6 +1892,7 @@ "plt.gca().set_xscale('log')\n", "plt.hlines(min(expon_lr.losses), min(expon_lr.rates), max(expon_lr.rates))\n", "plt.axis([min(expon_lr.rates), max(expon_lr.rates), 0, expon_lr.losses[0]])\n", + "plt.grid()\n", "plt.xlabel(\"Learning rate\")\n", "plt.ylabel(\"Loss\")" ] @@ -1896,7 +1901,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "The loss starts shooting back up violently around 3e-1, so let's try using 2e-1 as our learning rate:" + "The loss starts shooting back up violently when the learning rate goes over 6e-1, so let's try using half of that, at 3e-1:" ] }, { @@ -1931,7 +1936,7 @@ "outputs": [], "source": [ "model.compile(loss=\"sparse_categorical_crossentropy\",\n", - " optimizer=keras.optimizers.SGD(lr=2e-1),\n", + " optimizer=keras.optimizers.SGD(lr=3e-1),\n", " metrics=[\"accuracy\"])" ] }, @@ -1958,7 +1963,7 @@ "\n", "history = model.fit(X_train, y_train, epochs=100,\n", " validation_data=(X_valid, y_valid),\n", - " callbacks=[early_stopping_cb, checkpoint_cb, tensorboard_cb])" + " callbacks=[checkpoint_cb, early_stopping_cb, tensorboard_cb])" ] }, { @@ -2011,7 +2016,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.6" + "version": "3.7.9" }, "nav_menu": { "height": "264px", diff --git a/11_training_deep_neural_networks.ipynb b/11_training_deep_neural_networks.ipynb index 9621268..207e64f 100644 --- a/11_training_deep_neural_networks.ipynb +++ b/11_training_deep_neural_networks.ipynb @@ -1039,7 +1039,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Great! We got quite a bit of transfer: the error rate dropped by a factor of 4!" + "Great! 
We got quite a bit of transfer: the error rate dropped by a factor of 4.5!" ] }, { @@ -1048,7 +1048,7 @@ "metadata": {}, "outputs": [], "source": [ - "(100 - 96.95) / (100 - 99.25)" + "(100 - 97.05) / (100 - 99.35)" ] }, { @@ -2274,7 +2274,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "The model with the lowest validation loss gets about 47% accuracy on the validation set. It took 39 epochs to reach the lowest validation loss, with roughly 10 seconds per epoch on my laptop (without a GPU). Let's see if we can improve performance using Batch Normalization." + "The model with the lowest validation loss gets about 47.6% accuracy on the validation set. It took 27 epochs to reach the lowest validation loss, with roughly 8 seconds per epoch on my laptop (without a GPU). Let's see if we can improve performance using Batch Normalization." ] }, { @@ -2339,9 +2339,9 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "* *Is the model converging faster than before?* Much faster! The previous model took 39 epochs to reach the lowest validation loss, while the new model with BN took 18 epochs. That's more than twice as fast as the previous model. The BN layers stabilized training and allowed us to use a much larger learning rate, so convergence was faster.\n", - "* *Does BN produce a better model?* Yes! The final model is also much better, with 55% accuracy instead of 47%. It's still not a very good model, but at least it's much better than before (a Convolutional Neural Network would do much better, but that's a different topic, see chapter 14).\n", - "* *How does BN affect training speed?* Although the model converged twice as fast, each epoch took about 16s instead of 10s, because of the extra computations required by the BN layers. So overall, although the number of epochs was reduced by 50%, the training time (wall time) was shortened by 30%. Which is still pretty significant!" + "* *Is the model converging faster than before?* Much faster! The previous model took 27 epochs to reach the lowest validation loss, while the new model achieved that same loss in just 5 epochs and continued to make progress until the 16th epoch. The BN layers stabilized training and allowed us to use a much larger learning rate, so convergence was faster.\n", + "* *Does BN produce a better model?* Yes! The final model is also much better, with 54.0% accuracy instead of 47.6%. It's still not a very good model, but at least it's much better than before (a Convolutional Neural Network would do much better, but that's a different topic, see chapter 14).\n", + "* *How does BN affect training speed?* Although the model converged much faster, each epoch took about 12s instead of 8s, because of the extra computations required by the BN layers. But overall the training time (wall time) was shortened significantly!" ] }, { @@ -2412,7 +2412,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "We get 51.4% accuracy, which is better than the original model, but not quite as good as the model using batch normalization. Moreover, it took 13 epochs to reach the best model, which is much faster than both the original model and the BN model, plus each epoch took only 10 seconds, just like the original model. So it's by far the fastest model to train (both in terms of epochs and wall time)." + "We get 47.9% accuracy, which is not much better than the original model (47.6%), and not as good as the model using batch normalization (54.0%). 
However, convergence was almost as fast as with the BN model, plus each epoch took only 7 seconds. So it's by far the fastest model to train so far." ] }, { @@ -2473,7 +2473,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "The model reaches 50.8% accuracy on the validation set. That's very slightly worse than without dropout (51.4%). With an extensive hyperparameter search, it might be possible to do better (I tried dropout rates of 5%, 10%, 20% and 40%, and learning rates 1e-4, 3e-4, 5e-4, and 1e-3), but probably not much better in this case." + "The model reaches 48.9% accuracy on the validation set. That's very slightly better than without dropout (47.6%). With an extensive hyperparameter search, it might be possible to do better (I tried dropout rates of 5%, 10%, 20% and 40%, and learning rates 1e-4, 3e-4, 5e-4, and 1e-3), but probably not much better in this case." ] }, { @@ -2561,7 +2561,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "We only get virtually no accuracy improvement in this case (from 50.8% to 50.9%).\n", + "We get no accuracy improvement in this case (we're still at 48.9% accuracy).\n", "\n", "So the best model we got in this exercise is the Batch Normalization model." ] @@ -2655,7 +2655,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "One cycle allowed us to train the model in just 15 epochs, each taking only 3 seconds (thanks to the larger batch size). This is over 3 times faster than the fastest model we trained so far. Moreover, we improved the model's performance (from 50.8% to 52.8%). The batch normalized model reaches a slightly better performance, but it's much slower to train." + "One cycle allowed us to train the model in just 15 epochs, each taking only 2 seconds (thanks to the larger batch size). This is several times faster than the fastest model we trained so far. Moreover, we improved the model's performance (from 47.6% to 52.0%). The batch normalized model reaches a slightly better performance (54%), but it's much slower to train." 
] }, { @@ -2682,7 +2682,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.6" + "version": "3.7.9" }, "nav_menu": { "height": "360px", diff --git a/12_custom_models_and_training_with_tensorflow.ipynb b/12_custom_models_and_training_with_tensorflow.ipynb index 6e59a58..2826b8a 100644 --- a/12_custom_models_and_training_with_tensorflow.ipynb +++ b/12_custom_models_and_training_with_tensorflow.ipynb @@ -59,10 +59,11 @@ "except Exception:\n", " pass\n", "\n", - "# TensorFlow ≥2.0 is required\n", + "# TensorFlow ≥2.4 is required in this notebook\n", + "# Earlier 2.x versions will mostly work the same, but with a few bugs\n", "import tensorflow as tf\n", "from tensorflow import keras\n", - "assert tf.__version__ >= \"2.0\"\n", + "assert tf.__version__ >= \"2.4\"\n", "\n", "# Common imports\n", "import numpy as np\n", @@ -1033,8 +1034,8 @@ "metadata": {}, "outputs": [], "source": [ - "#model = keras.models.load_model(\"my_model_with_a_custom_loss_class.h5\", # TODO: check PR #25956\n", - "# custom_objects={\"HuberLoss\": HuberLoss})" + "model = keras.models.load_model(\"my_model_with_a_custom_loss_class.h5\",\n", + " custom_objects={\"HuberLoss\": HuberLoss})" ] }, { @@ -1052,16 +1053,6 @@ "execution_count": 82, "metadata": {}, "outputs": [], - "source": [ - "#model = keras.models.load_model(\"my_model_with_a_custom_loss_class.h5\", # TODO: check PR #25956\n", - "# custom_objects={\"HuberLoss\": HuberLoss})" - ] - }, - { - "cell_type": "code", - "execution_count": 83, - "metadata": {}, - "outputs": [], "source": [ "model.loss.threshold" ] @@ -1075,7 +1066,7 @@ }, { "cell_type": "code", - "execution_count": 84, + "execution_count": 83, "metadata": {}, "outputs": [], "source": [ @@ -1086,7 +1077,7 @@ }, { "cell_type": "code", - "execution_count": 85, + "execution_count": 84, "metadata": {}, "outputs": [], "source": [ @@ -1106,7 +1097,7 @@ }, { "cell_type": "code", - "execution_count": 86, + "execution_count": 85, "metadata": {}, "outputs": [], "source": [ @@ -1118,7 +1109,7 @@ }, { "cell_type": "code", - "execution_count": 87, + "execution_count": 86, "metadata": {}, "outputs": [], "source": [ @@ -1129,7 +1120,7 @@ }, { "cell_type": "code", - "execution_count": 88, + "execution_count": 87, "metadata": {}, "outputs": [], "source": [ @@ -1145,7 +1136,7 @@ }, { "cell_type": "code", - "execution_count": 89, + "execution_count": 88, "metadata": {}, "outputs": [], "source": [ @@ -1154,7 +1145,7 @@ }, { "cell_type": "code", - "execution_count": 90, + "execution_count": 89, "metadata": {}, "outputs": [], "source": [ @@ -1164,7 +1155,7 @@ }, { "cell_type": "code", - "execution_count": 91, + "execution_count": 90, "metadata": {}, "outputs": [], "source": [ @@ -1173,7 +1164,7 @@ }, { "cell_type": "code", - "execution_count": 92, + "execution_count": 91, "metadata": {}, "outputs": [], "source": [ @@ -1189,7 +1180,7 @@ }, { "cell_type": "code", - "execution_count": 93, + "execution_count": 92, "metadata": {}, "outputs": [], "source": [ @@ -1204,7 +1195,7 @@ }, { "cell_type": "code", - "execution_count": 94, + "execution_count": 93, "metadata": {}, "outputs": [], "source": [ @@ -1215,7 +1206,7 @@ }, { "cell_type": "code", - "execution_count": 95, + "execution_count": 94, "metadata": {}, "outputs": [], "source": [ @@ -1231,7 +1222,7 @@ }, { "cell_type": "code", - "execution_count": 96, + "execution_count": 95, "metadata": {}, "outputs": [], "source": [ @@ -1240,7 +1231,7 @@ }, { "cell_type": "code", - "execution_count": 97, + "execution_count": 96, "metadata": 
{}, "outputs": [], "source": [ @@ -1250,7 +1241,7 @@ }, { "cell_type": "code", - "execution_count": 98, + "execution_count": 97, "metadata": {}, "outputs": [], "source": [ @@ -1259,7 +1250,7 @@ }, { "cell_type": "code", - "execution_count": 99, + "execution_count": 98, "metadata": {}, "outputs": [], "source": [ @@ -1282,7 +1273,7 @@ }, { "cell_type": "code", - "execution_count": 100, + "execution_count": 99, "metadata": {}, "outputs": [], "source": [ @@ -1293,7 +1284,7 @@ }, { "cell_type": "code", - "execution_count": 101, + "execution_count": 100, "metadata": {}, "outputs": [], "source": [ @@ -1306,7 +1297,7 @@ }, { "cell_type": "code", - "execution_count": 102, + "execution_count": 101, "metadata": {}, "outputs": [], "source": [ @@ -1315,7 +1306,7 @@ }, { "cell_type": "code", - "execution_count": 103, + "execution_count": 102, "metadata": {}, "outputs": [], "source": [ @@ -1326,7 +1317,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "**Warning**: if you use the same function as the loss and a metric, you may be surprised to see different results. This is generally just due to floating point precision errors: even though the mathematical equations are equivalent, the operations are not run in the same order, which can lead to small differences. Moreover, when using sample weights, there's more than just precision errors:\n", + "**Note**: if you use the same function as the loss and a metric, you may be surprised to see different results. This is generally just due to floating point precision errors: even though the mathematical equations are equivalent, the operations are not run in the same order, which can lead to small differences. Moreover, when using sample weights, there's more than just precision errors:\n", "* the loss since the start of the epoch is the mean of all batch losses seen so far. Each batch loss is the sum of the weighted instance losses divided by the _batch size_ (not the sum of weights, so the batch loss is _not_ the weighted mean of the losses).\n", "* the metric since the start of the epoch is equal to the sum of weighted instance losses divided by sum of all weights seen so far. In other words, it is the weighted mean of all the instance losses. 
Not the same thing.\n", "\n", @@ -1335,7 +1326,7 @@ }, { "cell_type": "code", - "execution_count": 104, + "execution_count": 103, "metadata": {}, "outputs": [], "source": [ @@ -1344,7 +1335,7 @@ }, { "cell_type": "code", - "execution_count": 105, + "execution_count": 104, "metadata": {}, "outputs": [], "source": [ @@ -1354,7 +1345,7 @@ }, { "cell_type": "code", - "execution_count": 106, + "execution_count": 105, "metadata": {}, "outputs": [], "source": [ @@ -1370,7 +1361,7 @@ }, { "cell_type": "code", - "execution_count": 107, + "execution_count": 106, "metadata": {}, "outputs": [], "source": [ @@ -1380,7 +1371,7 @@ }, { "cell_type": "code", - "execution_count": 108, + "execution_count": 107, "metadata": {}, "outputs": [], "source": [ @@ -1389,7 +1380,7 @@ }, { "cell_type": "code", - "execution_count": 109, + "execution_count": 108, "metadata": {}, "outputs": [], "source": [ @@ -1398,7 +1389,7 @@ }, { "cell_type": "code", - "execution_count": 110, + "execution_count": 109, "metadata": {}, "outputs": [], "source": [ @@ -1407,7 +1398,7 @@ }, { "cell_type": "code", - "execution_count": 111, + "execution_count": 110, "metadata": {}, "outputs": [], "source": [ @@ -1423,7 +1414,7 @@ }, { "cell_type": "code", - "execution_count": 112, + "execution_count": 111, "metadata": {}, "outputs": [], "source": [ @@ -1431,15 +1422,9 @@ " def __init__(self, threshold=1.0, **kwargs):\n", " super().__init__(**kwargs) # handles base args (e.g., dtype)\n", " self.threshold = threshold\n", - " #self.huber_fn = create_huber(threshold) # TODO: investigate why this fails\n", + " self.huber_fn = create_huber(threshold)\n", " self.total = self.add_weight(\"total\", initializer=\"zeros\")\n", " self.count = self.add_weight(\"count\", initializer=\"zeros\")\n", - " def huber_fn(self, y_true, y_pred): # workaround\n", - " error = y_true - y_pred\n", - " is_small_error = tf.abs(error) < self.threshold\n", - " squared_loss = tf.square(error) / 2\n", - " linear_loss = self.threshold * tf.abs(error) - self.threshold**2 / 2\n", - " return tf.where(is_small_error, squared_loss, linear_loss)\n", " def update_state(self, y_true, y_pred, sample_weight=None):\n", " metric = self.huber_fn(y_true, y_pred)\n", " self.total.assign_add(tf.reduce_sum(metric))\n", @@ -1451,16 +1436,9 @@ " return {**base_config, \"threshold\": self.threshold}" ] }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**Warning**: when running the following cell, if you get autograph warnings such as `WARNING:tensorflow:AutoGraph could not transform [...] 
and will run it as-is`, then please install version 0.2.2 of the gast library (e.g., by running `!pip install gast==0.2.2`), then restart the kernel and run this notebook again from the beginning (see [autograph issue #1](https://github.com/tensorflow/autograph/issues/1) for more details):" - ] - }, { "cell_type": "code", - "execution_count": 113, + "execution_count": 112, "metadata": {}, "outputs": [], "source": [ @@ -1474,7 +1452,7 @@ }, { "cell_type": "code", - "execution_count": 114, + "execution_count": 113, "metadata": {}, "outputs": [], "source": [ @@ -1488,7 +1466,7 @@ }, { "cell_type": "code", - "execution_count": 115, + "execution_count": 114, "metadata": {}, "outputs": [], "source": [ @@ -1497,7 +1475,7 @@ }, { "cell_type": "code", - "execution_count": 116, + "execution_count": 115, "metadata": {}, "outputs": [], "source": [ @@ -1514,7 +1492,7 @@ }, { "cell_type": "code", - "execution_count": 117, + "execution_count": 116, "metadata": {}, "outputs": [], "source": [ @@ -1525,7 +1503,7 @@ }, { "cell_type": "code", - "execution_count": 118, + "execution_count": 117, "metadata": {}, "outputs": [], "source": [ @@ -1538,7 +1516,7 @@ }, { "cell_type": "code", - "execution_count": 119, + "execution_count": 118, "metadata": {}, "outputs": [], "source": [ @@ -1547,7 +1525,7 @@ }, { "cell_type": "code", - "execution_count": 120, + "execution_count": 119, "metadata": {}, "outputs": [], "source": [ @@ -1556,7 +1534,7 @@ }, { "cell_type": "code", - "execution_count": 121, + "execution_count": 120, "metadata": {}, "outputs": [], "source": [ @@ -1565,18 +1543,18 @@ }, { "cell_type": "code", - "execution_count": 122, + "execution_count": 121, "metadata": {}, "outputs": [], "source": [ - "#model = keras.models.load_model(\"my_model_with_a_custom_metric.h5\", # TODO: check PR #25956\n", - "# custom_objects={\"huber_fn\": create_huber(2.0),\n", - "# \"HuberMetric\": HuberMetric})" + "model = keras.models.load_model(\"my_model_with_a_custom_metric.h5\",\n", + " custom_objects={\"huber_fn\": create_huber(2.0),\n", + " \"HuberMetric\": HuberMetric})" ] }, { "cell_type": "code", - "execution_count": 123, + "execution_count": 122, "metadata": {}, "outputs": [], "source": [ @@ -1592,7 +1570,7 @@ }, { "cell_type": "code", - "execution_count": 124, + "execution_count": 123, "metadata": {}, "outputs": [], "source": [ @@ -1608,7 +1586,7 @@ }, { "cell_type": "code", - "execution_count": 125, + "execution_count": 124, "metadata": {}, "outputs": [], "source": [ @@ -1634,7 +1612,7 @@ }, { "cell_type": "code", - "execution_count": 126, + "execution_count": 125, "metadata": {}, "outputs": [], "source": [ @@ -1645,7 +1623,7 @@ }, { "cell_type": "code", - "execution_count": 127, + "execution_count": 126, "metadata": {}, "outputs": [], "source": [ @@ -1658,7 +1636,7 @@ }, { "cell_type": "code", - "execution_count": 128, + "execution_count": 127, "metadata": {}, "outputs": [], "source": [ @@ -1667,7 +1645,7 @@ }, { "cell_type": "code", - "execution_count": 129, + "execution_count": 128, "metadata": { "scrolled": true }, @@ -1680,7 +1658,7 @@ }, { "cell_type": "code", - "execution_count": 130, + "execution_count": 129, "metadata": {}, "outputs": [], "source": [ @@ -1689,7 +1667,7 @@ }, { "cell_type": "code", - "execution_count": 131, + "execution_count": 130, "metadata": {}, "outputs": [], "source": [ @@ -1698,33 +1676,26 @@ }, { "cell_type": "code", - "execution_count": 132, + "execution_count": 131, "metadata": {}, "outputs": [], "source": [ - "#model = keras.models.load_model(\"my_model_with_a_custom_metric_v2.h5\", 
# TODO: check PR #25956\n", - "# custom_objects={\"HuberMetric\": HuberMetric})" + "model = keras.models.load_model(\"my_model_with_a_custom_metric_v2.h5\",\n", + " custom_objects={\"HuberMetric\": HuberMetric})" ] }, { "cell_type": "code", - "execution_count": 133, + "execution_count": 132, "metadata": {}, "outputs": [], "source": [ "model.fit(X_train_scaled.astype(np.float32), y_train.astype(np.float32), epochs=2)" ] }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**Warning**: In TF 2.2, tf.keras adds an extra first metric in `model.metrics` at position 0 (see [TF issue #38150](https://github.com/tensorflow/tensorflow/issues/38150)). This forces us to use `model.metrics[-1]` rather than `model.metrics[0]` to access the `HuberMetric`." - ] - }, { "cell_type": "code", - "execution_count": 134, + "execution_count": 133, "metadata": { "scrolled": true }, @@ -1742,7 +1713,7 @@ }, { "cell_type": "code", - "execution_count": 135, + "execution_count": 134, "metadata": {}, "outputs": [], "source": [ @@ -1751,7 +1722,7 @@ }, { "cell_type": "code", - "execution_count": 136, + "execution_count": 135, "metadata": {}, "outputs": [], "source": [ @@ -1767,7 +1738,7 @@ }, { "cell_type": "code", - "execution_count": 137, + "execution_count": 136, "metadata": {}, "outputs": [], "source": [ @@ -1778,7 +1749,7 @@ }, { "cell_type": "code", - "execution_count": 138, + "execution_count": 137, "metadata": {}, "outputs": [], "source": [ @@ -1787,7 +1758,7 @@ " keras.layers.Dense(1),\n", " exponential_layer\n", "])\n", - "model.compile(loss=\"mse\", optimizer=\"nadam\")\n", + "model.compile(loss=\"mse\", optimizer=\"sgd\")\n", "model.fit(X_train_scaled, y_train, epochs=5,\n", " validation_data=(X_valid_scaled, y_valid))\n", "model.evaluate(X_test_scaled, y_test)" @@ -1795,7 +1766,7 @@ }, { "cell_type": "code", - "execution_count": 139, + "execution_count": 138, "metadata": {}, "outputs": [], "source": [ @@ -1827,7 +1798,7 @@ }, { "cell_type": "code", - "execution_count": 140, + "execution_count": 139, "metadata": {}, "outputs": [], "source": [ @@ -1838,7 +1809,7 @@ }, { "cell_type": "code", - "execution_count": 141, + "execution_count": 140, "metadata": {}, "outputs": [], "source": [ @@ -1850,7 +1821,7 @@ }, { "cell_type": "code", - "execution_count": 142, + "execution_count": 141, "metadata": {}, "outputs": [], "source": [ @@ -1862,7 +1833,7 @@ }, { "cell_type": "code", - "execution_count": 143, + "execution_count": 142, "metadata": {}, "outputs": [], "source": [ @@ -1871,7 +1842,7 @@ }, { "cell_type": "code", - "execution_count": 144, + "execution_count": 143, "metadata": {}, "outputs": [], "source": [ @@ -1881,7 +1852,7 @@ }, { "cell_type": "code", - "execution_count": 145, + "execution_count": 144, "metadata": {}, "outputs": [], "source": [ @@ -1897,7 +1868,7 @@ }, { "cell_type": "code", - "execution_count": 146, + "execution_count": 145, "metadata": {}, "outputs": [], "source": [ @@ -1908,7 +1879,7 @@ }, { "cell_type": "code", - "execution_count": 147, + "execution_count": 146, "metadata": {}, "outputs": [], "source": [ @@ -1926,7 +1897,7 @@ }, { "cell_type": "code", - "execution_count": 148, + "execution_count": 147, "metadata": {}, "outputs": [], "source": [ @@ -1948,7 +1919,7 @@ }, { "cell_type": "code", - "execution_count": 149, + "execution_count": 148, "metadata": {}, "outputs": [], "source": [ @@ -1967,7 +1938,7 @@ }, { "cell_type": "code", - "execution_count": 150, + "execution_count": 149, "metadata": {}, "outputs": [], "source": [ @@ -1976,7 +1947,7 @@ }, { "cell_type": "code", 
- "execution_count": 151, + "execution_count": 150, "metadata": {}, "outputs": [], "source": [ @@ -1996,7 +1967,7 @@ }, { "cell_type": "code", - "execution_count": 152, + "execution_count": 151, "metadata": {}, "outputs": [], "source": [ @@ -2019,7 +1990,7 @@ }, { "cell_type": "code", - "execution_count": 153, + "execution_count": 152, "metadata": {}, "outputs": [], "source": [ @@ -2030,7 +2001,7 @@ }, { "cell_type": "code", - "execution_count": 154, + "execution_count": 153, "metadata": {}, "outputs": [], "source": [ @@ -2043,7 +2014,7 @@ }, { "cell_type": "code", - "execution_count": 155, + "execution_count": 154, "metadata": {}, "outputs": [], "source": [ @@ -2052,7 +2023,7 @@ }, { "cell_type": "code", - "execution_count": 156, + "execution_count": 155, "metadata": {}, "outputs": [], "source": [ @@ -2061,7 +2032,7 @@ }, { "cell_type": "code", - "execution_count": 157, + "execution_count": 156, "metadata": {}, "outputs": [], "source": [ @@ -2077,7 +2048,7 @@ }, { "cell_type": "code", - "execution_count": 158, + "execution_count": 157, "metadata": {}, "outputs": [], "source": [ @@ -2088,7 +2059,7 @@ }, { "cell_type": "code", - "execution_count": 159, + "execution_count": 158, "metadata": {}, "outputs": [], "source": [ @@ -2103,7 +2074,7 @@ }, { "cell_type": "code", - "execution_count": 160, + "execution_count": 159, "metadata": {}, "outputs": [], "source": [ @@ -2120,9 +2091,16 @@ "## Losses and Metrics Based on Model Internals" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Note**: due to an issue introduced in TF 2.2 ([#46858](https://github.com/tensorflow/tensorflow/issues/46858)), it is currently not possible to use `add_loss()` along with the `build()` method. So the following code differs from the book: I create the `reconstruct` layer in the constructor instead of the `build()` method. Unfortunately, this means that the number of units in this layer must be hard-coded (alternatively, it could be passed as an argument to the constructor)." 
+ ] + }, { "cell_type": "code", - "execution_count": 161, + "execution_count": 160, "metadata": {}, "outputs": [], "source": [ @@ -2133,13 +2111,14 @@ " kernel_initializer=\"lecun_normal\")\n", " for _ in range(5)]\n", " self.out = keras.layers.Dense(output_dim)\n", - " # TODO: check https://github.com/tensorflow/tensorflow/issues/26260\n", - " #self.reconstruction_mean = keras.metrics.Mean(name=\"reconstruction_error\")\n", + " self.reconstruct = keras.layers.Dense(8) # workaround for TF issue #46858\n", + " self.reconstruction_mean = keras.metrics.Mean(name=\"reconstruction_error\")\n", "\n", - " def build(self, batch_input_shape):\n", - " n_inputs = batch_input_shape[-1]\n", - " self.reconstruct = keras.layers.Dense(n_inputs)\n", - " super().build(batch_input_shape)\n", + " #Commented out due to TF issue #46858, see the note above\n", + " #def build(self, batch_input_shape):\n", + " # n_inputs = batch_input_shape[-1]\n", + " # self.reconstruct = keras.layers.Dense(n_inputs)\n", + " # super().build(batch_input_shape)\n", "\n", " def call(self, inputs, training=None):\n", " Z = inputs\n", @@ -2148,15 +2127,15 @@ " reconstruction = self.reconstruct(Z)\n", " recon_loss = tf.reduce_mean(tf.square(reconstruction - inputs))\n", " self.add_loss(0.05 * recon_loss)\n", - " #if training:\n", - " # result = self.reconstruction_mean(recon_loss)\n", - " # self.add_metric(result)\n", + " if training:\n", + " result = self.reconstruction_mean(recon_loss)\n", + " self.add_metric(result)\n", " return self.out(Z)" ] }, { "cell_type": "code", - "execution_count": 162, + "execution_count": 161, "metadata": {}, "outputs": [], "source": [ @@ -2167,7 +2146,7 @@ }, { "cell_type": "code", - "execution_count": 163, + "execution_count": 162, "metadata": {}, "outputs": [], "source": [ @@ -2186,7 +2165,7 @@ }, { "cell_type": "code", - "execution_count": 164, + "execution_count": 163, "metadata": {}, "outputs": [], "source": [ @@ -2196,7 +2175,7 @@ }, { "cell_type": "code", - "execution_count": 165, + "execution_count": 164, "metadata": {}, "outputs": [], "source": [ @@ -2207,7 +2186,7 @@ }, { "cell_type": "code", - "execution_count": 166, + "execution_count": 165, "metadata": {}, "outputs": [], "source": [ @@ -2216,7 +2195,7 @@ }, { "cell_type": "code", - "execution_count": 167, + "execution_count": 166, "metadata": {}, "outputs": [], "source": [ @@ -2229,7 +2208,7 @@ }, { "cell_type": "code", - "execution_count": 168, + "execution_count": 167, "metadata": {}, "outputs": [], "source": [ @@ -2238,7 +2217,7 @@ }, { "cell_type": "code", - "execution_count": 169, + "execution_count": 168, "metadata": {}, "outputs": [], "source": [ @@ -2254,7 +2233,7 @@ }, { "cell_type": "code", - "execution_count": 170, + "execution_count": 169, "metadata": {}, "outputs": [], "source": [ @@ -2268,7 +2247,7 @@ }, { "cell_type": "code", - "execution_count": 171, + "execution_count": 170, "metadata": {}, "outputs": [], "source": [ @@ -2277,7 +2256,7 @@ }, { "cell_type": "code", - "execution_count": 172, + "execution_count": 171, "metadata": {}, "outputs": [], "source": [ @@ -2290,7 +2269,7 @@ }, { "cell_type": "code", - "execution_count": 173, + "execution_count": 172, "metadata": {}, "outputs": [], "source": [ @@ -2299,7 +2278,7 @@ }, { "cell_type": "code", - "execution_count": 174, + "execution_count": 173, "metadata": {}, "outputs": [], "source": [ @@ -2313,7 +2292,7 @@ }, { "cell_type": "code", - "execution_count": 175, + "execution_count": 174, "metadata": {}, "outputs": [], "source": [ @@ -2322,7 +2301,7 @@ }, { "cell_type": "code", 
- "execution_count": 176, + "execution_count": 175, "metadata": {}, "outputs": [], "source": [ @@ -2336,7 +2315,7 @@ }, { "cell_type": "code", - "execution_count": 177, + "execution_count": 176, "metadata": {}, "outputs": [], "source": [ @@ -2351,7 +2330,7 @@ }, { "cell_type": "code", - "execution_count": 178, + "execution_count": 177, "metadata": {}, "outputs": [], "source": [ @@ -2366,7 +2345,7 @@ }, { "cell_type": "code", - "execution_count": 179, + "execution_count": 178, "metadata": {}, "outputs": [], "source": [ @@ -2375,7 +2354,7 @@ }, { "cell_type": "code", - "execution_count": 180, + "execution_count": 179, "metadata": {}, "outputs": [], "source": [ @@ -2384,7 +2363,7 @@ }, { "cell_type": "code", - "execution_count": 181, + "execution_count": 180, "metadata": {}, "outputs": [], "source": [ @@ -2399,7 +2378,7 @@ }, { "cell_type": "code", - "execution_count": 182, + "execution_count": 181, "metadata": {}, "outputs": [], "source": [ @@ -2412,7 +2391,7 @@ }, { "cell_type": "code", - "execution_count": 183, + "execution_count": 182, "metadata": {}, "outputs": [], "source": [ @@ -2421,7 +2400,7 @@ }, { "cell_type": "code", - "execution_count": 184, + "execution_count": 183, "metadata": {}, "outputs": [], "source": [ @@ -2434,7 +2413,7 @@ }, { "cell_type": "code", - "execution_count": 185, + "execution_count": 184, "metadata": {}, "outputs": [], "source": [ @@ -2448,7 +2427,7 @@ }, { "cell_type": "code", - "execution_count": 186, + "execution_count": 185, "metadata": {}, "outputs": [], "source": [ @@ -2458,7 +2437,7 @@ }, { "cell_type": "code", - "execution_count": 187, + "execution_count": 186, "metadata": {}, "outputs": [], "source": [ @@ -2478,7 +2457,7 @@ }, { "cell_type": "code", - "execution_count": 188, + "execution_count": 187, "metadata": {}, "outputs": [], "source": [ @@ -2489,7 +2468,7 @@ }, { "cell_type": "code", - "execution_count": 189, + "execution_count": 188, "metadata": {}, "outputs": [], "source": [ @@ -2503,7 +2482,7 @@ }, { "cell_type": "code", - "execution_count": 190, + "execution_count": 189, "metadata": {}, "outputs": [], "source": [ @@ -2514,7 +2493,7 @@ }, { "cell_type": "code", - "execution_count": 191, + "execution_count": 190, "metadata": {}, "outputs": [], "source": [ @@ -2528,7 +2507,7 @@ }, { "cell_type": "code", - "execution_count": 192, + "execution_count": 191, "metadata": {}, "outputs": [], "source": [ @@ -2553,7 +2532,7 @@ }, { "cell_type": "code", - "execution_count": 193, + "execution_count": 192, "metadata": {}, "outputs": [], "source": [ @@ -2568,7 +2547,7 @@ }, { "cell_type": "code", - "execution_count": 194, + "execution_count": 193, "metadata": {}, "outputs": [], "source": [ @@ -2577,7 +2556,7 @@ }, { "cell_type": "code", - "execution_count": 195, + "execution_count": 194, "metadata": {}, "outputs": [], "source": [ @@ -2590,7 +2569,7 @@ }, { "cell_type": "code", - "execution_count": 196, + "execution_count": 195, "metadata": {}, "outputs": [], "source": [ @@ -2606,7 +2585,7 @@ }, { "cell_type": "code", - "execution_count": 197, + "execution_count": 196, "metadata": {}, "outputs": [], "source": [ @@ -2617,7 +2596,7 @@ }, { "cell_type": "code", - "execution_count": 198, + "execution_count": 197, "metadata": {}, "outputs": [], "source": [ @@ -2632,7 +2611,7 @@ }, { "cell_type": "code", - "execution_count": 199, + "execution_count": 198, "metadata": {}, "outputs": [], "source": [ @@ -2660,7 +2639,7 @@ }, { "cell_type": "code", - "execution_count": 200, + "execution_count": 199, "metadata": {}, "outputs": [], "source": [ @@ -2703,7 +2682,7 @@ }, { 
"cell_type": "code", - "execution_count": 201, + "execution_count": 200, "metadata": {}, "outputs": [], "source": [ @@ -2713,7 +2692,7 @@ }, { "cell_type": "code", - "execution_count": 202, + "execution_count": 201, "metadata": {}, "outputs": [], "source": [ @@ -2722,7 +2701,7 @@ }, { "cell_type": "code", - "execution_count": 203, + "execution_count": 202, "metadata": {}, "outputs": [], "source": [ @@ -2731,7 +2710,7 @@ }, { "cell_type": "code", - "execution_count": 204, + "execution_count": 203, "metadata": {}, "outputs": [], "source": [ @@ -2741,7 +2720,7 @@ }, { "cell_type": "code", - "execution_count": 205, + "execution_count": 204, "metadata": {}, "outputs": [], "source": [ @@ -2750,7 +2729,7 @@ }, { "cell_type": "code", - "execution_count": 206, + "execution_count": 205, "metadata": {}, "outputs": [], "source": [ @@ -2766,7 +2745,7 @@ }, { "cell_type": "code", - "execution_count": 207, + "execution_count": 206, "metadata": {}, "outputs": [], "source": [ @@ -2776,7 +2755,7 @@ }, { "cell_type": "code", - "execution_count": 208, + "execution_count": 207, "metadata": {}, "outputs": [], "source": [ @@ -2785,7 +2764,7 @@ }, { "cell_type": "code", - "execution_count": 209, + "execution_count": 208, "metadata": {}, "outputs": [], "source": [ @@ -2801,7 +2780,7 @@ }, { "cell_type": "code", - "execution_count": 210, + "execution_count": 209, "metadata": {}, "outputs": [], "source": [ @@ -2810,7 +2789,7 @@ }, { "cell_type": "code", - "execution_count": 211, + "execution_count": 210, "metadata": {}, "outputs": [], "source": [ @@ -2820,7 +2799,7 @@ }, { "cell_type": "code", - "execution_count": 212, + "execution_count": 211, "metadata": {}, "outputs": [], "source": [ @@ -2830,7 +2809,7 @@ }, { "cell_type": "code", - "execution_count": 213, + "execution_count": 212, "metadata": {}, "outputs": [], "source": [ @@ -2839,7 +2818,7 @@ }, { "cell_type": "code", - "execution_count": 214, + "execution_count": 213, "metadata": {}, "outputs": [], "source": [ @@ -2848,7 +2827,7 @@ }, { "cell_type": "code", - "execution_count": 215, + "execution_count": 214, "metadata": {}, "outputs": [], "source": [ @@ -2857,7 +2836,7 @@ }, { "cell_type": "code", - "execution_count": 216, + "execution_count": 215, "metadata": {}, "outputs": [], "source": [ @@ -2873,7 +2852,7 @@ }, { "cell_type": "code", - "execution_count": 217, + "execution_count": 216, "metadata": {}, "outputs": [], "source": [ @@ -2885,7 +2864,7 @@ }, { "cell_type": "code", - "execution_count": 218, + "execution_count": 217, "metadata": {}, "outputs": [], "source": [ @@ -2894,7 +2873,7 @@ }, { "cell_type": "code", - "execution_count": 219, + "execution_count": 218, "metadata": {}, "outputs": [], "source": [ @@ -2903,7 +2882,7 @@ }, { "cell_type": "code", - "execution_count": 220, + "execution_count": 219, "metadata": {}, "outputs": [], "source": [ @@ -2923,7 +2902,7 @@ }, { "cell_type": "code", - "execution_count": 221, + "execution_count": 220, "metadata": {}, "outputs": [], "source": [ @@ -2935,7 +2914,7 @@ }, { "cell_type": "code", - "execution_count": 222, + "execution_count": 221, "metadata": {}, "outputs": [], "source": [ @@ -2946,7 +2925,7 @@ }, { "cell_type": "code", - "execution_count": 223, + "execution_count": 222, "metadata": {}, "outputs": [], "source": [ @@ -2958,7 +2937,7 @@ }, { "cell_type": "code", - "execution_count": 224, + "execution_count": 223, "metadata": {}, "outputs": [], "source": [ @@ -2985,7 +2964,7 @@ }, { "cell_type": "code", - "execution_count": 225, + "execution_count": 224, "metadata": {}, "outputs": [], "source": [ @@ 
-2998,7 +2977,7 @@ }, { "cell_type": "code", - "execution_count": 226, + "execution_count": 225, "metadata": {}, "outputs": [], "source": [ @@ -3007,7 +2986,7 @@ }, { "cell_type": "code", - "execution_count": 227, + "execution_count": 226, "metadata": {}, "outputs": [], "source": [ @@ -3023,7 +3002,7 @@ }, { "cell_type": "code", - "execution_count": 228, + "execution_count": 227, "metadata": {}, "outputs": [], "source": [ @@ -3037,7 +3016,7 @@ }, { "cell_type": "code", - "execution_count": 229, + "execution_count": 228, "metadata": {}, "outputs": [], "source": [ @@ -3046,7 +3025,7 @@ }, { "cell_type": "code", - "execution_count": 230, + "execution_count": 229, "metadata": {}, "outputs": [], "source": [ @@ -3062,7 +3041,7 @@ }, { "cell_type": "code", - "execution_count": 231, + "execution_count": 230, "metadata": {}, "outputs": [], "source": [ @@ -3075,7 +3054,7 @@ }, { "cell_type": "code", - "execution_count": 232, + "execution_count": 231, "metadata": {}, "outputs": [], "source": [ @@ -3091,7 +3070,7 @@ }, { "cell_type": "code", - "execution_count": 233, + "execution_count": 232, "metadata": {}, "outputs": [], "source": [ @@ -3104,7 +3083,7 @@ }, { "cell_type": "code", - "execution_count": 234, + "execution_count": 233, "metadata": {}, "outputs": [], "source": [ @@ -3114,7 +3093,7 @@ }, { "cell_type": "code", - "execution_count": 235, + "execution_count": 234, "metadata": {}, "outputs": [], "source": [ @@ -3124,7 +3103,7 @@ }, { "cell_type": "code", - "execution_count": 236, + "execution_count": 235, "metadata": {}, "outputs": [], "source": [ @@ -3137,7 +3116,7 @@ }, { "cell_type": "code", - "execution_count": 237, + "execution_count": 236, "metadata": {}, "outputs": [], "source": [ @@ -3147,7 +3126,7 @@ }, { "cell_type": "code", - "execution_count": 238, + "execution_count": 237, "metadata": {}, "outputs": [], "source": [ @@ -3157,7 +3136,7 @@ }, { "cell_type": "code", - "execution_count": 239, + "execution_count": 238, "metadata": {}, "outputs": [], "source": [ @@ -3172,7 +3151,7 @@ }, { "cell_type": "code", - "execution_count": 240, + "execution_count": 239, "metadata": {}, "outputs": [], "source": [ @@ -3183,7 +3162,7 @@ }, { "cell_type": "code", - "execution_count": 241, + "execution_count": 240, "metadata": { "scrolled": true }, @@ -3195,12 +3174,12 @@ " x += 1\n", " return x\n", "\n", - "tf.autograph.to_code(add_10.python_function)" + "print(tf.autograph.to_code(add_10.python_function))" ] }, { "cell_type": "code", - "execution_count": 242, + "execution_count": 241, "metadata": {}, "outputs": [], "source": [ @@ -3214,7 +3193,7 @@ }, { "cell_type": "code", - "execution_count": 243, + "execution_count": 242, "metadata": {}, "outputs": [], "source": [ @@ -3238,7 +3217,7 @@ }, { "cell_type": "code", - "execution_count": 244, + "execution_count": 243, "metadata": {}, "outputs": [], "source": [ @@ -3250,7 +3229,7 @@ }, { "cell_type": "code", - "execution_count": 245, + "execution_count": 244, "metadata": {}, "outputs": [], "source": [ @@ -3262,7 +3241,7 @@ }, { "cell_type": "code", - "execution_count": 246, + "execution_count": 245, "metadata": {}, "outputs": [], "source": [ @@ -3291,7 +3270,7 @@ }, { "cell_type": "code", - "execution_count": 247, + "execution_count": 246, "metadata": {}, "outputs": [], "source": [ @@ -3302,7 +3281,7 @@ }, { "cell_type": "code", - "execution_count": 248, + "execution_count": 247, "metadata": {}, "outputs": [], "source": [ @@ -3327,7 +3306,7 @@ }, { "cell_type": "code", - "execution_count": 249, + "execution_count": 248, "metadata": {}, "outputs": [], 
"source": [ @@ -3336,7 +3315,7 @@ }, { "cell_type": "code", - "execution_count": 250, + "execution_count": 249, "metadata": {}, "outputs": [], "source": [ @@ -3354,7 +3333,7 @@ }, { "cell_type": "code", - "execution_count": 251, + "execution_count": 250, "metadata": {}, "outputs": [], "source": [ @@ -3365,7 +3344,7 @@ }, { "cell_type": "code", - "execution_count": 252, + "execution_count": 251, "metadata": {}, "outputs": [], "source": [ @@ -3374,7 +3353,7 @@ }, { "cell_type": "code", - "execution_count": 253, + "execution_count": 252, "metadata": {}, "outputs": [], "source": [ @@ -3390,7 +3369,7 @@ }, { "cell_type": "code", - "execution_count": 254, + "execution_count": 253, "metadata": {}, "outputs": [], "source": [ @@ -3408,7 +3387,7 @@ }, { "cell_type": "code", - "execution_count": 255, + "execution_count": 254, "metadata": {}, "outputs": [], "source": [ @@ -3419,7 +3398,7 @@ }, { "cell_type": "code", - "execution_count": 256, + "execution_count": 255, "metadata": {}, "outputs": [], "source": [ @@ -3428,7 +3407,7 @@ }, { "cell_type": "code", - "execution_count": 257, + "execution_count": 256, "metadata": {}, "outputs": [], "source": [ @@ -3437,7 +3416,7 @@ }, { "cell_type": "code", - "execution_count": 258, + "execution_count": 257, "metadata": {}, "outputs": [], "source": [ @@ -3462,7 +3441,7 @@ }, { "cell_type": "code", - "execution_count": 259, + "execution_count": 258, "metadata": {}, "outputs": [], "source": [ @@ -3508,7 +3487,7 @@ }, { "cell_type": "code", - "execution_count": 260, + "execution_count": 259, "metadata": {}, "outputs": [], "source": [ @@ -3519,7 +3498,7 @@ }, { "cell_type": "code", - "execution_count": 261, + "execution_count": 260, "metadata": {}, "outputs": [], "source": [ @@ -3576,7 +3555,7 @@ }, { "cell_type": "code", - "execution_count": 262, + "execution_count": 261, "metadata": {}, "outputs": [], "source": [ @@ -3630,7 +3609,7 @@ }, { "cell_type": "code", - "execution_count": 263, + "execution_count": 262, "metadata": {}, "outputs": [], "source": [ @@ -3652,7 +3631,7 @@ }, { "cell_type": "code", - "execution_count": 264, + "execution_count": 263, "metadata": {}, "outputs": [], "source": [ @@ -3691,7 +3670,7 @@ }, { "cell_type": "code", - "execution_count": 265, + "execution_count": 264, "metadata": {}, "outputs": [], "source": [ @@ -3704,7 +3683,7 @@ }, { "cell_type": "code", - "execution_count": 266, + "execution_count": 265, "metadata": {}, "outputs": [], "source": [ @@ -3715,7 +3694,7 @@ }, { "cell_type": "code", - "execution_count": 267, + "execution_count": 266, "metadata": {}, "outputs": [], "source": [ @@ -3728,7 +3707,7 @@ }, { "cell_type": "code", - "execution_count": 268, + "execution_count": 267, "metadata": {}, "outputs": [], "source": [ @@ -3743,7 +3722,7 @@ }, { "cell_type": "code", - "execution_count": 269, + "execution_count": 268, "metadata": {}, "outputs": [], "source": [ @@ -3787,7 +3766,7 @@ }, { "cell_type": "code", - "execution_count": 270, + "execution_count": 269, "metadata": {}, "outputs": [], "source": [ @@ -3798,7 +3777,7 @@ }, { "cell_type": "code", - "execution_count": 271, + "execution_count": 270, "metadata": {}, "outputs": [], "source": [ @@ -3816,7 +3795,7 @@ }, { "cell_type": "code", - "execution_count": 272, + "execution_count": 271, "metadata": {}, "outputs": [], "source": [ @@ -3826,7 +3805,7 @@ }, { "cell_type": "code", - "execution_count": 273, + "execution_count": 272, "metadata": {}, "outputs": [], "source": [ @@ -3840,7 +3819,7 @@ }, { "cell_type": "code", - "execution_count": 274, + "execution_count": 273, 
"metadata": {}, "outputs": [], "source": [ @@ -3901,7 +3880,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.6" + "version": "3.7.9" } }, "nbformat": 4, diff --git a/13_loading_and_preprocessing_data.ipynb b/13_loading_and_preprocessing_data.ipynb index 144b216..0c82bb2 100644 --- a/13_loading_and_preprocessing_data.ipynb +++ b/13_loading_and_preprocessing_data.ipynb @@ -1026,7 +1026,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "**Warning**: there's currently a bug preventing `from tensorflow.train import X` so we work around it by writing `X = tf.train.X`. See https://github.com/tensorflow/tensorflow/issues/33289 for more details." + "**Warning**: in TensorFlow 2.0 and 2.1, there was a bug preventing `from tensorflow.train import X` so we work around it by writing `X = tf.train.X`. See https://github.com/tensorflow/tensorflow/issues/33289 for more details." ] }, { @@ -1294,7 +1294,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "**Warning**: there's currently a bug preventing `from tensorflow.train import X` so we work around it by writing `X = tf.train.X`. See https://github.com/tensorflow/tensorflow/issues/33289 for more details." + "**Warning**: in TensorFlow 2.0 and 2.1, there was a bug preventing `from tensorflow.train import X` so we work around it by writing `X = tf.train.X`. See https://github.com/tensorflow/tensorflow/issues/33289 for more details." ] }, { @@ -1430,7 +1430,7 @@ "source": [ "import os\n", "import tarfile\n", - "import urllib\n", + "import urllib.request\n", "\n", "DOWNLOAD_ROOT = \"https://raw.githubusercontent.com/ageron/handson-ml2/master/\"\n", "HOUSING_PATH = os.path.join(\"datasets\", \"housing\")\n", @@ -2120,7 +2120,7 @@ }, { "cell_type": "code", - "execution_count": 162, + "execution_count": 130, "metadata": {}, "outputs": [], "source": [ @@ -2267,7 +2267,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "It takes about 20 seconds to load the dataset and go through it 10 times." + "It takes about 17 seconds to load the dataset and go through it 10 times." ] }, { @@ -2306,7 +2306,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Now it takes about 34 seconds to go through the dataset 10 times. That's much slower, essentially because the dataset is not cached in RAM, so it must be reloaded at each epoch. If you add `.cache()` just before `.repeat(10)`, you will see that this implementation will be about as fast as the previous one." + "Now it takes about 33 seconds to go through the dataset 10 times. That's much slower, essentially because the dataset is not cached in RAM, so it must be reloaded at each epoch. If you add `.cache()` just before `.repeat(10)`, you will see that this implementation will be about as fast as the previous one." ] }, { @@ -2609,7 +2609,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "We get about 75% accuracy on the validation set after just the first epoch, but after that the model makes no progress. We will do better in Chapter 16. For now the point is just to perform efficient preprocessing using `tf.data` and Keras preprocessing layers." + "We get about 73.7% accuracy on the validation set after just the first epoch, but after that the model makes no significant progress. We will do better in Chapter 16. For now the point is just to perform efficient preprocessing using `tf.data` and Keras preprocessing layers." 
] }, { @@ -2766,7 +2766,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.6" + "version": "3.7.9" }, "nav_menu": { "height": "264px", diff --git a/environment.yml b/environment.yml index ed38bc8..8025023 100644 --- a/environment.yml +++ b/environment.yml @@ -3,55 +3,45 @@ channels: - conda-forge - defaults dependencies: - - graphviz - - imageio=2.6 - - ipython=7.12 - - ipywidgets=7.5 - - joblib=0.14 - - jupyter=1.0 - - matplotlib=3.1 - - nbdime=2.0 - - nltk=3.4 - - numexpr=2.7 - - numpy=1.18 - - pandas=1.0 - - pillow=7.0 - - pip - - psutil=5.7 - - py-xgboost=0.90 - - pydot=1.4 - - pyglet=1.5 - - pyopengl=3.1 - - python=3.7 - - python-graphviz - #- pyvirtualdisplay=0.2 # add if on headless server - - requests=2.22 - - scikit-image=0.16 - - scikit-learn=0.22 - - scipy=1.4 - - tqdm=4.43 - - wheel - - widgetsnbextension=3.5 + - atari_py=0.2 # used only in chapter 18 + - ftfy=5.8 # used only in chapter 16 by the transformers library + - graphviz # used only in chapter 6 for dot files + - gym=0.18 # used only in chapter 18 + - ipython=7.20 # a powerful Python shell + - ipywidgets=7.6 # optionally used only in chapter 12 for tqdm in Jupyter + - joblib=0.14 # used only in chapter 2 to save/load Scikit-Learn models + - jupyter=1.0.0 # to edit and run Jupyter notebooks + - matplotlib=3.3.4 # beautiful plots. See tutorial tools_matplotlib.ipynb + - nbdime=2.1.0 # optional tool to diff Jupyter notebooks + - nltk=3.4.4 # optionally used in chapter 3, exercise 4 + - numexpr=2.7.2 # used only in the Pandas tutorial for numerical expressions + - numpy=1.19.5 # Powerful n-dimensional arrays and numerical computing tools + - opencv=4.5.1 # used only in chapter 18 by TF Agents for image preprocessing + - pandas=1.2.2 # data analysis and manipulation tool + - pillow=8.1.0 # image manipulation library, (used by matplotlib.image.imread) + - pip # Python's package-management system + - py-xgboost=1.3.0 # used only in chapter 7 for optimized Gradient Boosting + - pyglet=1.5.15 # used only in chapter 18 to render environments + - pyopengl=3.1.5 # used only in chapter 18 to render environments + - python=3.7 # Python! 
Not using latest version as some libs lack support + - python-graphviz # used only in chapter 6 for dot files + #- pyvirtualdisplay=1.3 # used only in chapter 18 if on headless server + - requests=2.25.1 # used only in chapter 19 for REST API queries + - scikit-learn=0.24.1 # machine learning library + - scipy=1.6.0 # scientific/technical computing library + - tqdm=4.56.1 # a progress bar library + - transformers=4.3.2 # Natural Language Processing lib for TF or PyTorch + - wheel # built-package format for pip + - widgetsnbextension=3.5.1 # interactive HTML widgets for Jupyter notebooks - pip: - - atari-py==0.2.6 - - ftfy==5.7 - - gast==0.2.2 - - gym==0.17.1 - - opencv-python==4.2.0.32 - - spacy==2.2.4 - - tensorboard==2.1.1 - - tensorflow-addons==0.8.3 - - tensorflow-data-validation==0.21.5 - - tensorflow-datasets==2.1.0 - - tensorflow-estimator==2.1.0 - - tensorflow-hub==0.7.0 - - tensorflow-metadata==0.21.1 - - tensorflow-model-analysis==0.21.6 - - tensorflow-probability==0.9.0 - - tensorflow-serving-api==2.1.0 # or tensorflow-serving-api-gpu if gpu - - tensorflow-transform==0.21.2 - - tensorflow==2.1.0 # or tensorflow-gpu if gpu - - tf-agents==0.3.0 - - tfx==0.21.2 - - transformers==2.8.0 - - urlextract==0.14.0 + - tensorboard-plugin-profile==2.4.0 # profiling plugin for TensorBoard + - tensorboard==2.4.1 # TensorFlow's visualization toolkit + - tensorflow-addons==0.12.1 # used only in chapter 16 for a seq2seq impl. + - tensorflow-datasets==3.0.0 # datasets repository, ready to use + - tensorflow-hub==0.9.0 # trained ML models repository, ready to use + - tensorflow-probability==0.12.1 # Optional. Probability/Stats lib. + - tensorflow-serving-api==2.4.1 # or tensorflow-serving-api-gpu if gpu + - tensorflow==2.4.1 # Deep Learning library + - tf-agents==0.7.1 # Reinforcement Learning lib based on TensorFlow + - tfx==0.27.0 # platform to deploy production ML pipelines + - urlextract==1.2.0 # optionally used in chapter 3, exercise 4 diff --git a/requirements.txt b/requirements.txt index 440c6d7..a80ae0d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -5,19 +5,19 @@ ##### Core scientific packages jupyter==1.0.0 -matplotlib==3.3.2 -numpy==1.18.5 -pandas==1.1.3 -scipy==1.5.3 +matplotlib==3.3.4 +numpy==1.19.5 +pandas==1.2.2 +scipy==1.6.0 ##### Machine Learning packages -scikit-learn==0.23.2 +scikit-learn==0.24.1 # Optional: the XGBoost library is only used in chapter 7 -xgboost==1.2.1 +xgboost==1.3.3 # Optional: the transformers library is only used in chapter 16 -transformers==3.3.1 +transformers==4.3.2 ##### TensorFlow-related packages @@ -27,39 +27,39 @@ transformers==3.3.1 # you must install CUDA, cuDNN and more: see tensorflow.org for the detailed # installation instructions. -tensorflow==2.3.1 +tensorflow==2.4.1 # Optional: the TF Serving API library is just needed for chapter 19. -tensorflow-serving-api==2.3.0 # or tensorflow-serving-api-gpu if gpu +tensorflow-serving-api==2.4.1 # or tensorflow-serving-api-gpu if gpu -tensorboard==2.3.0 -tensorboard-plugin-profile==2.3.0 -tensorflow-datasets==4.0.1 +tensorboard==2.4.1 +tensorboard-plugin-profile==2.4.0 +tensorflow-datasets==3.0.0 tensorflow-hub==0.9.0 -tensorflow-probability==0.11.1 +tensorflow-probability==0.12.1 # Optional: only used in chapter 13. # NOT AVAILABLE ON WINDOWS -tfx==0.24.1 +tfx==0.27.0 # Optional: only used in chapter 16. 
# NOT AVAILABLE ON WINDOWS -tensorflow-addons==0.11.2 +tensorflow-addons==0.12.1 ##### Reinforcement Learning library (chapter 18) # There are a few dependencies you need to install first, check out: # https://github.com/openai/gym#installing-everything -gym[atari]==0.17.3 +gym[atari]==0.18.0 # On Windows, install atari_py using: # pip install --no-index -f https://github.com/Kojoley/atari-py/releases atari_py -tf-agents==0.6.0 +tf-agents==0.7.1 ##### Image manipulation -Pillow==8.0.0 -graphviz==0.14.2 -opencv-python==4.4.0.44 -pyglet==1.4.11 +Pillow==7.2.0 +graphviz==0.16 +opencv-python==4.5.1.48 +pyglet==1.5.0 #pyvirtualdisplay # needed in chapter 16, if on a headless server # (i.e., without screen, e.g., Colab or VM) @@ -71,32 +71,23 @@ pyglet==1.4.11 joblib==0.14.1 # Easy http requests -requests==2.24.0 +requests==2.25.1 # Nice utility to diff Jupyter Notebooks. nbdime==2.1.0 # May be useful with Pandas for complex "where" clauses (e.g., Pandas # tutorial). -numexpr==2.7.1 +numexpr==2.7.2 # Optional: these libraries can be useful in the classification chapter, # exercise 4. nltk==3.5 -urlextract==1.1.0 +urlextract==1.2.0 # Optional: these libraries are only used in chapter 16 ftfy==5.8 # Optional: tqdm displays nice progress bars, ipywidgets for tqdm's notebook support -tqdm==4.50.2 -ipywidgets==7.5.1 - - - -# Specific lib versions to avoid conflicts -attrs==19.3.0 -cloudpickle==1.3.0 -dill==0.3.1.1 -gast==0.3.3 -httplib2==0.17.4 +tqdm==4.56.1 +ipywidgets==7.6.3