diff --git a/14_deep_computer_vision_with_cnns.ipynb b/14_deep_computer_vision_with_cnns.ipynb
index 0576883..6d4ba18 100644
--- a/14_deep_computer_vision_with_cnns.ipynb
+++ b/14_deep_computer_vision_with_cnns.ipynb
@@ -2,92 +2,159 @@
"cells": [
{
"cell_type": "markdown",
- "metadata": {},
+ "metadata": {
+ "id": "yK7ecnb6pKzp"
+ },
"source": [
- "**Chapter 13 – Deep Computer Vision Using Convolutional Neural Networks**"
+ "**Chapter 14 – Deep Computer Vision Using Convolutional Neural Networks**"
]
},
{
"cell_type": "markdown",
- "metadata": {},
+ "metadata": {
+ "id": "f6cR-I1WpKzs"
+ },
"source": [
- "_This notebook contains all the sample code and solutions to the exercises in chapter 13._"
+ "_This notebook contains all the sample code and solutions to the exercises in chapter 14._"
]
},
{
"cell_type": "markdown",
- "metadata": {},
+ "metadata": {
+ "id": "CeYcCO3HpKzt"
+ },
"source": [
"
\n",
" \n",
- " \n",
+ " \n",
" | \n",
" \n",
- " \n",
+ " \n",
" | \n",
"
"
]
},
{
"cell_type": "markdown",
- "metadata": {},
+ "metadata": {
+ "id": "dFXIv9qNpKzt",
+ "tags": []
+ },
"source": [
"# Setup"
]
},
{
"cell_type": "markdown",
- "metadata": {},
+ "metadata": {
+ "id": "8IPbJEmZpKzu"
+ },
"source": [
- "First, let's import a few common modules, ensure MatplotLib plots figures inline and prepare a function to save the figures."
+ "This project requires Python 3.8 or above:"
]
},
{
"cell_type": "code",
"execution_count": 1,
- "metadata": {},
+ "metadata": {
+ "id": "TFSU3FCOpKzu"
+ },
"outputs": [],
"source": [
- "# Python ≥3.8 is required\n",
"import sys\n",
- "assert sys.version_info >= (3, 8)\n",
"\n",
- "# Is this notebook running on Colab or Kaggle?\n",
- "IS_COLAB = \"google.colab\" in sys.modules\n",
- "IS_KAGGLE = \"kaggle_secrets\" in sys.modules\n",
+ "assert sys.version_info >= (3, 7)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "TAlKky09pKzv"
+ },
+ "source": [
+ "It also requires Scikit-Learn ≥ 1.0.1:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {
+ "id": "YqCwW7cMpKzw"
+ },
+ "outputs": [],
+ "source": [
+ "import sklearn\n",
"\n",
- "# Common imports\n",
- "import numpy as np\n",
+ "assert sklearn.__version__ >= \"1.0.1\""
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "GJtVEqxfpKzw"
+ },
+ "source": [
+ "And TensorFlow ≥ 2.6:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {
+ "id": "0Piq5se2pKzx"
+ },
+ "outputs": [],
+ "source": [
+ "import tensorflow as tf\n",
+ "\n",
+ "assert tf.__version__ >= \"2.6.0\""
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "DDaDoLQTpKzx"
+ },
+ "source": [
+ "As we did in earlier chapters, let's define the default font sizes to make the figures prettier:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {
+ "id": "8d4TH3NbpKzx"
+ },
+ "outputs": [],
+ "source": [
+ "import matplotlib.pyplot as plt\n",
+ "\n",
+ "plt.rc('font', size=14)\n",
+ "plt.rc('axes', labelsize=14, titlesize=14)\n",
+ "plt.rc('legend', fontsize=14)\n",
+ "plt.rc('xtick', labelsize=10)\n",
+ "plt.rc('ytick', labelsize=10)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "RcoUIRsvpKzy"
+ },
+ "source": [
+ "And let's create the `images/cnn` folder (if it doesn't already exist), and define the `save_fig()` function which is used through this notebook to save the figures in high-res for the book:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {
+ "id": "PQFH5Y9PpKzy"
+ },
+ "outputs": [],
+ "source": [
"from pathlib import Path\n",
"\n",
- "# Scikit-Learn ≥1.0 is required\n",
- "import sklearn\n",
- "assert sklearn.__version__ >= \"1.0\"\n",
- "\n",
- "# TensorFlow ≥2.6 is required\n",
- "import tensorflow as tf\n",
- "assert tf.__version__ >= \"2.6\"\n",
- "\n",
- "# to make this notebook's output stable across runs\n",
- "np.random.seed(42)\n",
- "tf.random.set_seed(42)\n",
- "\n",
- "if not tf.config.list_physical_devices('GPU'):\n",
- " print(\"No GPU was detected. Neural nets can be very slow without a GPU.\")\n",
- " if IS_COLAB:\n",
- " print(\"Go to Runtime > Change runtime and select a GPU hardware accelerator.\")\n",
- " if IS_KAGGLE:\n",
- " print(\"Go to Settings > Accelerator and select GPU.\")\n",
- "\n",
- "# To plot pretty figures\n",
- "%matplotlib inline\n",
- "import matplotlib as mpl\n",
- "import matplotlib.pyplot as plt\n",
- "mpl.rc('axes', labelsize=14)\n",
- "mpl.rc('xtick', labelsize=12)\n",
- "mpl.rc('ytick', labelsize=12)\n",
- "\n",
- "# Where to save the figures\n",
"IMAGES_PATH = Path() / \"images\" / \"cnn\"\n",
"IMAGES_PATH.mkdir(parents=True, exist_ok=True)\n",
"\n",
@@ -100,339 +167,367 @@
},
{
"cell_type": "markdown",
- "metadata": {},
+ "metadata": {
+ "id": "YTsawKlapKzy"
+ },
"source": [
- "A couple utility functions to plot grayscale and RGB images:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 2,
- "metadata": {},
- "outputs": [],
- "source": [
- "def plot_image(image):\n",
- " plt.imshow(image, cmap=\"gray\", interpolation=\"nearest\")\n",
- " plt.axis(\"off\")\n",
- "\n",
- "def plot_color_image(image):\n",
- " plt.imshow(image, interpolation=\"nearest\")\n",
- " plt.axis(\"off\")"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# What is a Convolution?"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 3,
- "metadata": {},
- "outputs": [],
- "source": [
- "import numpy as np\n",
- "from sklearn.datasets import load_sample_image\n",
- "\n",
- "# Load sample images\n",
- "china = load_sample_image(\"china.jpg\") / 255\n",
- "flower = load_sample_image(\"flower.jpg\") / 255\n",
- "images = np.array([china, flower])\n",
- "batch_size, height, width, channels = images.shape\n",
- "\n",
- "# Create 2 filters\n",
- "filters = np.zeros(shape=(7, 7, channels, 2), dtype=np.float32)\n",
- "filters[:, 3, :, 0] = 1 # vertical line\n",
- "filters[3, :, :, 1] = 1 # horizontal line\n",
- "\n",
- "outputs = tf.nn.conv2d(images, filters, strides=1, padding=\"SAME\")\n",
- "\n",
- "plt.imshow(outputs[0, :, :, 1], cmap=\"gray\") # plot 1st image's 2nd feature map\n",
- "plt.axis(\"off\") # Not shown in the book\n",
- "plt.show()"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 4,
- "metadata": {},
- "outputs": [],
- "source": [
- "for image_index in (0, 1):\n",
- " for feature_map_index in (0, 1):\n",
- " plt.subplot(2, 2, image_index * 2 + feature_map_index + 1)\n",
- " plot_image(outputs[image_index, :, :, feature_map_index])\n",
- "\n",
- "plt.show()"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 5,
- "metadata": {},
- "outputs": [],
- "source": [
- "def crop(images):\n",
- " return images[150:220, 130:250]"
+ "This chapter can be very slow without a GPU, so let's make sure there's one, or else issue a warning:"
]
},
{
"cell_type": "code",
"execution_count": 6,
- "metadata": {},
+ "metadata": {
+ "id": "Ekxzo6pOpKzy"
+ },
"outputs": [],
"source": [
- "plot_image(crop(images[0, :, :, 0]))\n",
- "save_fig(\"china_original\", tight_layout=False)\n",
- "plt.show()\n",
+ "# Is this notebook running on Colab or Kaggle?\n",
+ "IS_COLAB = \"google.colab\" in sys.modules\n",
+ "IS_KAGGLE = \"kaggle_secrets\" in sys.modules\n",
"\n",
- "for feature_map_index, filename in enumerate([\"china_vertical\", \"china_horizontal\"]):\n",
- " plot_image(crop(outputs[0, :, :, feature_map_index]))\n",
- " save_fig(filename, tight_layout=False)\n",
- " plt.show()"
+ "if not tf.config.list_physical_devices('GPU'):\n",
+ " print(\"No GPU was detected. Neural nets can be very slow without a GPU.\")\n",
+ " if IS_COLAB:\n",
+ " print(\"Go to Runtime > Change runtime and select a GPU hardware \"\n",
+ " \"accelerator.\")\n",
+ " if IS_KAGGLE:\n",
+ " print(\"Go to Settings > Accelerator and select GPU.\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "k9Tnd8cwpKzz"
+ },
+ "source": [
+ "# Convolutional Layers\n",
+ "## Implementing Convolutional Layers With Keras"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "KuamdOs5pKz0"
+ },
+ "source": [
+ "Let's load two sample images, rescale their pixel values to 0-1, and center crop them to small 70×120 images:"
]
},
{
"cell_type": "code",
"execution_count": 7,
- "metadata": {},
+ "metadata": {
+ "id": "I-kXsWgDpKz0"
+ },
"outputs": [],
"source": [
- "plot_image(filters[:, :, 0, 0])\n",
- "plt.show()\n",
- "plot_image(filters[:, :, 0, 1])\n",
- "plt.show()"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "## Convolutional Layer"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Let's create a 2D convolutional layer, using `tf.keras.layers.Conv2D()`:"
+ "from sklearn.datasets import load_sample_images\n",
+ "import tensorflow as tf\n",
+ "\n",
+ "images = load_sample_images()[\"images\"]\n",
+ "images = tf.keras.layers.CenterCrop(height=70, width=120)(images)\n",
+ "images = tf.keras.layers.Rescaling(scale=1 / 255)(images)"
]
},
{
"cell_type": "code",
"execution_count": 8,
- "metadata": {},
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "btpkyo8ZpKz0",
+ "outputId": "da87408f-5e8f-4c2e-c21f-4b73028d64a2"
+ },
"outputs": [],
"source": [
- "np.random.seed(42)\n",
- "tf.random.set_seed(42)\n",
- "\n",
- "conv = tf.keras.layers.Conv2D(filters=2, kernel_size=7, strides=1,\n",
- " padding=\"SAME\", activation=\"relu\", input_shape=outputs.shape)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Let's call this layer, passing it the two test images:"
+ "images.shape"
]
},
{
"cell_type": "code",
"execution_count": 9,
- "metadata": {},
+ "metadata": {
+ "id": "Jv6KYhPzpKz0"
+ },
"outputs": [],
"source": [
- "conv_outputs = conv(images)\n",
- "conv_outputs.shape "
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "The output is a 4D tensor. The dimensions are: batch size, height, width, channels. The first dimension (batch size) is 2 since there are 2 input images. The next two dimensions are the height and width of the output feature maps: since `padding=\"SAME\"` and `strides=1`, the output feature maps have the same height and width as the input images (in this case, 427×640). Lastly, this convolutional layer has 2 filters, so the last dimension is 2: there are 2 output feature maps per input image."
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Since the filters are initialized randomly, they'll initially detect random patterns. Let's take a look at the 2 output features maps for each image:"
+ "tf.random.set_seed(42) # extra code – ensures reproducibility\n",
+ "conv_layer = tf.keras.layers.Conv2D(filters=32, kernel_size=7)\n",
+ "fmaps = conv_layer(images)"
]
},
{
"cell_type": "code",
"execution_count": 10,
- "metadata": {},
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "w-mtSoL_pKz1",
+ "outputId": "8cf6409d-6c0d-4d44-ceaa-4cc9faab21b7"
+ },
"outputs": [],
"source": [
- "plt.figure(figsize=(10,6))\n",
- "for image_index in (0, 1):\n",
- " for feature_map_index in (0, 1):\n",
- " plt.subplot(2, 2, image_index * 2 + feature_map_index + 1)\n",
- " plot_image(crop(conv_outputs[image_index, :, :, feature_map_index]))\n",
- "plt.show()"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Although the filters were initialized randomly, the second filter happens to act like an edge detector. Randomly initialized filters often act this way, which is quite fortunate since detecting edges is quite useful in image processing."
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "If we want, we can set the filters to be the ones we manually defined earlier, and set the biases to zeros (in real life we will almost never need to set filters or biases manually, as the convolutional layer will just learn the appropriate filters and biases during training):"
+ "fmaps.shape"
]
},
{
"cell_type": "code",
"execution_count": 11,
- "metadata": {},
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 511
+ },
+ "id": "ttMBSh9RpKz1",
+ "outputId": "2b6ccb30-f9b9-451c-86e1-a248da78acd2"
+ },
"outputs": [],
"source": [
- "conv.set_weights([filters, np.zeros(2)])"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Now let's call this layer again on the same two images, and let's check that the output feature maps do highlight vertical lines and horizontal lines, respectively (as earlier):"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 12,
- "metadata": {},
- "outputs": [],
- "source": [
- "conv_outputs = conv(images)\n",
- "conv_outputs.shape "
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 13,
- "metadata": {},
- "outputs": [],
- "source": [
- "plt.figure(figsize=(10,6))\n",
- "for image_index in (0, 1):\n",
- " for feature_map_index in (0, 1):\n",
- " plt.subplot(2, 2, image_index * 2 + feature_map_index + 1)\n",
- " plot_image(crop(conv_outputs[image_index, :, :, feature_map_index]))\n",
+ "# extra code – displays the two output feature maps for each image\n",
+ "\n",
+ "plt.figure(figsize=(15, 9))\n",
+ "for image_idx in (0, 1):\n",
+ " for fmap_idx in (0, 1):\n",
+ " plt.subplot(2, 2, image_idx * 2 + fmap_idx + 1)\n",
+ " plt.imshow(fmaps[image_idx, :, :, fmap_idx], cmap=\"gray\")\n",
+ " plt.axis(\"off\")\n",
+ "\n",
"plt.show()"
]
},
{
"cell_type": "markdown",
- "metadata": {},
+ "metadata": {
+ "id": "TyKNtjntpKz1"
+ },
"source": [
- "## VALID vs SAME padding"
+ "As you can see, randomly generated filters typically act like edge detectors, which is great since that's a useful tool in image processing, and that's the type of filters that a convolutional layer typically starts with. Then, during training, it gradually learns improved filters to recognize useful patterns for the task."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "Cm7giIfDpKz1"
+ },
+ "source": [
+ "Now let's use zero-padding:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 12,
+ "metadata": {
+ "id": "HHUI5jsNpKz1"
+ },
+ "outputs": [],
+ "source": [
+ "conv_layer = tf.keras.layers.Conv2D(filters=32, kernel_size=7,\n",
+ " padding=\"same\")\n",
+ "fmaps = conv_layer(images)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 13,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "iyfeZ38EpKz2",
+ "outputId": "b42fd198-58c3-4ee5-8158-6252e4eca01d"
+ },
+ "outputs": [],
+ "source": [
+ "fmaps.shape"
]
},
{
"cell_type": "code",
"execution_count": 14,
- "metadata": {},
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "80Umgdm1pKz2",
+ "outputId": "ae8d2cf0-5b8e-4b83-d19b-db483caecb13"
+ },
"outputs": [],
"source": [
- "def feature_map_size(input_size, kernel_size, strides=1, padding=\"SAME\"):\n",
- " if padding == \"SAME\":\n",
- " return (input_size - 1) // strides + 1\n",
- " else:\n",
- " return (input_size - kernel_size) // strides + 1"
+ "# extra code – shows that the output shape when we set strides=2\n",
+ "conv_layer = tf.keras.layers.Conv2D(filters=32, kernel_size=7, padding=\"same\",\n",
+ " strides=2)\n",
+ "fmaps = conv_layer(images)\n",
+ "fmaps.shape"
]
},
{
"cell_type": "code",
"execution_count": 15,
- "metadata": {},
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "jisXP9jfpKz2",
+ "outputId": "adfe3825-4724-4f89-e984-a75988b02927"
+ },
"outputs": [],
"source": [
- "def pad_before_and_padded_size(input_size, kernel_size, strides=1):\n",
- " fmap_size = feature_map_size(input_size, kernel_size, strides)\n",
- " padded_size = max((fmap_size - 1) * strides + kernel_size, input_size)\n",
- " pad_before = (padded_size - input_size) // 2\n",
- " return pad_before, padded_size"
+ "# extra code – this utility function can be useful to compute the size of the\n",
+ "# feature maps output by a convolutional layer. It also returns\n",
+ "# the number of ignored rows or columns if padding=\"valid\", or the\n",
+ "# number of zero-padded rows or columns if padding=\"same\".\"\"\"\n",
+ "\n",
+ "import numpy as np\n",
+ "\n",
+ "def conv_output_size(input_size, kernel_size, strides=1, padding=\"valid\"):\n",
+ " if padding==\"valid\":\n",
+ " z = input_size - kernel_size + strides\n",
+ " output_size = z // strides\n",
+ " num_ignored = z % strides\n",
+ " return output_size, num_ignored\n",
+ " else:\n",
+ " output_size = (input_size - 1) // strides + 1\n",
+ " num_padded = (output_size - 1) * strides + kernel_size - input_size\n",
+ " return output_size, num_padded\n",
+ "\n",
+ "conv_output_size(np.array([70, 120]), kernel_size=7, strides=2, padding=\"same\")"
+ ]
+ },
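+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "As a quick sanity check (a sketch, not from the book), the same helper with `padding=\"valid\"` and `strides=1` should reproduce the 64×114 feature map size we got from the first convolutional layer above:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# extra check – assumes conv_output_size() was defined in the previous cell:\n",
+ "# with no padding and strides=1, a 7×7 kernel shrinks 70×120 inputs to 64×114\n",
+ "conv_output_size(np.array([70, 120]), kernel_size=7, strides=1,\n",
+ "                 padding=\"valid\")"
+ ]
+ },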
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "aIgA6FCopKz2"
+ },
+ "source": [
+ "Let's now look at the weights:"
]
},
{
"cell_type": "code",
"execution_count": 16,
- "metadata": {},
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "vH_xhNDVpKz2",
+ "outputId": "cc8e813f-7250-4c12-b168-8f1eb64ab9aa"
+ },
"outputs": [],
"source": [
- "def manual_same_padding(images, kernel_size, strides=1):\n",
- " if kernel_size == 1:\n",
- " return images.astype(np.float32)\n",
- " batch_size, height, width, channels = images.shape\n",
- " top_pad, padded_height = pad_before_and_padded_size(height, kernel_size, strides)\n",
- " left_pad, padded_width = pad_before_and_padded_size(width, kernel_size, strides)\n",
- " padded_shape = [batch_size, padded_height, padded_width, channels]\n",
- " padded_images = np.zeros(padded_shape, dtype=np.float32)\n",
- " padded_images[:, top_pad:height+top_pad, left_pad:width+left_pad, :] = images\n",
- " return padded_images"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Using `\"SAME\"` padding is equivalent to padding manually using `manual_same_padding()` then using `\"VALID\"` padding (confusingly, `\"VALID\"` padding means no padding at all):"
+ "kernels, biases = conv_layer.get_weights()\n",
+ "kernels.shape"
]
},
{
"cell_type": "code",
"execution_count": 17,
- "metadata": {},
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "zXIgK5tMpKz2",
+ "outputId": "53e1abff-6329-4ccd-ff6e-4ad5f53bad21"
+ },
"outputs": [],
"source": [
- "kernel_size = 7\n",
- "strides = 2\n",
- "\n",
- "conv_valid = tf.keras.layers.Conv2D(filters=1, kernel_size=kernel_size, strides=strides, padding=\"VALID\")\n",
- "conv_same = tf.keras.layers.Conv2D(filters=1, kernel_size=kernel_size, strides=strides, padding=\"SAME\")\n",
- "\n",
- "valid_output = conv_valid(manual_same_padding(images, kernel_size, strides))\n",
- "\n",
- "# Need to call build() so conv_same's weights get created\n",
- "conv_same.build(tf.TensorShape(images.shape))\n",
- "\n",
- "# Copy the weights from conv_valid to conv_same\n",
- "conv_same.set_weights(conv_valid.get_weights())\n",
- "\n",
- "same_output = conv_same(images.astype(np.float32))\n",
- "\n",
- "assert np.allclose(valid_output.numpy(), same_output.numpy())"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# Pooling layer"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "## Max pooling"
+ "biases.shape"
]
},
{
"cell_type": "code",
"execution_count": 18,
- "metadata": {},
+ "metadata": {
+ "id": "ik87xvJhpKz3"
+ },
+ "outputs": [],
+ "source": [
+ "# extra code – shows how to use the tf.nn.conv2d() operation\n",
+ "\n",
+ "tf.random.set_seed(42)\n",
+ "filters = tf.random.normal([7, 7, 3, 2])\n",
+ "biases = tf.zeros([2])\n",
+ "fmaps = tf.nn.conv2d(images, filters, strides=1, padding=\"SAME\") + biases"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "vt140LrypKz3"
+ },
+ "source": [
+ "Let's manually create two filters full of zeros, except for a vertical line of 1s in the first filter, and a horizontal one in the second filter (just like in Figure 14–5). The two output feature maps highlight vertical lines and horizontal lines, respectively. In practice you will probably never need to create filters manually, since the convolutional layers will learn them automatically."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 19,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 520
+ },
+ "id": "7jSGHqKMpKz3",
+ "outputId": "c9e51288-dbc8-45db-dfed-c3faf49e5195"
+ },
+ "outputs": [],
+ "source": [
+ "# extra code – shows how to manually create two filters to get images similar\n",
+ "# to those in Figure 14–5.\n",
+ "\n",
+ "plt.figure(figsize=(15, 9))\n",
+ "filters = np.zeros([7, 7, 3, 2])\n",
+ "filters[:, 3, :, 0] = 1\n",
+ "filters[3, :, :, 1] = 1\n",
+ "fmaps = tf.nn.conv2d(images, filters, strides=1, padding=\"SAME\") + biases\n",
+ "\n",
+ "for image_idx in (0, 1):\n",
+ " for fmap_idx in (0, 1):\n",
+ " plt.subplot(2, 2, image_idx * 2 + fmap_idx + 1)\n",
+ " plt.imshow(fmaps[image_idx, :, :, fmap_idx], cmap=\"gray\")\n",
+ " plt.axis(\"off\")\n",
+ "\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "sO0dtyuVpKz3"
+ },
+ "source": [
+ "Notice the dark lines at the top and bottom of the two images on the left, and on the left and right of the two images on the right? Can you guess what these are? Why were they not present in the previous figure?\n",
+ "\n",
+ "You guessed it! These are artifacts due to the fact that we used zero padding in this case, while we did not use zero padding to create the feature maps in the previous figure. Because of zero padding, the two feature maps based on the vertical line filter (i.e., the two left images) could not fully activate near the top and bottom of the images. Similarly, the two feature maps based on the horizontal line filter (i.e., the two right images) could not fully activate near the left and right of the images."
+ ]
+ },
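+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "A quick check (a sketch, not from the book): comparing the mean activation of the top row with that of a central row of the vertical-line feature map makes the zero-padding effect easy to see:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# extra check – assumes fmaps from the previous cell is still in scope. The\n",
+ "# top output row overlaps 3 rows of zero padding, so its mean activation is\n",
+ "# noticeably lower than that of a row in the middle of the image.\n",
+ "print(tf.reduce_mean(fmaps[0, 0, :, 0]).numpy())   # top row (dark line)\n",
+ "print(tf.reduce_mean(fmaps[0, 35, :, 0]).numpy())  # central row (brighter)"
+ ]
+ },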
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "YXahd-O0pKz4"
+ },
+ "source": [
+ "# Pooling Layers\n",
+ "## Implementing Pooling Layers With Keras"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "obubVYH-pKz4"
+ },
+ "source": [
+ "**Max pooling**"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 20,
+ "metadata": {
+ "id": "v4qYbnjKpKz4"
+ },
"outputs": [],
"source": [
"max_pool = tf.keras.layers.MaxPool2D(pool_size=2)"
@@ -440,26 +535,38 @@
},
{
"cell_type": "code",
- "execution_count": 19,
- "metadata": {},
+ "execution_count": 21,
+ "metadata": {
+ "id": "Niwcuaw_pKz4"
+ },
"outputs": [],
"source": [
- "cropped_images = np.array([crop(image) for image in images], dtype=np.float32)\n",
- "output = max_pool(cropped_images)"
+ "output = max_pool(images)"
]
},
{
"cell_type": "code",
- "execution_count": 20,
- "metadata": {},
+ "execution_count": 22,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 366
+ },
+ "id": "sZo5TrZ6pKz4",
+ "outputId": "471b4713-527a-41e7-820d-ed09f24a2195"
+ },
"outputs": [],
"source": [
+ "# extra code – this cells generates and saves Figure 14–9\n",
+ "\n",
+ "import matplotlib as mpl\n",
+ "\n",
"fig = plt.figure(figsize=(12, 8))\n",
"gs = mpl.gridspec.GridSpec(nrows=1, ncols=2, width_ratios=[2, 1])\n",
"\n",
"ax1 = fig.add_subplot(gs[0, 0])\n",
"ax1.set_title(\"Input\", fontsize=14)\n",
- "ax1.imshow(cropped_images[0]) # plot the 1st image\n",
+ "ax1.imshow(images[0]) # plot the 1st image\n",
"ax1.axis(\"off\")\n",
"ax2 = fig.add_subplot(gs[0, 1])\n",
"ax2.set_title(\"Output\", fontsize=14)\n",
@@ -471,238 +578,306 @@
},
{
"cell_type": "markdown",
- "metadata": {},
+ "metadata": {
+ "id": "yJKqdXSEpKz4"
+ },
"source": [
- "## Depth-wise pooling"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 21,
- "metadata": {},
- "outputs": [],
- "source": [
- "class DepthMaxPool(tf.keras.layers.Layer):\n",
- " def __init__(self, pool_size, strides=None, padding=\"VALID\", **kwargs):\n",
- " super().__init__(**kwargs)\n",
- " if strides is None:\n",
- " strides = pool_size\n",
- " self.pool_size = pool_size\n",
- " self.strides = strides\n",
- " self.padding = padding\n",
- " def call(self, inputs):\n",
- " return tf.nn.max_pool(inputs,\n",
- " ksize=(1, 1, 1, self.pool_size),\n",
- " strides=(1, 1, 1, self.pool_size),\n",
- " padding=self.padding)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 22,
- "metadata": {},
- "outputs": [],
- "source": [
- "depth_pool = DepthMaxPool(3)\n",
- "with tf.device(\"/cpu:0\"): # there is no GPU-kernel yet\n",
- " depth_output = depth_pool(cropped_images)\n",
- "depth_output.shape"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Or just use a `Lambda` layer:"
+ "**Depth-wise pooling**"
]
},
{
"cell_type": "code",
"execution_count": 23,
- "metadata": {},
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "ECn_PnixpKz4",
+ "outputId": "ca0cd587-9a20-40cd-b69d-56ce9be47f32"
+ },
"outputs": [],
"source": [
- "depth_pool = tf.keras.layers.Lambda(lambda X: tf.nn.max_pool(\n",
- " X, ksize=(1, 1, 1, 3), strides=(1, 1, 1, 3), padding=\"VALID\"))\n",
- "with tf.device(\"/cpu:0\"): # there is no GPU-kernel yet\n",
- " depth_output = depth_pool(cropped_images)\n",
- "depth_output.shape"
+ "# extra code – shows how to use the max_pool() op; only works on the CPU\n",
+ "np.random.seed(42)\n",
+ "fmaps = np.random.rand(2, 70, 120, 60)\n",
+ "with tf.device(\"/cpu:0\"):\n",
+ " output = tf.nn.max_pool(fmaps, ksize=(1, 1, 1, 3), strides=(1, 1, 1, 3),\n",
+ " padding=\"VALID\")\n",
+ "output.shape"
]
},
{
"cell_type": "code",
"execution_count": 24,
- "metadata": {},
+ "metadata": {
+ "id": "G9rV71mrpKz4"
+ },
"outputs": [],
"source": [
+ "class DepthPool(tf.keras.layers.Layer):\n",
+ " def __init__(self, pool_size=2, **kwargs):\n",
+ " super().__init__(**kwargs)\n",
+ " self.pool_size = pool_size\n",
+ " \n",
+ " def call(self, inputs):\n",
+ " shape = tf.shape(inputs) # shape[-1] is the number of channels\n",
+ " groups = shape[-1] // self.pool_size # number of channel groups\n",
+ " new_shape = tf.concat([shape[:-1], [groups, self.pool_size]], axis=0)\n",
+ " return tf.reduce_max(tf.reshape(inputs, new_shape), axis=-1)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 25,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "lEHRtmhXpKz5",
+ "outputId": "16fa295e-72a1-43a6-b3c6-eb6694a1bc4f"
+ },
+ "outputs": [],
+ "source": [
+ "# extra code – shows that this custom layer gives the same result as max_pool()\n",
+ "np.allclose(DepthPool(pool_size=3)(fmaps), output)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 26,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 225
+ },
+ "id": "hMJoKQxPpKz5",
+ "outputId": "8b5e494d-a7f6-4341-ba6c-2d82f2f45adc"
+ },
+ "outputs": [],
+ "source": [
+ "# extra code – computes and displays the output of the depthwise pooling layer\n",
+ "\n",
+ "depth_output = DepthPool(pool_size=3)(images)\n",
+ "\n",
"plt.figure(figsize=(12, 8))\n",
"plt.subplot(1, 2, 1)\n",
"plt.title(\"Input\", fontsize=14)\n",
- "plot_color_image(cropped_images[0]) # plot the 1st image\n",
+ "plt.imshow(images[0]) # plot the 1st image\n",
+ "plt.axis(\"off\")\n",
"plt.subplot(1, 2, 2)\n",
"plt.title(\"Output\", fontsize=14)\n",
- "plot_image(depth_output[0, ..., 0]) # plot the output for the 1st image\n",
+ "plt.imshow(depth_output[0, ..., 0], cmap=\"gray\") # plot 1st image's output\n",
"plt.axis(\"off\")\n",
"plt.show()"
]
},
{
"cell_type": "markdown",
- "metadata": {},
+ "metadata": {
+ "id": "O5Sel6supKz5"
+ },
"source": [
- "## Average pooling"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 25,
- "metadata": {},
- "outputs": [],
- "source": [
- "avg_pool = tf.keras.layers.AvgPool2D(pool_size=2)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 26,
- "metadata": {},
- "outputs": [],
- "source": [
- "output_avg = avg_pool(cropped_images)"
+ "**Global Average Pooling**"
]
},
{
"cell_type": "code",
"execution_count": 27,
- "metadata": {},
+ "metadata": {
+ "id": "KW52BwBypKz5"
+ },
"outputs": [],
"source": [
- "fig = plt.figure(figsize=(12, 8))\n",
- "gs = mpl.gridspec.GridSpec(nrows=1, ncols=2, width_ratios=[2, 1])\n",
- "\n",
- "ax1 = fig.add_subplot(gs[0, 0])\n",
- "ax1.set_title(\"Input\", fontsize=14)\n",
- "ax1.imshow(cropped_images[0]) # plot the 1st image\n",
- "ax1.axis(\"off\")\n",
- "ax2 = fig.add_subplot(gs[0, 1])\n",
- "ax2.set_title(\"Output\", fontsize=14)\n",
- "ax2.imshow(output_avg[0]) # plot the output for the 1st image\n",
- "ax2.axis(\"off\")\n",
- "plt.show()"
+ "global_avg_pool = tf.keras.layers.GlobalAvgPool2D()"
]
},
{
"cell_type": "markdown",
- "metadata": {},
+ "metadata": {
+ "id": "s2E12ccVpKz5"
+ },
"source": [
- "## Global Average Pooling"
+ "The following layer is equivalent:"
]
},
{
"cell_type": "code",
"execution_count": 28,
- "metadata": {},
+ "metadata": {
+ "id": "nG_X-OuTpKz5"
+ },
"outputs": [],
"source": [
- "global_avg_pool = tf.keras.layers.GlobalAvgPool2D()\n",
- "global_avg_pool(cropped_images)"
+ "global_avg_pool = tf.keras.layers.Lambda(\n",
+ " lambda X: tf.reduce_mean(X, axis=[1, 2]))"
]
},
{
"cell_type": "code",
"execution_count": 29,
- "metadata": {},
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "Ygy0q39xpKz5",
+ "outputId": "39084b74-1687-458d-dd82-84277f3bd221"
+ },
"outputs": [],
"source": [
- "output_global_avg2 = tf.keras.layers.Lambda(lambda X: tf.reduce_mean(X, axis=[1, 2]))\n",
- "output_global_avg2(cropped_images)"
+ "global_avg_pool(images)"
]
},
{
"cell_type": "markdown",
- "metadata": {},
+ "metadata": {
+ "id": "oid44Xx-pKz6"
+ },
"source": [
- "# Tackling Fashion MNIST With a CNN"
+ "# CNN Architectures"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "ELZe7PLfpKz6"
+ },
+ "source": [
+ "**Tackling Fashion MNIST With a CNN**"
]
},
{
"cell_type": "code",
"execution_count": 30,
- "metadata": {},
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "1IXwgw_0pKz6",
+ "outputId": "4cd7176d-5b2d-4bab-efd3-5ad967a2e43b"
+ },
"outputs": [],
"source": [
- "(X_train_full, y_train_full), (X_test, y_test) = tf.keras.datasets.fashion_mnist.load_data()\n",
+ "# extra code – loads the mnist dataset, add the channels axis to the inputs,\n",
+ "# scales the values to the 0-1 range, and splits the dataset\n",
+ "mnist = tf.keras.datasets.fashion_mnist.load_data()\n",
+ "(X_train_full, y_train_full), (X_test, y_test) = mnist\n",
+ "X_train_full = np.expand_dims(X_train_full, axis=-1).astype(np.float32) / 255\n",
+ "X_test = np.expand_dims(X_test.astype(np.float32), axis=-1) / 255\n",
"X_train, X_valid = X_train_full[:-5000], X_train_full[-5000:]\n",
- "y_train, y_valid = y_train_full[:-5000], y_train_full[-5000:]\n",
- "\n",
- "X_mean = X_train.mean(axis=0, keepdims=True)\n",
- "X_std = X_train.std(axis=0, keepdims=True) + 1e-7\n",
- "X_train = (X_train - X_mean) / X_std\n",
- "X_valid = (X_valid - X_mean) / X_std\n",
- "X_test = (X_test - X_mean) / X_std\n",
- "\n",
- "X_train = X_train[..., np.newaxis]\n",
- "X_valid = X_valid[..., np.newaxis]\n",
- "X_test = X_test[..., np.newaxis]"
+ "y_train, y_valid = y_train_full[:-5000], y_train_full[-5000:]"
]
},
{
"cell_type": "code",
"execution_count": 31,
- "metadata": {},
+ "metadata": {
+ "id": "34upiak4pKz6"
+ },
"outputs": [],
"source": [
"from functools import partial\n",
"\n",
- "DefaultConv2D = partial(tf.keras.layers.Conv2D,\n",
- " kernel_size=3, activation='relu', padding=\"SAME\")\n",
- "\n",
+ "tf.random.set_seed(42) # extra code – ensures reproducibility\n",
+ "DefaultConv2D = partial(tf.keras.layers.Conv2D, kernel_size=3, padding=\"same\",\n",
+ " activation=\"relu\", kernel_initializer=\"he_normal\")\n",
"model = tf.keras.Sequential([\n",
" DefaultConv2D(filters=64, kernel_size=7, input_shape=[28, 28, 1]),\n",
- " tf.keras.layers.MaxPooling2D(pool_size=2),\n",
+ " tf.keras.layers.MaxPool2D(),\n",
" DefaultConv2D(filters=128),\n",
" DefaultConv2D(filters=128),\n",
- " tf.keras.layers.MaxPooling2D(pool_size=2),\n",
+ " tf.keras.layers.MaxPool2D(),\n",
" DefaultConv2D(filters=256),\n",
" DefaultConv2D(filters=256),\n",
- " tf.keras.layers.MaxPooling2D(pool_size=2),\n",
+ " tf.keras.layers.MaxPool2D(),\n",
" tf.keras.layers.Flatten(),\n",
- " tf.keras.layers.Dense(units=128, activation='relu'),\n",
+ " tf.keras.layers.Dense(units=128, activation=\"relu\",\n",
+ " kernel_initializer=\"he_normal\"),\n",
" tf.keras.layers.Dropout(0.5),\n",
- " tf.keras.layers.Dense(units=64, activation='relu'),\n",
+ " tf.keras.layers.Dense(units=64, activation=\"relu\",\n",
+ " kernel_initializer=\"he_normal\"),\n",
" tf.keras.layers.Dropout(0.5),\n",
- " tf.keras.layers.Dense(units=10, activation='softmax'),\n",
+ " tf.keras.layers.Dense(units=10, activation=\"softmax\")\n",
"])"
]
},
{
"cell_type": "code",
"execution_count": 32,
- "metadata": {},
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "KZbWeIBYpKz6",
+ "outputId": "fd2181cd-3092-4f03-96ff-b573a39b21ef"
+ },
"outputs": [],
"source": [
- "model.compile(loss=\"sparse_categorical_crossentropy\", optimizer=\"nadam\", metrics=[\"accuracy\"])\n",
- "history = model.fit(X_train, y_train, epochs=10, validation_data=(X_valid, y_valid))\n",
+ "# extra code – compiles, fits, evaluates, and uses the model to make predictions\n",
+ "model.compile(loss=\"sparse_categorical_crossentropy\", optimizer=\"nadam\",\n",
+ " metrics=[\"accuracy\"])\n",
+ "history = model.fit(X_train, y_train, epochs=10,\n",
+ " validation_data=(X_valid, y_valid))\n",
"score = model.evaluate(X_test, y_test)\n",
- "X_new = X_test[:10] # pretend we have new images\n",
+ "X_new = X_test[:10] # pretend we have new images\n",
"y_pred = model.predict(X_new)"
]
},
{
"cell_type": "markdown",
- "metadata": {},
+ "metadata": {
+ "id": "h9kyemsZpKz6"
+ },
"source": [
- "## ResNet-34"
+ "## LeNet-5"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "5glqD4rWpKz7"
+ },
+ "source": [
+ "The famous LeNet-5 architecture had the following layers:\n",
+ "\n",
+ "Layer | Type | Maps | Size | Kernel size | Stride | Activation\n",
+ "-------|-----------------|------|----------|-------------|--------|-----------\n",
+ " Out | Fully connected | – | 10 | – | – | RBF\n",
+ " F6 | Fully connected | – | 84 | – | – | tanh\n",
+ " C5 | Convolution | 120 | 1 × 1 | 5 × 5 | 1 | tanh\n",
+ " S4 | Avg pooling | 16 | 5 × 5 | 2 × 2 | 2 | tanh\n",
+ " C3 | Convolution | 16 | 10 × 10 | 5 × 5 | 1 | tanh\n",
+ " S2 | Avg pooling | 6 | 14 × 14 | 2 × 2 | 2 | tanh\n",
+ " C1 | Convolution | 6 | 28 × 28 | 5 × 5 | 1 | tanh\n",
+ " In | Input | 1 | 32 × 32 | – | – | –\n",
+ "\n",
+ "There were a few tweaks here and there, which don't really matter much anymore, but in case you are interested, here they are:\n",
+ "\n",
+ "* MNIST images are 28 × 28 pixels, but they are zero-padded to 32 × 32 pixels and normalized before being fed to the network. The rest of the network does not use any padding, which is why the size keeps shrinking as the image progresses through the network.\n",
+ "* The average pooling layers are slightly more complex than usual: each neuron computes the mean of its inputs, then multiplies the result by a learnable coefficient (one per map) and adds a learnable bias term (again, one per map), then finally applies the activation function.\n",
+ "* Most neurons in C3 maps are connected to neurons in only three or four S2 maps (instead of all six S2 maps). See table 1 (page 8) in the [original paper](https://homl.info/lenet5) for details.\n",
+ "* The output layer is a bit special: instead of computing the matrix multiplication of the inputs and the weight vector, each neuron outputs the square of the Euclidian distance between its input vector and its weight vector. Each output measures how much the image belongs to a particular digit class. The cross-entropy cost function is now preferred, as it penalizes bad predictions much more, producing larger gradients and converging faster."
+ ]
+ },
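+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Here is a rough Keras sketch of this architecture (an approximation, not the exact original: it uses plain average pooling, fully connected C3 maps, and a softmax output instead of the learnable pooling coefficients, the partial S2→C3 connections, and the RBF output layer described above):"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# a rough modern approximation of LeNet-5 – a sketch, not the exact original\n",
+ "lenet5 = tf.keras.Sequential([\n",
+ "    tf.keras.layers.ZeroPadding2D(padding=2, input_shape=[28, 28, 1]),  # 32×32\n",
+ "    tf.keras.layers.Conv2D(6, kernel_size=5, activation=\"tanh\"),   # C1: 28×28\n",
+ "    tf.keras.layers.AvgPool2D(pool_size=2),                        # S2: 14×14\n",
+ "    tf.keras.layers.Conv2D(16, kernel_size=5, activation=\"tanh\"),  # C3: 10×10\n",
+ "    tf.keras.layers.AvgPool2D(pool_size=2),                        # S4: 5×5\n",
+ "    tf.keras.layers.Conv2D(120, kernel_size=5, activation=\"tanh\"), # C5: 1×1\n",
+ "    tf.keras.layers.Flatten(),\n",
+ "    tf.keras.layers.Dense(84, activation=\"tanh\"),                  # F6\n",
+ "    tf.keras.layers.Dense(10, activation=\"softmax\")                # Out\n",
+ "])\n",
+ "lenet5.summary()"
+ ]
+ },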
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "iV10vudGpKz7"
+ },
+ "source": [
+ "# Implementing a ResNet-34 CNN Using Keras"
]
},
{
"cell_type": "code",
"execution_count": 33,
- "metadata": {},
+ "metadata": {
+ "id": "p9EoM1dTpKz7"
+ },
"outputs": [],
"source": [
"DefaultConv2D = partial(tf.keras.layers.Conv2D, kernel_size=3, strides=1,\n",
- " padding=\"SAME\", use_bias=False)\n",
+ " padding=\"same\", kernel_initializer=\"he_normal\",\n",
+ " use_bias=False)\n",
"\n",
"class ResidualUnit(tf.keras.layers.Layer):\n",
" def __init__(self, filters, strides=1, activation=\"relu\", **kwargs):\n",
@@ -713,12 +888,14 @@
" tf.keras.layers.BatchNormalization(),\n",
" self.activation,\n",
" DefaultConv2D(filters),\n",
- " tf.keras.layers.BatchNormalization()]\n",
+ " tf.keras.layers.BatchNormalization()\n",
+ " ]\n",
" self.skip_layers = []\n",
" if strides > 1:\n",
" self.skip_layers = [\n",
" DefaultConv2D(filters, kernel_size=1, strides=strides),\n",
- " tf.keras.layers.BatchNormalization()]\n",
+ " tf.keras.layers.BatchNormalization()\n",
+ " ]\n",
"\n",
" def call(self, inputs):\n",
" Z = inputs\n",
@@ -733,206 +910,229 @@
{
"cell_type": "code",
"execution_count": 34,
- "metadata": {},
+ "metadata": {
+ "id": "_0qA-kSkpKz7"
+ },
"outputs": [],
"source": [
- "model = tf.keras.Sequential()\n",
- "model.add(DefaultConv2D(64, kernel_size=7, strides=2,\n",
- " input_shape=[224, 224, 3]))\n",
- "model.add(tf.keras.layers.BatchNormalization())\n",
- "model.add(tf.keras.layers.Activation(\"relu\"))\n",
- "model.add(tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding=\"SAME\"))\n",
+ "model = tf.keras.Sequential([\n",
+ " DefaultConv2D(64, kernel_size=7, strides=2, input_shape=[224, 224, 3]),\n",
+ " tf.keras.layers.BatchNormalization(),\n",
+ " tf.keras.layers.Activation(\"relu\"),\n",
+ " tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding=\"same\"),\n",
+ "])\n",
"prev_filters = 64\n",
"for filters in [64] * 3 + [128] * 4 + [256] * 6 + [512] * 3:\n",
" strides = 1 if filters == prev_filters else 2\n",
" model.add(ResidualUnit(filters, strides=strides))\n",
" prev_filters = filters\n",
+ "\n",
"model.add(tf.keras.layers.GlobalAvgPool2D())\n",
"model.add(tf.keras.layers.Flatten())\n",
"model.add(tf.keras.layers.Dense(10, activation=\"softmax\"))"
]
},
{
- "cell_type": "code",
- "execution_count": 35,
- "metadata": {},
- "outputs": [],
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "tWnoERqepKz7"
+ },
"source": [
- "model.summary()"
+ "# Using Pretrained Models from Keras"
]
},
{
- "cell_type": "markdown",
- "metadata": {},
+ "cell_type": "code",
+ "execution_count": 35,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "wbS9p1FnpKz7",
+ "outputId": "a3d0e499-1036-478e-85af-06e882e25e21"
+ },
+ "outputs": [],
"source": [
- "## Using a Pretrained Model"
+ "model = tf.keras.applications.ResNet50(weights=\"imagenet\")"
]
},
{
"cell_type": "code",
"execution_count": 36,
- "metadata": {},
+ "metadata": {
+ "id": "_QhYKi22pKz8"
+ },
"outputs": [],
"source": [
- "model = tf.keras.applications.resnet50.ResNet50(weights=\"imagenet\")"
+ "images = load_sample_images()[\"images\"]\n",
+ "images_resized = tf.keras.layers.Resizing(height=224, width=224,\n",
+ " crop_to_aspect_ratio=True)(images)"
]
},
{
"cell_type": "code",
"execution_count": 37,
- "metadata": {},
+ "metadata": {
+ "id": "usbPpqkqpKz8"
+ },
"outputs": [],
"source": [
- "images_resized = tf.image.resize(images, [224, 224])\n",
- "plot_color_image(images_resized[0])\n",
- "plt.show()"
+ "inputs = tf.keras.applications.resnet50.preprocess_input(images_resized)"
]
},
{
"cell_type": "code",
"execution_count": 38,
- "metadata": {},
- "outputs": [],
- "source": [
- "images_resized = tf.image.resize_with_pad(images, 224, 224, antialias=True)\n",
- "plot_color_image(images_resized[0])"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 39,
- "metadata": {},
- "outputs": [],
- "source": [
- "images_resized = tf.image.resize_with_crop_or_pad(images, 224, 224)\n",
- "plot_color_image(images_resized[0])\n",
- "plt.show()"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 40,
- "metadata": {},
- "outputs": [],
- "source": [
- "china_box = [0, 0.03, 1, 0.68]\n",
- "flower_box = [0.19, 0.26, 0.86, 0.7]\n",
- "images_resized = tf.image.crop_and_resize(images, [china_box, flower_box], [0, 1], [224, 224])\n",
- "plot_color_image(images_resized[0])\n",
- "plt.show()\n",
- "plot_color_image(images_resized[1])\n",
- "plt.show()"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 41,
- "metadata": {},
- "outputs": [],
- "source": [
- "inputs = tf.keras.applications.resnet50.preprocess_input(images_resized * 255)\n",
- "Y_proba = model.predict(inputs)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 42,
- "metadata": {},
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "M-IYqzqRpKz8",
+ "outputId": "5e89ff3b-8afb-4d34-a769-dbe2d70983b5"
+ },
"outputs": [],
"source": [
+ "Y_proba = model.predict(inputs)\n",
"Y_proba.shape"
]
},
{
"cell_type": "code",
- "execution_count": 43,
- "metadata": {},
+ "execution_count": 39,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "-uWvslEcpKz8",
+ "outputId": "5390390e-2edb-4bc3-b1aa-ff14bc2abfe3"
+ },
"outputs": [],
"source": [
"top_K = tf.keras.applications.resnet50.decode_predictions(Y_proba, top=3)\n",
"for image_index in range(len(images)):\n",
- " print(\"Image #{}\".format(image_index))\n",
+ " print(f\"Image #{image_index}\")\n",
" for class_id, name, y_proba in top_K[image_index]:\n",
- " print(\" {} - {:12s} {:.2f}%\".format(class_id, name, y_proba * 100))\n",
- " print()"
+ " print(f\" {class_id} - {name:12s} {y_proba:.2%}\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 40,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 0
+ },
+ "id": "alc_cnxVpKz8",
+ "outputId": "9918157b-9826-4c6d-8e64-9edaff7a8dc7"
+ },
+ "outputs": [],
+ "source": [
+ "# extra code – displays the cropped and resized images\n",
+ "\n",
+ "plt.figure(figsize=(10, 6))\n",
+ "for idx in (0, 1):\n",
+ " plt.subplot(1, 2, idx + 1)\n",
+ " plt.imshow(images_resized[idx] / 255)\n",
+ " plt.axis(\"off\")\n",
+ "\n",
+ "plt.show()"
]
},
{
"cell_type": "markdown",
- "metadata": {},
+ "metadata": {
+ "id": "hqxnSBJ3pKz8"
+ },
"source": [
- "## Pretrained Models for Transfer Learning"
+ "# Pretrained Models for Transfer Learning"
]
},
{
"cell_type": "code",
- "execution_count": 44,
- "metadata": {},
+ "execution_count": 41,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 208,
+ "referenced_widgets": [
+ "2839afc6cb6d4a50b0bdad1fcb7f39d1",
+ "1c08c78c0d484eed9638ad2b757ab584",
+ "eefd1a01ef1c46e09ffbd97ad25377cf",
+ "d142189db76a4681a22f38ae252e4ebc",
+ "d441368305704ab9a3bdbe762ab340a4",
+ "57cbb645792f45adbfab9b29aa708809",
+ "b681dc2200ad4ee397a46602e8f4f654",
+ "0401482a18a94f22b95d5321bfa6f414",
+ "54a90429726b4d848358cafae87ad893",
+ "8f0660be3bf44dd48fd42cd52a507e32",
+ "f8ef3c06db574e3f88dc9a8c0bcd22ab"
+ ]
+ },
+ "id": "mbktvHOXpKz8",
+ "outputId": "ee28b6fc-e112-4d2a-ad11-6bbb88c56a38"
+ },
"outputs": [],
"source": [
"import tensorflow_datasets as tfds\n",
"\n",
- "dataset, info = tfds.load(\"tf_flowers\", as_supervised=True, with_info=True)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 45,
- "metadata": {},
- "outputs": [],
- "source": [
- "info.splits"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 46,
- "metadata": {},
- "outputs": [],
- "source": [
- "info.splits[\"train\"]"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 47,
- "metadata": {},
- "outputs": [],
- "source": [
+ "dataset, info = tfds.load(\"tf_flowers\", as_supervised=True, with_info=True)\n",
+ "dataset_size = info.splits[\"train\"].num_examples\n",
"class_names = info.features[\"label\"].names\n",
- "class_names"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 48,
- "metadata": {},
- "outputs": [],
- "source": [
"n_classes = info.features[\"label\"].num_classes"
]
},
{
"cell_type": "code",
- "execution_count": 49,
- "metadata": {},
+ "execution_count": 42,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "769isDkDpKz8",
+ "outputId": "891b3c57-1212-4959-b24f-574ad366cf4d"
+ },
"outputs": [],
"source": [
- "dataset_size = info.splits[\"train\"].num_examples\n",
"dataset_size"
]
},
{
- "cell_type": "markdown",
- "metadata": {},
+ "cell_type": "code",
+ "execution_count": 43,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "Nok5SNbEpKz9",
+ "outputId": "f79e7c41-6454-4ae7-a497-a60024938480"
+ },
+ "outputs": [],
"source": [
- "**Warning:** TFDS's split API has evolved since the book was published. The [new split API](https://www.tensorflow.org/datasets/splits) (called S3) is much simpler to use:"
+ "class_names"
]
},
{
"cell_type": "code",
- "execution_count": 50,
- "metadata": {},
+ "execution_count": 44,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "D50TeDylpKz9",
+ "outputId": "f795a3b2-1170-49e2-8508-de275c0f1861"
+ },
+ "outputs": [],
+ "source": [
+ "n_classes"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 45,
+ "metadata": {
+ "id": "M-lgeD08pKz9"
+ },
"outputs": [],
"source": [
"test_set_raw, valid_set_raw, train_set_raw = tfds.load(\n",
@@ -943,17 +1143,26 @@
},
{
"cell_type": "code",
- "execution_count": 51,
- "metadata": {},
+ "execution_count": 46,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 592
+ },
+ "id": "niSFaiTgpKz9",
+ "outputId": "45879b7b-31f5-43c8-dcbd-9a1865867a17"
+ },
"outputs": [],
"source": [
+ "# extra code – displays the first 9 images in the validation set\n",
+ "\n",
"plt.figure(figsize=(12, 10))\n",
"index = 0\n",
- "for image, label in train_set_raw.take(9):\n",
+ "for image, label in valid_set_raw.take(9):\n",
" index += 1\n",
" plt.subplot(3, 3, index)\n",
" plt.imshow(image)\n",
- " plt.title(\"Class: {}\".format(class_names[label]))\n",
+ " plt.title(f\"Class: {class_names[label]}\")\n",
" plt.axis(\"off\")\n",
"\n",
"plt.show()"
@@ -961,79 +1170,64 @@
},
{
"cell_type": "markdown",
- "metadata": {},
+ "metadata": {
+ "id": "gXG6iv8XpKz9"
+ },
"source": [
- "Basic preprocessing:"
+ "All three datasets contain individual images. We need to batch them, but for this we first need to ensure they all have the same size, or else batching will not work. We can use a `Resizing` layer for this. We must also call the `tf.keras.applications.xception.preprocess_input()` function to preprocess the images appropriately for the Xception model. We will also add shuffling and prefetching to the training dataset."
]
},
{
"cell_type": "code",
- "execution_count": 52,
- "metadata": {},
+ "execution_count": 47,
+ "metadata": {
+ "id": "Bnz0n9XApKz9"
+ },
"outputs": [],
"source": [
- "def preprocess(image, label):\n",
- " resized_image = tf.image.resize(image, [224, 224])\n",
- " final_image = tf.keras.applications.xception.preprocess_input(resized_image)\n",
- " return final_image, label"
+ "tf.keras.backend.clear_session() # extra code – resets layer name counter\n",
+ "\n",
+ "batch_size = 32\n",
+ "preprocess = tf.keras.Sequential([\n",
+ " tf.keras.layers.Resizing(height=224, width=224, crop_to_aspect_ratio=True),\n",
+ " tf.keras.layers.Lambda(tf.keras.applications.xception.preprocess_input)\n",
+ "])\n",
+ "train_set = train_set_raw.map(lambda X, y: (preprocess(X), y))\n",
+ "train_set = train_set.shuffle(1000, seed=42).batch(batch_size).prefetch(1)\n",
+ "valid_set = valid_set_raw.map(lambda X, y: (preprocess(X), y)).batch(batch_size)\n",
+ "test_set = test_set_raw.map(lambda X, y: (preprocess(X), y)).batch(batch_size)"
]
},
{
"cell_type": "markdown",
- "metadata": {},
+ "metadata": {
+ "id": "ovNEMky-pKz9"
+ },
"source": [
- "Slightly fancier preprocessing (but you could add much more data augmentation):"
+ "Let's take a look again at the first 9 images from the validation set: they're all 224x224 now, with values ranging from -1 to 1:"
]
},
{
"cell_type": "code",
- "execution_count": 53,
- "metadata": {},
+ "execution_count": 48,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 700
+ },
+ "id": "ZL3c3i4opKz9",
+ "outputId": "38847d8d-8822-41a3-cfb2-27479aa5debe"
+ },
"outputs": [],
"source": [
- "def central_crop(image):\n",
- " shape = tf.shape(image)\n",
- " min_dim = tf.reduce_min([shape[0], shape[1]])\n",
- " top_crop = (shape[0] - min_dim) // 4\n",
- " bottom_crop = shape[0] - top_crop\n",
- " left_crop = (shape[1] - min_dim) // 4\n",
- " right_crop = shape[1] - left_crop\n",
- " return image[top_crop:bottom_crop, left_crop:right_crop]\n",
+ "# extra code – displays the first 9 images in the first batch of valid_set\n",
"\n",
- "def random_crop(image):\n",
- " shape = tf.shape(image)\n",
- " min_dim = tf.reduce_min([shape[0], shape[1]]) * 90 // 100\n",
- " return tf.image.random_crop(image, [min_dim, min_dim, 3])\n",
- "\n",
- "def preprocess(image, label, randomize=False):\n",
- " if randomize:\n",
- " cropped_image = random_crop(image)\n",
- " cropped_image = tf.image.random_flip_left_right(cropped_image)\n",
- " else:\n",
- " cropped_image = central_crop(image)\n",
- " resized_image = tf.image.resize(cropped_image, [224, 224])\n",
- " final_image = tf.keras.applications.xception.preprocess_input(resized_image)\n",
- " return final_image, label\n",
- "\n",
- "batch_size = 32\n",
- "train_set = train_set_raw.shuffle(1000).repeat()\n",
- "train_set = train_set.map(partial(preprocess, randomize=True)).batch(batch_size).prefetch(1)\n",
- "valid_set = valid_set_raw.map(preprocess).batch(batch_size).prefetch(1)\n",
- "test_set = test_set_raw.map(preprocess).batch(batch_size).prefetch(1)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 54,
- "metadata": {},
- "outputs": [],
- "source": [
"plt.figure(figsize=(12, 12))\n",
- "for X_batch, y_batch in train_set.take(1):\n",
+ "for X_batch, y_batch in valid_set.take(1):\n",
" for index in range(9):\n",
" plt.subplot(3, 3, index + 1)\n",
- " plt.imshow(X_batch[index] / 2 + 0.5)\n",
- " plt.title(\"Class: {}\".format(class_names[y_batch[index]]))\n",
+ " plt.imshow((X_batch[index] + 1) / 2) # rescale to 0–1 for imshow()\n",
+ " plt.title(f\"Class: {class_names[y_batch[index]]}\")\n",
" plt.axis(\"off\")\n",
"\n",
"plt.show()"
@@ -1041,29 +1235,82 @@
},
{
"cell_type": "code",
- "execution_count": 55,
- "metadata": {},
+ "execution_count": 49,
+ "metadata": {
+ "id": "Ib0cA8Y1pKz9"
+ },
"outputs": [],
"source": [
+ "data_augmentation = tf.keras.Sequential([\n",
+ " tf.keras.layers.RandomFlip(mode=\"horizontal\", seed=42),\n",
+ " tf.keras.layers.RandomRotation(factor=0.05, seed=42),\n",
+ " tf.keras.layers.RandomContrast(factor=0.2, seed=42)\n",
+ "])"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "G7GrQjsspKz-"
+ },
+ "source": [
+ "Try running the following cell multiple times to see different random data augmentations:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 50,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 700
+ },
+ "id": "w6GH5_vupKz-",
+ "outputId": "eeb2c924-2f4f-4aa1-bea9-951bebef4bf0"
+ },
+ "outputs": [],
+ "source": [
+ "# extra code – displays the same first 9 images, after augmentation\n",
+ "\n",
"plt.figure(figsize=(12, 12))\n",
- "for X_batch, y_batch in test_set.take(1):\n",
+ "for X_batch, y_batch in valid_set.take(1):\n",
+ " X_batch_augmented = data_augmentation(X_batch, training=True)\n",
" for index in range(9):\n",
" plt.subplot(3, 3, index + 1)\n",
- " plt.imshow(X_batch[index] / 2 + 0.5)\n",
- " plt.title(\"Class: {}\".format(class_names[y_batch[index]]))\n",
+ " # We must rescale the images to the 0-1 range for imshow(), and also\n",
+ " # clip the result to that range, because data augmentation may\n",
+ " # make some values go out of bounds (e.g., RandomContrast in this case).\n",
+ " plt.imshow(np.clip((X_batch_augmented[index] + 1) / 2, 0, 1))\n",
+ " plt.title(f\"Class: {class_names[y_batch[index]]}\")\n",
" plt.axis(\"off\")\n",
"\n",
"plt.show()"
]
},
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "kNL9AOsDpKz-"
+ },
+ "source": [
+ "Now let's load the pretrained model, without its top layers, and replace them with our own, for the flower classification task:"
+ ]
+ },
{
"cell_type": "code",
- "execution_count": 56,
- "metadata": {},
+ "execution_count": 51,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "lRyCgvaKpKz-",
+ "outputId": "a825e173-8b1d-4217-a1c4-5491b49c3e82"
+ },
"outputs": [],
"source": [
+ "tf.random.set_seed(42) # extra code – ensures reproducibility\n",
"base_model = tf.keras.applications.xception.Xception(weights=\"imagenet\",\n",
- " include_top=False)\n",
+ " include_top=False)\n",
"avg = tf.keras.layers.GlobalAveragePooling2D()(base_model.output)\n",
"output = tf.keras.layers.Dense(n_classes, activation=\"softmax\")(avg)\n",
"model = tf.keras.Model(inputs=base_model.input, outputs=output)"
@@ -1071,82 +1318,127 @@
},
{
"cell_type": "code",
- "execution_count": 57,
- "metadata": {},
- "outputs": [],
- "source": [
- "for index, layer in enumerate(base_model.layers):\n",
- " print(index, layer.name)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 58,
- "metadata": {},
+ "execution_count": 52,
+ "metadata": {
+ "id": "KBlyG6ElpKz-"
+ },
"outputs": [],
"source": [
"for layer in base_model.layers:\n",
- " layer.trainable = False\n",
- "\n",
- "optimizer = tf.keras.optimizers.SGD(learning_rate=0.2, momentum=0.9, decay=0.01)\n",
- "model.compile(loss=\"sparse_categorical_crossentropy\", optimizer=optimizer,\n",
- " metrics=[\"accuracy\"])\n",
- "history = model.fit(train_set,\n",
- " steps_per_epoch=int(0.75 * dataset_size / batch_size),\n",
- " validation_data=valid_set,\n",
- " validation_steps=int(0.15 * dataset_size / batch_size),\n",
- " epochs=5)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 59,
- "metadata": {},
- "outputs": [],
- "source": [
- "for layer in base_model.layers:\n",
- " layer.trainable = True\n",
- "\n",
- "optimizer = tf.keras.optimizers.SGD(learning_rate=0.01, momentum=0.9,\n",
- " nesterov=True, decay=0.001)\n",
- "model.compile(loss=\"sparse_categorical_crossentropy\", optimizer=optimizer,\n",
- " metrics=[\"accuracy\"])\n",
- "history = model.fit(train_set,\n",
- " steps_per_epoch=int(0.75 * dataset_size / batch_size),\n",
- " validation_data=valid_set,\n",
- " validation_steps=int(0.15 * dataset_size / batch_size),\n",
- " epochs=40)"
+ " layer.trainable = False"
]
},
{
"cell_type": "markdown",
- "metadata": {},
+ "metadata": {
+ "id": "WFEFw7GKpKz-"
+ },
+ "source": [
+ "Let's train the model for a few epochs, while keeping the base model weights fixed:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 53,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "GGxK2yPcpKz-",
+ "outputId": "6b64214a-e104-4b6c-9b7a-3388fc9aa15f"
+ },
+ "outputs": [],
+ "source": [
+ "optimizer = tf.keras.optimizers.SGD(learning_rate=0.1, momentum=0.9)\n",
+ "model.compile(loss=\"sparse_categorical_crossentropy\", optimizer=optimizer,\n",
+ " metrics=[\"accuracy\"])\n",
+ "history = model.fit(train_set, validation_data=valid_set, epochs=3)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 54,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "GvGMiJMLpKz-",
+ "outputId": "91f2c96c-c058-45e0-e428-66fa6076ad56"
+ },
+ "outputs": [],
+ "source": [
+ "for indices in zip(range(33), range(33, 66), range(66, 99), range(99, 132)):\n",
+ " for idx in indices:\n",
+ " print(f\"{idx:3}: {base_model.layers[idx].name:22}\", end=\"\")\n",
+ " print()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "L_bEwL8KpKz_"
+ },
+ "source": [
+ "Now that the weights of our new top layers are not too bad, we can make the top part of the base model trainable again, and continue training, but with a lower learning rate:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 55,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "GEUNGlhvpKz_",
+ "outputId": "c622a91d-f634-4443-b87e-8d46defdb578"
+ },
+ "outputs": [],
+ "source": [
+ "for layer in base_model.layers[56:]:\n",
+ " layer.trainable = True\n",
+ "\n",
+ "optimizer = tf.keras.optimizers.SGD(learning_rate=0.01, momentum=0.9)\n",
+ "model.compile(loss=\"sparse_categorical_crossentropy\", optimizer=optimizer,\n",
+ " metrics=[\"accuracy\"])\n",
+ "history = model.fit(train_set, validation_data=valid_set, epochs=10)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "mpVsD1f8pKz_"
+ },
"source": [
"# Classification and Localization"
]
},
{
"cell_type": "code",
- "execution_count": 60,
- "metadata": {},
+ "execution_count": 56,
+ "metadata": {
+ "id": "k_7rd9hopKz_"
+ },
"outputs": [],
"source": [
+ "tf.random.set_seed(42) # extra code – ensures reproducibility\n",
"base_model = tf.keras.applications.xception.Xception(weights=\"imagenet\",\n",
- " include_top=False)\n",
+ " include_top=False)\n",
"avg = tf.keras.layers.GlobalAveragePooling2D()(base_model.output)\n",
"class_output = tf.keras.layers.Dense(n_classes, activation=\"softmax\")(avg)\n",
"loc_output = tf.keras.layers.Dense(4)(avg)\n",
"model = tf.keras.Model(inputs=base_model.input,\n",
- " outputs=[class_output, loc_output])\n",
+ " outputs=[class_output, loc_output])\n",
"model.compile(loss=[\"sparse_categorical_crossentropy\", \"mse\"],\n",
- " loss_weights=[0.8, 0.2], # depends on what you care most about\n",
+ " loss_weights=[0.8, 0.2], # depends on what you care most about\n",
" optimizer=optimizer, metrics=[\"accuracy\"])"
]
},
{
"cell_type": "code",
- "execution_count": 61,
- "metadata": {},
+ "execution_count": 57,
+ "metadata": {
+ "id": "E0XZoWKjpKz_"
+ },
"outputs": [],
"source": [
"def add_random_bounding_boxes(images, labels):\n",
@@ -1158,24 +1450,34 @@
},
{
"cell_type": "code",
- "execution_count": 62,
- "metadata": {},
+ "execution_count": 58,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "gGGaA3SJpKz_",
+ "outputId": "2e525486-d886-4ba1-c123-1c8cdf7f1b8a"
+ },
"outputs": [],
"source": [
- "model.fit(fake_train_set, steps_per_epoch=5, epochs=2)"
+ "model.fit(fake_train_set, epochs=2)"
]
},
{
"cell_type": "markdown",
- "metadata": {},
+ "metadata": {
+ "id": "mD9oCJ7vpKz_"
+ },
"source": [
- "## Mean Average Precision (mAP)"
+ "# Extra Material – How mAP Relates to Precision/Recall"
]
},
{
"cell_type": "code",
- "execution_count": 63,
- "metadata": {},
+ "execution_count": 59,
+ "metadata": {
+ "id": "fgjxsrkLpKz_"
+ },
"outputs": [],
"source": [
"def maximum_precisions(precisions):\n",
@@ -1184,8 +1486,15 @@
},
{
"cell_type": "code",
- "execution_count": 64,
- "metadata": {},
+ "execution_count": 60,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 287
+ },
+ "id": "pB2kJkHrpKz_",
+ "outputId": "fd9f2bc1-ae06-4c60-8a0c-f4bec6577efa"
+ },
"outputs": [],
"source": [
"recalls = np.linspace(0, 1, 11)\n",
@@ -1206,142 +1515,54 @@
},
{
"cell_type": "markdown",
- "metadata": {},
- "source": [
- "Transpose convolutions:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 65,
- "metadata": {},
- "outputs": [],
- "source": [
- "tf.random.set_seed(42)\n",
- "X = images_resized.numpy()\n",
- "\n",
- "conv_transpose = tf.keras.layers.Conv2DTranspose(filters=5, kernel_size=3, strides=2, padding=\"VALID\")\n",
- "output = conv_transpose(X)\n",
- "output.shape"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 66,
- "metadata": {},
- "outputs": [],
- "source": [
- "def normalize(X):\n",
- " return (X - tf.reduce_min(X)) / (tf.reduce_max(X) - tf.reduce_min(X))\n",
- "\n",
- "fig = plt.figure(figsize=(12, 8))\n",
- "gs = mpl.gridspec.GridSpec(nrows=1, ncols=2, width_ratios=[1, 2])\n",
- "\n",
- "ax1 = fig.add_subplot(gs[0, 0])\n",
- "ax1.set_title(\"Input\", fontsize=14)\n",
- "ax1.imshow(X[0]) # plot the 1st image\n",
- "ax1.axis(\"off\")\n",
- "ax2 = fig.add_subplot(gs[0, 1])\n",
- "ax2.set_title(\"Output\", fontsize=14)\n",
- "ax2.imshow(normalize(output[0, ..., :3]), interpolation=\"bicubic\") # plot the output for the 1st image\n",
- "ax2.axis(\"off\")\n",
- "plt.show()"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 67,
- "metadata": {},
- "outputs": [],
- "source": [
- "def upscale_images(images, stride, kernel_size):\n",
- " batch_size, height, width, channels = images.shape\n",
- " upscaled = np.zeros((batch_size,\n",
- " (height - 1) * stride + 2 * kernel_size - 1,\n",
- " (width - 1) * stride + 2 * kernel_size - 1,\n",
- " channels))\n",
- " upscaled[:,\n",
- " kernel_size - 1:(height - 1) * stride + kernel_size:stride,\n",
- " kernel_size - 1:(width - 1) * stride + kernel_size:stride,\n",
- " :] = images\n",
- " return upscaled"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 68,
- "metadata": {},
- "outputs": [],
- "source": [
- "upscaled = upscale_images(X, stride=2, kernel_size=3)\n",
- "weights, biases = conv_transpose.weights\n",
- "reversed_filters = np.flip(weights.numpy(), axis=[0, 1])\n",
- "reversed_filters = np.transpose(reversed_filters, [0, 1, 3, 2])\n",
- "manual_output = tf.nn.conv2d(upscaled, reversed_filters, strides=1, padding=\"VALID\")"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 69,
"metadata": {
- "scrolled": true
+ "id": "hFjs5WBKpK0A"
},
- "outputs": [],
- "source": [
- "def normalize(X):\n",
- " return (X - tf.reduce_min(X)) / (tf.reduce_max(X) - tf.reduce_min(X))\n",
- "\n",
- "fig = plt.figure(figsize=(12, 8))\n",
- "gs = mpl.gridspec.GridSpec(nrows=1, ncols=3, width_ratios=[1, 2, 2])\n",
- "\n",
- "ax1 = fig.add_subplot(gs[0, 0])\n",
- "ax1.set_title(\"Input\", fontsize=14)\n",
- "ax1.imshow(X[0]) # plot the 1st image\n",
- "ax1.axis(\"off\")\n",
- "ax2 = fig.add_subplot(gs[0, 1])\n",
- "ax2.set_title(\"Upscaled\", fontsize=14)\n",
- "ax2.imshow(upscaled[0], interpolation=\"bicubic\")\n",
- "ax2.axis(\"off\")\n",
- "ax3 = fig.add_subplot(gs[0, 2])\n",
- "ax3.set_title(\"Output\", fontsize=14)\n",
- "ax3.imshow(normalize(manual_output[0, ..., :3]), interpolation=\"bicubic\") # plot the output for the 1st image\n",
- "ax3.axis(\"off\")\n",
- "plt.show()"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 70,
- "metadata": {},
- "outputs": [],
- "source": [
- "np.allclose(output, manual_output.numpy(), atol=1e-7)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
"source": [
"# Exercises"
]
},
{
"cell_type": "markdown",
- "metadata": {},
+ "metadata": {
+ "id": "iXYUCZlvpK0B"
+ },
"source": [
"## 1. to 8."
]
},
{
"cell_type": "markdown",
- "metadata": {},
+ "metadata": {
+ "id": "7gmFmNUjpK0B"
+ },
"source": [
- "See appendix A."
+ "1. These are the main advantages of a CNN over a fully connected DNN for image classification:\n",
+ " * Because consecutive layers are only partially connected and because it heavily reuses its weights, a CNN has many fewer parameters than a fully connected DNN, which makes it much faster to train, reduces the risk of overfitting, and requires much less training data.\n",
+ " * When a CNN has learned a kernel that can detect a particular feature, it can detect that feature anywhere in the image. In contrast, when a DNN learns a feature in one location, it can detect it only in that particular location. Since images typically have very repetitive features, CNNs are able to generalize much better than DNNs for image processing tasks such as classification, using fewer training examples.\n",
+ " * Finally, a DNN has no prior knowledge of how pixels are organized; it does not know that nearby pixels are close. A CNN's architecture embeds this prior knowledge. Lower layers typically identify features in small areas of the images, while higher layers combine the lower-level features into larger features. This works well with most natural images, giving CNNs a decisive head start compared to DNNs.\n",
+ "2. Let's compute how many parameters the CNN has.\n",
+ " * Since its first convolutional layer has 3 × 3 kernels, and the input has three channels (red, green, and blue), each feature map has 3 × 3 × 3 weights, plus a bias term. That's 28 parameters per feature map. Since this first convolutional layer has 100 feature maps, it has a total of 2,800 parameters. The second convolutional layer has 3 × 3 kernels and its input is the set of 100 feature maps of the previous layer, so each feature map has 3 × 3 × 100 = 900 weights, plus a bias term. Since it has 200 feature maps, this layer has 901 × 200 = 180,200 parameters. Finally, the third and last convolutional layer also has 3 × 3 kernels, and its input is the set of 200 feature maps of the previous layers, so each feature map has 3 × 3 × 200 = 1,800 weights, plus a bias term. Since it has 400 feature maps, this layer has a total of 1,801 × 400 = 720,400 parameters. All in all, the CNN has 2,800 + 180,200 + 720,400 = 903,400 parameters.
\n",
+ " * Now let's compute how much RAM this neural network will require (at least) when making a prediction for a single instance. First let's compute the feature map size for each layer. Since we are using a stride of 2 and `\"same\"` padding, the horizontal and vertical dimensions of the feature maps are divided by 2 at each layer (rounding up if necessary). So, as the input channels are 200 × 300 pixels, the first layer's feature maps are 100 × 150, the second layer's feature maps are 50 × 75, and the third layer's feature maps are 25 × 38. Since 32 bits is 4 bytes and the first convolutional layer has 100 feature maps, this first layer takes up 4 × 100 × 150 × 100 = 6 million bytes (6 MB). The second layer takes up 4 × 50 × 75 × 200 = 3 million bytes (3 MB). Finally, the third layer takes up 4 × 25 × 38 × 400 = 1,520,000 bytes (about 1.5 MB). However, once a layer has been computed, the memory occupied by the previous layer can be released, so if everything is well optimized, only 6 + 3 = 9 million bytes (9 MB) of RAM will be required (when the second layer has just been computed, but the memory occupied by the first layer has not been released yet). But wait, you also need to add the memory occupied by the CNN's parameters! We computed earlier that it has 903,400 parameters, each using up 4 bytes, so this adds 3,613,600 bytes (about 3.6 MB). The total RAM required is therefore (at least) 12,613,600 bytes (about 12.6 MB).
\n",
+ " * Lastly, let's compute the minimum amount of RAM required when training the CNN on a mini-batch of 50 images. During training TensorFlow uses backpropagation, which requires keeping all values computed during the forward pass until the reverse pass begins. So we must compute the total RAM required by all layers for a single instance and multiply that by 50. At this point, let's start counting in megabytes rather than bytes. We computed before that the three layers require respectively 6, 3, and 1.5 MB for each instance. That's a total of 10.5 MB per instance, so for 50 instances the total RAM required is 525 MB. Add to that the RAM required by the input images, which is 50 × 4 × 200 × 300 × 3 = 36 million bytes (36 MB), plus the RAM required for the model parameters, which is about 3.6 MB (computed earlier), plus some RAM for the gradients (we will neglect this since it can be released gradually as backpropagation goes down the layers during the reverse pass). We are up to a total of roughly 525 + 36 + 3.6 = 564.6 MB, and that's really an optimistic bare minimum.\n",
+ "3. If your GPU runs out of memory while training a CNN, here are five things you could try to solve the problem (other than purchasing a GPU with more RAM):\n",
+ " * Reduce the mini-batch size.\n",
+ " * Reduce dimensionality using a larger stride in one or more layers.\n",
+ " * Remove one or more layers.\n",
+ " * Use 16-bit floats instead of 32-bit floats.\n",
+ " * Distribute the CNN across multiple devices.\n",
+ "4. A max pooling layer has no parameters at all, whereas a convolutional layer has quite a few (see the previous questions).\n",
+ "5. A local response normalization layer makes the neurons that most strongly activate inhibit neurons at the same location but in neighboring feature maps, which encourages different feature maps to specialize and pushes them apart, forcing them to explore a wider range of features. It is typically used in the lower layers to have a larger pool of low-level features that the upper layers can build upon.\n",
+ "6. The main innovations in AlexNet compared to LeNet-5 are that it is much larger and deeper, and it stacks convolutional layers directly on top of each other, instead of stacking a pooling layer on top of each convolutional layer. The main innovation in GoogLeNet is the introduction of _inception modules_, which make it possible to have a much deeper net than previous CNN architectures, with fewer parameters. ResNet's main innovation is the introduction of skip connections, which make it possible to go well beyond 100 layers. Arguably, its simplicity and consistency are also rather innovative. SENet's main innovation was the idea of using an SE block (a two-layer dense network) after every inception module in an inception network or every residual unit in a ResNet to recalibrate the relative importance of feature maps. Xception's main innovation was the use of depthwise separable convolutional layers, which look at spatial patterns and depthwise patterns separately. Lastly, EfficientNet's main innotation was the compound scaling method, to efficiently scale a model to a larger compute budget.\n",
+ "7. Fully convolutional networks are neural networks composed exclusively of convolutional and pooling layers. FCNs can efficiently process images of any width and height (at least above the minimum size). They are most useful for object detection and semantic segmentation because they only need to look at the image once (instead of having to run a CNN multiple times on different parts of the image). If you have a CNN with some dense layers on top, you can convert these dense layers to convolutional layers to create an FCN: just replace the lowest dense layer with a convolutional layer with a kernel size equal to the layer's input size, with one filter per neuron in the dense layer, and using `\"valid\"` padding. Generally the stride should be 1, but you can set it to a higher value if you want. The activation function should be the same as the dense layer's. The other dense layers should be converted the same way, but using 1 × 1 filters. It is actually possible to convert a trained CNN this way by appropriately reshaping the dense layers' weight matrices.\n",
+ "8. The main technical difficulty of semantic segmentation is the fact that a lot of the spatial information gets lost in a CNN as the signal flows through each layer, especially in pooling layers and layers with a stride greater than 1. This spatial information needs to be restored somehow to accurately predict the class of each pixel."
]
},
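+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Extra code cell for exercise 2 – a quick sanity check of the parameter count: it builds the three convolutional layers described in the question (3 × 3 kernels, stride 2, `\"same\"` padding, 100/200/400 feature maps) on 200 × 300 RGB inputs and lets Keras count the parameters, which should match the 903,400 computed by hand. This is just an illustrative sketch, not part of the original exercise:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Sanity check for exercise 2 – the layer stack follows the question's description\n",
+ "check_model = tf.keras.Sequential([\n",
+ "    tf.keras.layers.Conv2D(100, kernel_size=3, strides=2, padding=\"same\",\n",
+ "                           activation=\"relu\", input_shape=[200, 300, 3]),\n",
+ "    tf.keras.layers.Conv2D(200, kernel_size=3, strides=2, padding=\"same\",\n",
+ "                           activation=\"relu\"),\n",
+ "    tf.keras.layers.Conv2D(400, kernel_size=3, strides=2, padding=\"same\",\n",
+ "                           activation=\"relu\"),\n",
+ "])\n",
+ "check_model.count_params()  # 903,400, as computed by hand"
+ ]
+ },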
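+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Extra code cell for exercise 5 – a tiny demo of local response normalization using `tf.nn.local_response_normalization()` on a random batch of feature maps. The tensor shape and hyperparameter values are arbitrary, just to show the operation:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Each activation is divided by a term that grows with the squared activations\n",
+ "# at the same location in neighboring feature maps (depth_radius maps on each side)\n",
+ "tf.random.set_seed(42)\n",
+ "feature_maps = tf.random.normal([1, 8, 8, 10])  # arbitrary batch of feature maps\n",
+ "normalized = tf.nn.local_response_normalization(feature_maps, depth_radius=2,\n",
+ "                                                bias=1.0, alpha=0.2, beta=0.75)\n",
+ "normalized.shape"
+ ]
+ },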
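+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Extra code cell for exercise 7 – a minimal sketch of the dense-to-convolutional conversion. It assumes a dense layer that was fed flattened 7 × 7 × 128 feature maps and has 10 output neurons (arbitrary shapes), and shows that a 7 × 7 convolutional layer with 10 filters, `\"valid\"` padding, and suitably reshaped weights produces the same outputs:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Sketch for exercise 7 – the shapes below are arbitrary\n",
+ "tf.random.set_seed(42)\n",
+ "dense = tf.keras.layers.Dense(10, activation=\"softmax\")\n",
+ "dense.build(input_shape=[None, 7 * 7 * 128])\n",
+ "\n",
+ "conv = tf.keras.layers.Conv2D(filters=10, kernel_size=7, padding=\"valid\",\n",
+ "                              activation=\"softmax\")\n",
+ "conv.build(input_shape=[None, 7, 7, 128])\n",
+ "\n",
+ "# The dense kernel has shape [7 * 7 * 128, 10]: reshape it to [7, 7, 128, 10]\n",
+ "kernel, bias = dense.get_weights()\n",
+ "conv.set_weights([kernel.reshape(7, 7, 128, 10), bias])\n",
+ "\n",
+ "X = tf.random.normal([2, 7, 7, 128])  # a batch of 2 fake feature maps\n",
+ "y_dense = dense(tf.reshape(X, [2, -1]))\n",
+ "y_conv = conv(X)  # shape [2, 1, 1, 10]\n",
+ "np.allclose(y_dense, tf.reshape(y_conv, [2, 10]), atol=1e-6)"
+ ]
+ },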
{
"cell_type": "markdown",
- "metadata": {},
+ "metadata": {
+ "id": "KIpUNnvnpK0B"
+ },
"source": [
"## 9. High Accuracy CNN for MNIST\n",
"_Exercise: Build your own CNN from scratch and try to achieve the highest possible accuracy on MNIST._"
@@ -1349,15 +1570,23 @@
},
{
"cell_type": "markdown",
- "metadata": {},
+ "metadata": {
+ "id": "FSsUWNuzpK0B"
+ },
"source": [
"The following model uses 2 convolutional layers, followed by 1 pooling layer, then dropout 25%, then a dense layer, another dropout layer but with 50% dropout, and finally the output layer. It reaches about 99.2% accuracy on the test set. This places this model roughly in the top 20% in the [MNIST Kaggle competition](https://www.kaggle.com/c/digit-recognizer/) (if we ignore the models with an accuracy greater than 99.79% which were most likely trained on the test set, as explained by Chris Deotte in [this post](https://www.kaggle.com/c/digit-recognizer/discussion/61480)). Can you do better? To reach 99.5 to 99.7% accuracy on the test set, you need to add image augmentation, batch norm, use a learning schedule such as 1-cycle, and possibly create an ensemble."
]
},
{
"cell_type": "code",
- "execution_count": 71,
- "metadata": {},
+ "execution_count": 61,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "6tdKYb9PpK0B",
+ "outputId": "37baf840-d76d-4d94-f692-524eef47a041"
+ },
"outputs": [],
"source": [
"(X_train_full, y_train_full), (X_test, y_test) = tf.keras.datasets.mnist.load_data()\n",
@@ -1373,8 +1602,14 @@
},
{
"cell_type": "code",
- "execution_count": 72,
- "metadata": {},
+ "execution_count": 62,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "uDchCzo3pK0B",
+ "outputId": "5e68d152-cd84-4451-db6c-d69fe7f18cc7"
+ },
"outputs": [],
"source": [
"tf.keras.backend.clear_session()\n",
@@ -1400,14 +1635,18 @@
},
{
"cell_type": "markdown",
- "metadata": {},
+ "metadata": {
+ "id": "ax165YCQpK0B"
+ },
"source": [
"## 10. Use transfer learning for large image classification"
]
},
{
"cell_type": "markdown",
- "metadata": {},
+ "metadata": {
+ "id": "n5KdUYhHpK0B"
+ },
"source": [
"_Exercise: Use transfer learning for large image classification, going through these steps:_\n",
"\n",
@@ -1419,14 +1658,18 @@
},
{
"cell_type": "markdown",
- "metadata": {},
+ "metadata": {
+ "id": "STW6EOmbpK0C"
+ },
"source": [
"See the Flowers example above."
]
},
{
"cell_type": "markdown",
- "metadata": {},
+ "metadata": {
+ "id": "Bl9zizRopK0C"
+ },
"source": [
"## 11.\n",
"_Exercise: Go through TensorFlow's [Style Transfer tutorial](https://homl.info/styletuto). It is a fun way to generate art using Deep Learning._\n"
@@ -1434,15 +1677,22 @@
},
{
"cell_type": "markdown",
- "metadata": {},
+ "metadata": {
+ "id": "W1yw_8PSpK0C"
+ },
"source": [
"Simply open the Colab and follow its instructions."
]
}
],
"metadata": {
+ "accelerator": "GPU",
+ "colab": {
+ "name": "14_deep_computer_vision_with_cnns.ipynb",
+ "provenance": []
+ },
"kernelspec": {
- "display_name": "Python 3 (ipykernel)",
+ "display_name": "Python 3",
"language": "python",
"name": "python3"
},
@@ -1467,6 +1717,352 @@
"toc_cell": false,
"toc_section_display": "block",
"toc_window_display": false
+ },
+ "widgets": {
+ "application/vnd.jupyter.widget-state+json": {
+ "0401482a18a94f22b95d5321bfa6f414": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "ProgressStyleModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "ProgressStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "bar_color": null,
+ "description_width": ""
+ }
+ },
+ "1c08c78c0d484eed9638ad2b757ab584": {
+ "model_module": "@jupyter-widgets/base",
+ "model_module_version": "1.2.0",
+ "model_name": "LayoutModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "2839afc6cb6d4a50b0bdad1fcb7f39d1": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "HBoxModel",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HBoxModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HBoxView",
+ "box_style": "",
+ "children": [
+ "IPY_MODEL_eefd1a01ef1c46e09ffbd97ad25377cf",
+ "IPY_MODEL_d142189db76a4681a22f38ae252e4ebc",
+ "IPY_MODEL_d441368305704ab9a3bdbe762ab340a4"
+ ],
+ "layout": "IPY_MODEL_1c08c78c0d484eed9638ad2b757ab584"
+ }
+ },
+ "54a90429726b4d848358cafae87ad893": {
+ "model_module": "@jupyter-widgets/base",
+ "model_module_version": "1.2.0",
+ "model_name": "LayoutModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "57cbb645792f45adbfab9b29aa708809": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "DescriptionStyleModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "DescriptionStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "description_width": ""
+ }
+ },
+ "8f0660be3bf44dd48fd42cd52a507e32": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "DescriptionStyleModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "DescriptionStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "description_width": ""
+ }
+ },
+ "b681dc2200ad4ee397a46602e8f4f654": {
+ "model_module": "@jupyter-widgets/base",
+ "model_module_version": "1.2.0",
+ "model_name": "LayoutModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "d142189db76a4681a22f38ae252e4ebc": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "FloatProgressModel",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "FloatProgressModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "ProgressView",
+ "bar_style": "success",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_54a90429726b4d848358cafae87ad893",
+ "max": 5,
+ "min": 0,
+ "orientation": "horizontal",
+ "style": "IPY_MODEL_0401482a18a94f22b95d5321bfa6f414",
+ "value": 5
+ }
+ },
+ "d441368305704ab9a3bdbe762ab340a4": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "HTMLModel",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HTMLModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HTMLView",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_f8ef3c06db574e3f88dc9a8c0bcd22ab",
+ "placeholder": "",
+ "style": "IPY_MODEL_8f0660be3bf44dd48fd42cd52a507e32",
+ "value": " 5/5 [00:10<00:00, 2.12s/ file]"
+ }
+ },
+ "eefd1a01ef1c46e09ffbd97ad25377cf": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "HTMLModel",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HTMLModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HTMLView",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_b681dc2200ad4ee397a46602e8f4f654",
+ "placeholder": "",
+ "style": "IPY_MODEL_57cbb645792f45adbfab9b29aa708809",
+ "value": "Dl Completed...: 100%"
+ }
+ },
+ "f8ef3c06db574e3f88dc9a8c0bcd22ab": {
+ "model_module": "@jupyter-widgets/base",
+ "model_module_version": "1.2.0",
+ "model_name": "LayoutModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ }
+ }
}
},
"nbformat": 4,