Remove old TensorFlow chapter

parent f0b432eb59
commit 6974508cf7

@@ -1,993 +0,0 @@
**Chapter 10 – Introduction to Artificial Neural Networks**

_This notebook contains all the sample code and solutions to the exercises in chapter 10._

# Setup

First, let's make sure this notebook works in both Python 2 and 3, import a few common modules, ensure Matplotlib plots figures inline, and prepare a function to save the figures:

```python
# To support both Python 2 and Python 3
from __future__ import division, print_function, unicode_literals

# Common imports
import numpy as np
import os

# to make this notebook's output stable across runs
def reset_graph(seed=42):
    tf.reset_default_graph()
    tf.set_random_seed(seed)
    np.random.seed(seed)

# To plot pretty figures
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['xtick.labelsize'] = 12
plt.rcParams['ytick.labelsize'] = 12

# Where to save the figures
PROJECT_ROOT_DIR = "."
CHAPTER_ID = "ann"

def save_fig(fig_id, tight_layout=True):
    path = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID, fig_id + ".png")
    print("Saving figure", fig_id)
    if tight_layout:
        plt.tight_layout()
    plt.savefig(path, format='png', dpi=300)
```
|
|
||||||
# Perceptrons

**Note**: we set `max_iter` and `tol` explicitly to avoid warnings about their default values changing in future versions of Scikit-Learn.

```python
import numpy as np
from sklearn.datasets import load_iris
from sklearn.linear_model import Perceptron

iris = load_iris()
X = iris.data[:, (2, 3)]  # petal length, petal width
y = (iris.target == 0).astype(int)  # plain int: np.int is deprecated in recent NumPy releases

per_clf = Perceptron(max_iter=100, tol=-np.infty, random_state=42)
per_clf.fit(X, y)

y_pred = per_clf.predict([[2, 0.5]])
```

```python
y_pred
```
|
|
||||||
```python
a = -per_clf.coef_[0][0] / per_clf.coef_[0][1]
b = -per_clf.intercept_ / per_clf.coef_[0][1]

axes = [0, 5, 0, 2]

x0, x1 = np.meshgrid(
        np.linspace(axes[0], axes[1], 500).reshape(-1, 1),
        np.linspace(axes[2], axes[3], 200).reshape(-1, 1),
    )
X_new = np.c_[x0.ravel(), x1.ravel()]
y_predict = per_clf.predict(X_new)
zz = y_predict.reshape(x0.shape)

plt.figure(figsize=(10, 4))
plt.plot(X[y==0, 0], X[y==0, 1], "bs", label="Not Iris-Setosa")
plt.plot(X[y==1, 0], X[y==1, 1], "yo", label="Iris-Setosa")

plt.plot([axes[0], axes[1]], [a * axes[0] + b, a * axes[1] + b], "k-", linewidth=3)
from matplotlib.colors import ListedColormap
custom_cmap = ListedColormap(['#9898ff', '#fafab0'])

plt.contourf(x0, x1, zz, cmap=custom_cmap)
plt.xlabel("Petal length", fontsize=14)
plt.ylabel("Petal width", fontsize=14)
plt.legend(loc="lower right", fontsize=14)
plt.axis(axes)

save_fig("perceptron_iris_plot")
plt.show()
```

# Activation functions

```python
def sigmoid(z):
    return 1 / (1 + np.exp(-z))

def relu(z):
    return np.maximum(0, z)

def derivative(f, z, eps=0.000001):
    return (f(z + eps) - f(z - eps))/(2 * eps)
```
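As a quick sanity check (an addition, not in the original notebook), the centered difference in `derivative()` should reproduce the known values of these derivatives at zero:

```python
# Added check: compare the numerical derivative with known analytical values at z = 0.
z0 = np.array([0.0])
print(derivative(sigmoid, z0))   # ~0.25, since sigmoid'(z) = sigmoid(z) * (1 - sigmoid(z))
print(derivative(np.tanh, z0))   # ~1.0,  since tanh'(z) = 1 - tanh(z)**2
print(derivative(relu, z0))      # 0.5 at the kink: the centered difference averages the two one-sided slopes
```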
|
|
||||||
```python
z = np.linspace(-5, 5, 200)

plt.figure(figsize=(11,4))

plt.subplot(121)
plt.plot(z, np.sign(z), "r-", linewidth=1, label="Step")
plt.plot(z, sigmoid(z), "g--", linewidth=2, label="Sigmoid")
plt.plot(z, np.tanh(z), "b-", linewidth=2, label="Tanh")
plt.plot(z, relu(z), "m-.", linewidth=2, label="ReLU")
plt.grid(True)
plt.legend(loc="center right", fontsize=14)
plt.title("Activation functions", fontsize=14)
plt.axis([-5, 5, -1.2, 1.2])

plt.subplot(122)
plt.plot(z, derivative(np.sign, z), "r-", linewidth=1, label="Step")
plt.plot(0, 0, "ro", markersize=5)
plt.plot(0, 0, "rx", markersize=10)
plt.plot(z, derivative(sigmoid, z), "g--", linewidth=2, label="Sigmoid")
plt.plot(z, derivative(np.tanh, z), "b-", linewidth=2, label="Tanh")
plt.plot(z, derivative(relu, z), "m-.", linewidth=2, label="ReLU")
plt.grid(True)
#plt.legend(loc="center right", fontsize=14)
plt.title("Derivatives", fontsize=14)
plt.axis([-5, 5, -0.2, 1.2])

save_fig("activation_functions_plot")
plt.show()
```

```python
def heaviside(z):
    return (z >= 0).astype(z.dtype)

def mlp_xor(x1, x2, activation=heaviside):
    return activation(-activation(x1 + x2 - 1.5) + activation(x1 + x2 - 0.5) - 0.5)
```
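A quick way to convince yourself that this hand-wired two-layer MLP really computes XOR is to evaluate it at the four corner points (this check is an addition, not in the original notebook):

```python
# Added check: the network should reproduce the XOR truth table at the corners.
for a, b in [(0., 0.), (0., 1.), (1., 0.), (1., 1.)]:
    out = mlp_xor(np.array([a]), np.array([b]), activation=heaviside)
    print(int(a), "XOR", int(b), "=", int(out[0]))   # expected: 0, 1, 1, 0
```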
|
|
||||||
```python
x1s = np.linspace(-0.2, 1.2, 100)
x2s = np.linspace(-0.2, 1.2, 100)
x1, x2 = np.meshgrid(x1s, x2s)

z1 = mlp_xor(x1, x2, activation=heaviside)
z2 = mlp_xor(x1, x2, activation=sigmoid)

plt.figure(figsize=(10,4))

plt.subplot(121)
plt.contourf(x1, x2, z1)
plt.plot([0, 1], [0, 1], "gs", markersize=20)
plt.plot([0, 1], [1, 0], "y^", markersize=20)
plt.title("Activation function: heaviside", fontsize=14)
plt.grid(True)

plt.subplot(122)
plt.contourf(x1, x2, z2)
plt.plot([0, 1], [0, 1], "gs", markersize=20)
plt.plot([0, 1], [1, 0], "y^", markersize=20)
plt.title("Activation function: sigmoid", fontsize=14)
plt.grid(True)
```
|
|
||||||
# FNN for MNIST

## Using the Estimator API (formerly `tf.contrib.learn`)

```python
import tensorflow as tf
```

**Warning**: `tf.examples.tutorials.mnist` is deprecated, so we will use `tf.keras.datasets.mnist` instead. Moreover, the `tf.contrib.learn` API was promoted to `tf.estimator` and `tf.feature_column`, and it has changed considerably. In particular, there is no longer an `infer_real_valued_columns_from_input()` function or `SKCompat` class.
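For reference, earlier releases of this chapter used the old `tf.contrib.learn` workflow, which looked roughly like the sketch below (illustrative only: it assumes the MNIST arrays loaded in the next cell, and it will not run on TensorFlow releases where `tf.contrib` has been removed):

```python
# Rough sketch of the deprecated tf.contrib.learn workflow, for comparison only.
feature_cols = tf.contrib.learn.infer_real_valued_columns_from_input(X_train)
dnn_clf = tf.contrib.learn.DNNClassifier(hidden_units=[300, 100], n_classes=10,
                                         feature_columns=feature_cols)
dnn_clf = tf.contrib.learn.SKCompat(dnn_clf)  # Scikit-Learn compatibility wrapper
dnn_clf.fit(X_train, y_train, batch_size=50, steps=40000)
```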
|
|
||||||
```python
(X_train, y_train), (X_test, y_test) = tf.keras.datasets.mnist.load_data()
X_train = X_train.astype(np.float32).reshape(-1, 28*28) / 255.0
X_test = X_test.astype(np.float32).reshape(-1, 28*28) / 255.0
y_train = y_train.astype(np.int32)
y_test = y_test.astype(np.int32)
X_valid, X_train = X_train[:5000], X_train[5000:]
y_valid, y_train = y_train[:5000], y_train[5000:]
```
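Just to make sure the split came out as expected, here is a quick shape check (an added sanity check, not in the original notebook):

```python
# Added sanity check: 55,000 training, 5,000 validation and 10,000 test images,
# each flattened to 784 pixel values.
print(X_train.shape, y_train.shape)   # (55000, 784) (55000,)
print(X_valid.shape, y_valid.shape)   # (5000, 784) (5000,)
print(X_test.shape, y_test.shape)     # (10000, 784) (10000,)
```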
|
|
||||||
```python
feature_cols = [tf.feature_column.numeric_column("X", shape=[28 * 28])]
dnn_clf = tf.estimator.DNNClassifier(hidden_units=[300,100], n_classes=10,
                                     feature_columns=feature_cols)

input_fn = tf.estimator.inputs.numpy_input_fn(
    x={"X": X_train}, y=y_train, num_epochs=40, batch_size=50, shuffle=True)
dnn_clf.train(input_fn=input_fn)
```

```python
test_input_fn = tf.estimator.inputs.numpy_input_fn(
    x={"X": X_test}, y=y_test, shuffle=False)
eval_results = dnn_clf.evaluate(input_fn=test_input_fn)
```

```python
eval_results
```

```python
y_pred_iter = dnn_clf.predict(input_fn=test_input_fn)
y_pred = list(y_pred_iter)
y_pred[0]
```
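Each element of `y_pred` is a dictionary of prediction arrays; assuming it exposes a `'class_ids'` entry (as `tf.estimator.DNNClassifier` predictions do), we can turn it into plain labels and recompute the test accuracy by hand (an added example, not in the original notebook):

```python
# Added example: convert the Estimator's prediction dicts into plain class labels
# and recompute the test accuracy manually.
y_pred_classes = np.array([pred["class_ids"][0] for pred in y_pred], dtype=np.int32)
manual_accuracy = (y_pred_classes == y_test).mean()
manual_accuracy
```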
|
|
||||||
## Using plain TensorFlow

```python
import tensorflow as tf

n_inputs = 28*28  # MNIST
n_hidden1 = 300
n_hidden2 = 100
n_outputs = 10
```

```python
reset_graph()

X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
y = tf.placeholder(tf.int32, shape=(None), name="y")
```

```python
def neuron_layer(X, n_neurons, name, activation=None):
    with tf.name_scope(name):
        n_inputs = int(X.get_shape()[1])
        stddev = 2 / np.sqrt(n_inputs)
        init = tf.truncated_normal((n_inputs, n_neurons), stddev=stddev)
        W = tf.Variable(init, name="kernel")
        b = tf.Variable(tf.zeros([n_neurons]), name="bias")
        Z = tf.matmul(X, W) + b
        if activation is not None:
            return activation(Z)
        else:
            return Z
```

```python
with tf.name_scope("dnn"):
    hidden1 = neuron_layer(X, n_hidden1, name="hidden1",
                           activation=tf.nn.relu)
    hidden2 = neuron_layer(hidden1, n_hidden2, name="hidden2",
                           activation=tf.nn.relu)
    logits = neuron_layer(hidden2, n_outputs, name="outputs")
```

```python
with tf.name_scope("loss"):
    xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y,
                                                              logits=logits)
    loss = tf.reduce_mean(xentropy, name="loss")
```

```python
learning_rate = 0.01

with tf.name_scope("train"):
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    training_op = optimizer.minimize(loss)
```
|
|
||||||
```python
with tf.name_scope("eval"):
    correct = tf.nn.in_top_k(logits, y, 1)
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
```

```python
init = tf.global_variables_initializer()
saver = tf.train.Saver()
```

```python
n_epochs = 40
batch_size = 50
```

```python
def shuffle_batch(X, y, batch_size):
    rnd_idx = np.random.permutation(len(X))
    n_batches = len(X) // batch_size
    for batch_idx in np.array_split(rnd_idx, n_batches):
        X_batch, y_batch = X[batch_idx], y[batch_idx]
        yield X_batch, y_batch
```

```python
with tf.Session() as sess:
    init.run()
    for epoch in range(n_epochs):
        for X_batch, y_batch in shuffle_batch(X_train, y_train, batch_size):
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        acc_batch = accuracy.eval(feed_dict={X: X_batch, y: y_batch})
        acc_val = accuracy.eval(feed_dict={X: X_valid, y: y_valid})
        print(epoch, "Batch accuracy:", acc_batch, "Val accuracy:", acc_val)

    save_path = saver.save(sess, "./my_model_final.ckpt")
```
|
|
||||||
```python
with tf.Session() as sess:
    saver.restore(sess, "./my_model_final.ckpt")  # or better, use save_path
    X_new_scaled = X_test[:20]
    Z = logits.eval(feed_dict={X: X_new_scaled})
    y_pred = np.argmax(Z, axis=1)
```

```python
print("Predicted classes:", y_pred)
print("Actual classes:   ", y_test[:20])
```

```python
from tensorflow_graph_in_jupyter import show_graph
```

```python
show_graph(tf.get_default_graph())
```
|
|
||||||
## Using `dense()` instead of `neuron_layer()`

Note: previous releases of the book used `tensorflow.contrib.layers.fully_connected()` rather than `tf.layers.dense()` (which did not exist when this chapter was written). It is now preferable to use `tf.layers.dense()`, because anything in the contrib module may change or be deleted without notice. The `dense()` function is almost identical to the `fully_connected()` function, except for a few minor differences:
* several parameters are renamed: `scope` becomes `name`, `activation_fn` becomes `activation` (and similarly the `_fn` suffix is removed from other parameters such as `normalizer_fn`), `weights_initializer` becomes `kernel_initializer`, etc. (a short sketch follows this note).
* the default `activation` is now `None` rather than `tf.nn.relu`.
* a few more differences are presented in chapter 11.
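To make the renaming concrete, here is a minimal side-by-side sketch (the `fully_connected()` call is shown only as a comment, since it lives in the deprecated `tf.contrib` module):

```python
# Old style (deprecated, shown only as a comment):
#   hidden1 = tensorflow.contrib.layers.fully_connected(X, 300, activation_fn=tf.nn.relu, scope="hidden1")
# New style, with the renamed parameters:
hidden1 = tf.layers.dense(X, 300, activation=tf.nn.relu, name="hidden1")
# Also note the new default: tf.layers.dense() applies no activation unless you pass one,
# whereas fully_connected() defaulted to tf.nn.relu.
```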
|
|
||||||
```python
n_inputs = 28*28  # MNIST
n_hidden1 = 300
n_hidden2 = 100
n_outputs = 10
```

```python
reset_graph()

X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
y = tf.placeholder(tf.int32, shape=(None), name="y")
```

```python
with tf.name_scope("dnn"):
    hidden1 = tf.layers.dense(X, n_hidden1, name="hidden1",
                              activation=tf.nn.relu)
    hidden2 = tf.layers.dense(hidden1, n_hidden2, name="hidden2",
                              activation=tf.nn.relu)
    logits = tf.layers.dense(hidden2, n_outputs, name="outputs")
    y_proba = tf.nn.softmax(logits)
```

```python
with tf.name_scope("loss"):
    xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
    loss = tf.reduce_mean(xentropy, name="loss")
```

```python
learning_rate = 0.01

with tf.name_scope("train"):
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    training_op = optimizer.minimize(loss)
```

```python
with tf.name_scope("eval"):
    correct = tf.nn.in_top_k(logits, y, 1)
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
```

```python
init = tf.global_variables_initializer()
saver = tf.train.Saver()
```
|
|
||||||
```python
n_epochs = 20
batch_size = 50

with tf.Session() as sess:
    init.run()
    for epoch in range(n_epochs):
        for X_batch, y_batch in shuffle_batch(X_train, y_train, batch_size):
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        acc_batch = accuracy.eval(feed_dict={X: X_batch, y: y_batch})
        acc_valid = accuracy.eval(feed_dict={X: X_valid, y: y_valid})
        print(epoch, "Batch accuracy:", acc_batch, "Validation accuracy:", acc_valid)

    save_path = saver.save(sess, "./my_model_final.ckpt")
```

```python
show_graph(tf.get_default_graph())
```

# Exercise solutions

## 1. to 8.

See appendix A.

## 9.

_Train a deep MLP on the MNIST dataset and see if you can get over 98% precision. Just like in the last exercise of chapter 9, try adding all the bells and whistles (i.e., save checkpoints, restore the last checkpoint in case of an interruption, add summaries, plot learning curves using TensorBoard, and so on)._

First, let's create the deep net. It's exactly the same as earlier, with just one addition: we add a `tf.summary.scalar()` to track the loss and the accuracy during training, so we can view nice learning curves using TensorBoard.
|
|
||||||
```python
n_inputs = 28*28  # MNIST
n_hidden1 = 300
n_hidden2 = 100
n_outputs = 10
```

```python
reset_graph()

X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X")
y = tf.placeholder(tf.int32, shape=(None), name="y")
```

```python
with tf.name_scope("dnn"):
    hidden1 = tf.layers.dense(X, n_hidden1, name="hidden1",
                              activation=tf.nn.relu)
    hidden2 = tf.layers.dense(hidden1, n_hidden2, name="hidden2",
                              activation=tf.nn.relu)
    logits = tf.layers.dense(hidden2, n_outputs, name="outputs")
```

```python
with tf.name_scope("loss"):
    xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
    loss = tf.reduce_mean(xentropy, name="loss")
    loss_summary = tf.summary.scalar('log_loss', loss)
```

```python
learning_rate = 0.01

with tf.name_scope("train"):
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    training_op = optimizer.minimize(loss)
```

```python
with tf.name_scope("eval"):
    correct = tf.nn.in_top_k(logits, y, 1)
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
    accuracy_summary = tf.summary.scalar('accuracy', accuracy)
```

```python
init = tf.global_variables_initializer()
saver = tf.train.Saver()
```

Now we need to define the directory to write the TensorBoard logs to:

```python
from datetime import datetime

def log_dir(prefix=""):
    now = datetime.utcnow().strftime("%Y%m%d%H%M%S")
    root_logdir = "tf_logs"
    if prefix:
        prefix += "-"
    name = prefix + "run-" + now
    return "{}/{}/".format(root_logdir, name)
```

```python
logdir = log_dir("mnist_dnn")
```
|
|
||||||
Now we can create the `FileWriter` that we will use to write the TensorBoard logs:

```python
file_writer = tf.summary.FileWriter(logdir, tf.get_default_graph())
```
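As an aside (not part of the original notebook), once logs start appearing you can watch the learning curves by launching TensorBoard from a terminal:

```python
# Added note: run TensorBoard in a separate shell, pointing it at the log root,
# then open http://localhost:6006 in a browser (6006 is TensorBoard's default port):
#
#     tensorboard --logdir tf_logs
```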
|
|
||||||
Hey! Why don't we implement early stopping? For this, we are going to need to use the validation set.
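Stripped of the checkpointing and summary plumbing, the early-stopping bookkeeping used below boils down to the following schematic sketch (illustration only, not the code that is actually run):

```python
# Schematic early-stopping logic; the real loop below also handles checkpoints
# and TensorBoard summaries, and only evaluates every 5 epochs.
best_loss = np.infty
epochs_without_progress = 0
max_epochs_without_progress = 50

# at each evaluation step inside the training loop:
#     if loss_val < best_loss:
#         best_loss = loss_val              # new best model: remember it (and save it)
#     else:
#         epochs_without_progress += 5      # one evaluation covers 5 epochs
#         if epochs_without_progress > max_epochs_without_progress:
#             break                         # stop training: no progress for too long
```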
|
|
||||||
```python
m, n = X_train.shape
```

```python
n_epochs = 10001
batch_size = 50
n_batches = int(np.ceil(m / batch_size))

checkpoint_path = "/tmp/my_deep_mnist_model.ckpt"
checkpoint_epoch_path = checkpoint_path + ".epoch"
final_model_path = "./my_deep_mnist_model"

best_loss = np.infty
epochs_without_progress = 0
max_epochs_without_progress = 50

with tf.Session() as sess:
    if os.path.isfile(checkpoint_epoch_path):
        # if the checkpoint file exists, restore the model and load the epoch number
        with open(checkpoint_epoch_path, "rb") as f:
            start_epoch = int(f.read())
        print("Training was interrupted. Continuing at epoch", start_epoch)
        saver.restore(sess, checkpoint_path)
    else:
        start_epoch = 0
        sess.run(init)

    for epoch in range(start_epoch, n_epochs):
        for X_batch, y_batch in shuffle_batch(X_train, y_train, batch_size):
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
        accuracy_val, loss_val, accuracy_summary_str, loss_summary_str = sess.run(
            [accuracy, loss, accuracy_summary, loss_summary],
            feed_dict={X: X_valid, y: y_valid})
        file_writer.add_summary(accuracy_summary_str, epoch)
        file_writer.add_summary(loss_summary_str, epoch)
        if epoch % 5 == 0:
            print("Epoch:", epoch,
                  "\tValidation accuracy: {:.3f}%".format(accuracy_val * 100),
                  "\tLoss: {:.5f}".format(loss_val))
            saver.save(sess, checkpoint_path)
            with open(checkpoint_epoch_path, "wb") as f:
                f.write(b"%d" % (epoch + 1))
            if loss_val < best_loss:
                saver.save(sess, final_model_path)
                best_loss = loss_val
            else:
                epochs_without_progress += 5
                if epochs_without_progress > max_epochs_without_progress:
                    print("Early stopping")
                    break
```

```python
os.remove(checkpoint_epoch_path)
```

```python
with tf.Session() as sess:
    saver.restore(sess, final_model_path)
    accuracy_val = accuracy.eval(feed_dict={X: X_test, y: y_test})
```

```python
accuracy_val
```
|
|
||||||
_Notebook metadata: Python 3 kernel (Python 3.6.6), nbformat 4._