Use np.random.seed(42) and tf.set_random_seed(42) to make the notebook's output constant across runs

main
Aurélien Geron 2017-06-08 15:43:16 +02:00
parent 8935c61570
commit 91acc2e1fd
1 changed file with 191 additions and 203 deletions
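In short, this commit drops the module-level `rnd.seed(42)` call and the separate `tf.reset_default_graph()` cells in favor of a single `reset_graph()` helper called at the top of each graph-building cell. For reference, a minimal standalone sketch of that helper (assuming the TensorFlow 1.x API this notebook targets):

    import numpy as np
    import tensorflow as tf

    def reset_graph(seed=42):
        # Start from an empty default graph so re-running a cell does not pile up nodes.
        tf.reset_default_graph()
        # Seed both TensorFlow's and NumPy's RNGs so the output stays constant across runs.
        tf.set_random_seed(seed)
        np.random.seed(seed)

    reset_graph()  # call before building each graph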

@ -55,11 +55,13 @@
"\n",
"# Common imports\n",
"import numpy as np\n",
"import numpy.random as rnd\n",
"import os\n",
"\n",
"# to make this notebook's output stable across runs\n",
"rnd.seed(42)\n",
"def reset_graph(seed=42):\n",
" tf.reset_default_graph()\n",
" tf.set_random_seed(seed)\n",
" np.random.seed(seed)\n",
"\n",
"# To plot pretty figures\n",
"%matplotlib inline\n",
@ -192,7 +194,7 @@
},
"outputs": [],
"source": [
"tf.reset_default_graph()\n",
"reset_graph()\n",
"\n",
"X = tf.placeholder(tf.float32, shape=(None, height, width, 1))\n",
"feature_maps = tf.constant(fmap)\n",
@ -307,7 +309,9 @@
"cell_type": "code",
"execution_count": 12,
"metadata": {
"collapsed": false
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [],
"source": [
@ -319,7 +323,10 @@
},
{
"cell_type": "markdown",
"metadata": {},
"metadata": {
"deletable": true,
"editable": true
},
"source": [
"Using `tf.layers.conv2d()`:"
]
@ -328,21 +335,14 @@
"cell_type": "code",
"execution_count": 13,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"tf.reset_default_graph()"
]
},
{
"cell_type": "code",
"execution_count": 14,
"metadata": {
"collapsed": false
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [],
"source": [
"reset_graph()\n",
"\n",
"X = tf.placeholder(shape=(None, height, width, channels), dtype=tf.float32)\n",
"conv = tf.layers.conv2d(X, filters=2, kernel_size=7, strides=[2,2],\n",
" padding=\"SAME\")"
@ -350,9 +350,11 @@
},
{
"cell_type": "code",
"execution_count": 15,
"execution_count": 14,
"metadata": {
"collapsed": true
"collapsed": true,
"deletable": true,
"editable": true
},
"outputs": [],
"source": [
@ -365,9 +367,11 @@
},
{
"cell_type": "code",
"execution_count": 16,
"execution_count": 15,
"metadata": {
"collapsed": false
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [],
"source": [
@ -387,7 +391,7 @@
},
{
"cell_type": "code",
"execution_count": 17,
"execution_count": 16,
"metadata": {
"collapsed": false,
"deletable": true,
@ -395,7 +399,7 @@
},
"outputs": [],
"source": [
"tf.reset_default_graph()\n",
"reset_graph()\n",
"\n",
"filter_primes = np.array([2., 3., 5., 7., 11., 13.], dtype=np.float32)\n",
"x = tf.constant(np.arange(1, 13+1, dtype=np.float32).reshape([1, 1, 13, 1]))\n",
@ -411,7 +415,7 @@
},
{
"cell_type": "code",
"execution_count": 18,
"execution_count": 17,
"metadata": {
"collapsed": false,
"deletable": true,
@ -440,9 +444,11 @@
},
{
"cell_type": "code",
"execution_count": 19,
"execution_count": 18,
"metadata": {
"collapsed": true
"collapsed": true,
"deletable": true,
"editable": true
},
"outputs": [],
"source": [
@ -455,7 +461,7 @@
},
{
"cell_type": "code",
"execution_count": 20,
"execution_count": 19,
"metadata": {
"collapsed": false,
"deletable": true,
@ -475,9 +481,11 @@
},
{
"cell_type": "code",
"execution_count": 21,
"execution_count": 20,
"metadata": {
"collapsed": false
"collapsed": false,
"deletable": true,
"editable": true
},
"outputs": [],
"source": [
@ -528,7 +536,7 @@
},
{
"cell_type": "code",
"execution_count": 22,
"execution_count": 21,
"metadata": {
"collapsed": false,
"deletable": true,
@ -556,45 +564,49 @@
"n_fc1 = 64\n",
"n_outputs = 10\n",
"\n",
"graph = tf.Graph()\n",
"with graph.as_default():\n",
" with tf.name_scope(\"inputs\"):\n",
"reset_graph()\n",
"\n",
"with tf.name_scope(\"inputs\"):\n",
" X = tf.placeholder(tf.float32, shape=[None, n_inputs], name=\"X\")\n",
" X_reshaped = tf.reshape(X, shape=[-1, height, width, channels])\n",
" y = tf.placeholder(tf.int32, shape=[None], name=\"y\")\n",
"\n",
" conv1 = tf.layers.conv2d(X_reshaped, filters=conv1_fmaps, kernel_size=conv1_ksize, strides=conv1_stride, padding=conv1_pad, activation=tf.nn.relu, name=\"conv1\")\n",
" conv2 = tf.layers.conv2d(conv1, filters=conv2_fmaps, kernel_size=conv2_ksize, strides=conv2_stride, padding=conv2_pad, activation=tf.nn.relu, name=\"conv2\")\n",
"conv1 = tf.layers.conv2d(X_reshaped, filters=conv1_fmaps, kernel_size=conv1_ksize,\n",
" strides=conv1_stride, padding=conv1_pad,\n",
" activation=tf.nn.relu, name=\"conv1\")\n",
"conv2 = tf.layers.conv2d(conv1, filters=conv2_fmaps, kernel_size=conv2_ksize,\n",
" strides=conv2_stride, padding=conv2_pad,\n",
" activation=tf.nn.relu, name=\"conv2\")\n",
"\n",
" with tf.name_scope(\"pool3\"):\n",
"with tf.name_scope(\"pool3\"):\n",
" pool3 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding=\"VALID\")\n",
" pool3_flat = tf.reshape(pool3, shape=[-1, pool3_fmaps * 7 * 7])\n",
"\n",
" with tf.name_scope(\"fc1\"):\n",
"with tf.name_scope(\"fc1\"):\n",
" fc1 = tf.layers.dense(pool3_flat, n_fc1, activation=tf.nn.relu, name=\"fc1\")\n",
"\n",
" with tf.name_scope(\"output\"):\n",
"with tf.name_scope(\"output\"):\n",
" logits = tf.layers.dense(fc1, n_outputs, name=\"output\")\n",
" Y_proba = tf.nn.softmax(logits, name=\"Y_proba\")\n",
"\n",
" with tf.name_scope(\"train\"):\n",
"with tf.name_scope(\"train\"):\n",
" xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=y)\n",
" loss = tf.reduce_mean(xentropy)\n",
" optimizer = tf.train.AdamOptimizer()\n",
" training_op = optimizer.minimize(loss)\n",
"\n",
" with tf.name_scope(\"eval\"):\n",
"with tf.name_scope(\"eval\"):\n",
" correct = tf.nn.in_top_k(logits, y, 1)\n",
" accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))\n",
"\n",
" with tf.name_scope(\"init_and_save\"):\n",
"with tf.name_scope(\"init_and_save\"):\n",
" init = tf.global_variables_initializer()\n",
" saver = tf.train.Saver()"
]
},
{
"cell_type": "code",
"execution_count": 23,
"execution_count": 22,
"metadata": {
"collapsed": false,
"deletable": true,
@ -608,7 +620,7 @@
},
{
"cell_type": "code",
"execution_count": 24,
"execution_count": 23,
"metadata": {
"collapsed": false,
"deletable": true,
@ -619,7 +631,7 @@
"n_epochs = 10\n",
"batch_size = 100\n",
"\n",
"with tf.Session(graph=graph) as sess:\n",
"with tf.Session() as sess:\n",
" init.run()\n",
" for epoch in range(n_epochs):\n",
" for iteration in range(mnist.train.num_examples // batch_size):\n",
@ -686,9 +698,9 @@
},
{
"cell_type": "code",
"execution_count": 25,
"execution_count": 24,
"metadata": {
"collapsed": true,
"collapsed": false,
"deletable": true,
"editable": true
},
@ -719,41 +731,45 @@
"\n",
"n_outputs = 10\n",
"\n",
"graph = tf.Graph()\n",
"with graph.as_default():\n",
" with tf.name_scope(\"inputs\"):\n",
"reset_graph()\n",
"\n",
"with tf.name_scope(\"inputs\"):\n",
" X = tf.placeholder(tf.float32, shape=[None, n_inputs], name=\"X\")\n",
" X_reshaped = tf.reshape(X, shape=[-1, height, width, channels])\n",
" y = tf.placeholder(tf.int32, shape=[None], name=\"y\")\n",
" is_training = tf.placeholder_with_default(False, shape=[], name='is_training')\n",
" training = tf.placeholder_with_default(False, shape=[], name='training')\n",
"\n",
" conv1 = tf.layers.conv2d(X_reshaped, filters=conv1_fmaps, kernel_size=conv1_ksize, strides=conv1_stride, padding=conv1_pad, activation=tf.nn.relu, name=\"conv1\")\n",
" conv2 = tf.layers.conv2d(conv1, filters=conv2_fmaps, kernel_size=conv2_ksize, strides=conv2_stride, padding=conv2_pad, activation=tf.nn.relu, name=\"conv2\")\n",
"conv1 = tf.layers.conv2d(X_reshaped, filters=conv1_fmaps, kernel_size=conv1_ksize,\n",
" strides=conv1_stride, padding=conv1_pad,\n",
" activation=tf.nn.relu, name=\"conv1\")\n",
"conv2 = tf.layers.conv2d(conv1, filters=conv2_fmaps, kernel_size=conv2_ksize,\n",
" strides=conv2_stride, padding=conv2_pad,\n",
" activation=tf.nn.relu, name=\"conv2\")\n",
"\n",
" with tf.name_scope(\"pool3\"):\n",
"with tf.name_scope(\"pool3\"):\n",
" pool3 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding=\"VALID\")\n",
" pool3_flat = tf.reshape(pool3, shape=[-1, pool3_fmaps * 14 * 14])\n",
" pool3_flat_drop = tf.layers.dropout(pool3_flat, conv2_dropout_rate, training=is_training)\n",
" pool3_flat_drop = tf.layers.dropout(pool3_flat, conv2_dropout_rate, training=training)\n",
"\n",
" with tf.name_scope(\"fc1\"):\n",
"with tf.name_scope(\"fc1\"):\n",
" fc1 = tf.layers.dense(pool3_flat_drop, n_fc1, activation=tf.nn.relu, name=\"fc1\")\n",
" fc1_drop = tf.layers.dropout(fc1, fc1_dropout_rate, training=is_training)\n",
" fc1_drop = tf.layers.dropout(fc1, fc1_dropout_rate, training=training)\n",
"\n",
" with tf.name_scope(\"output\"):\n",
"with tf.name_scope(\"output\"):\n",
" logits = tf.layers.dense(fc1, n_outputs, name=\"output\")\n",
" Y_proba = tf.nn.softmax(logits, name=\"Y_proba\")\n",
"\n",
" with tf.name_scope(\"train\"):\n",
"with tf.name_scope(\"train\"):\n",
" xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=y)\n",
" loss = tf.reduce_mean(xentropy)\n",
" optimizer = tf.train.AdamOptimizer()\n",
" training_op = optimizer.minimize(loss)\n",
"\n",
" with tf.name_scope(\"eval\"):\n",
"with tf.name_scope(\"eval\"):\n",
" correct = tf.nn.in_top_k(logits, y, 1)\n",
" accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))\n",
"\n",
" with tf.name_scope(\"init_and_save\"):\n",
"with tf.name_scope(\"init_and_save\"):\n",
" init = tf.global_variables_initializer()\n",
" saver = tf.train.Saver()"
]
@ -770,7 +786,7 @@
},
{
"cell_type": "code",
"execution_count": 26,
"execution_count": 25,
"metadata": {
"collapsed": false,
"deletable": true,
@ -794,7 +810,7 @@
},
{
"cell_type": "code",
"execution_count": 27,
"execution_count": 26,
"metadata": {
"collapsed": true,
"deletable": true,
@ -815,32 +831,6 @@
" tf.get_default_session().run(assign_ops, feed_dict=feed_dict)"
]
},
{
"cell_type": "markdown",
"metadata": {
"deletable": true,
"editable": true
},
"source": [
"We need a validation set for Early Stopping, so we take 2,000 instances from the test set for this purpose."
]
},
{
"cell_type": "code",
"execution_count": 28,
"metadata": {
"collapsed": true,
"deletable": true,
"editable": true
},
"outputs": [],
"source": [
"X_val = mnist.test.images[:2000]\n",
"y_val = mnist.test.labels[:2000]\n",
"X_test = mnist.test.images[2000:]\n",
"y_test = mnist.test.labels[2000:]"
]
},
{
"cell_type": "markdown",
"metadata": {
@ -857,7 +847,7 @@
},
{
"cell_type": "code",
"execution_count": 29,
"execution_count": 27,
"metadata": {
"collapsed": false,
"deletable": true,
@ -868,36 +858,40 @@
"n_epochs = 1000\n",
"batch_size = 50\n",
"\n",
"best_acc_val = 0\n",
"check_interval = 100\n",
"best_loss_val = np.infty\n",
"check_interval = 500\n",
"checks_since_last_progress = 0\n",
"max_checks_without_progress = 100\n",
"max_checks_without_progress = 20\n",
"best_model_params = None \n",
"\n",
"with tf.Session(graph=graph) as sess:\n",
"with tf.Session() as sess:\n",
" init.run()\n",
" for epoch in range(n_epochs):\n",
" for iteration in range(mnist.train.num_examples // batch_size):\n",
" X_batch, y_batch = mnist.train.next_batch(batch_size)\n",
" sess.run(training_op, feed_dict={X: X_batch, y: y_batch, is_training: True})\n",
" sess.run(training_op, feed_dict={X: X_batch, y: y_batch, training: True})\n",
" if iteration % check_interval == 0:\n",
" acc_val = accuracy.eval(feed_dict={X: X_val, y: y_val})\n",
" if acc_val > best_acc_val:\n",
" best_acc_val = acc_val\n",
" loss_val = loss.eval(feed_dict={X: mnist.validation.images,\n",
" y: mnist.validation.labels})\n",
" if loss_val < best_loss_val:\n",
" best_loss_val = loss_val\n",
" checks_since_last_progress = 0\n",
" best_model_params = get_model_params()\n",
" else:\n",
" checks_since_last_progress += 1\n",
" acc_train = accuracy.eval(feed_dict={X: X_batch, y: y_batch})\n",
" acc_test = accuracy.eval(feed_dict={X: X_test, y: y_test})\n",
" print(epoch, \"Train accuracy:\", acc_train, \"Test accuracy:\", acc_test, \"Best validation accuracy:\", best_acc_val)\n",
" acc_val = accuracy.eval(feed_dict={X: mnist.validation.images,\n",
" y: mnist.validation.labels})\n",
" print(\"Epoch {}, train accuracy: {:.4f}%, valid. accuracy: {:.4f}%, valid. best loss: {:.6f}\".format(\n",
" epoch, acc_train * 100, acc_val * 100, best_loss_val))\n",
" if checks_since_last_progress > max_checks_without_progress:\n",
" print(\"Early stopping!\")\n",
" break\n",
"\n",
" if best_model_params:\n",
" restore_model_params(best_model_params)\n",
" acc_test = accuracy.eval(feed_dict={X: X_test, y: y_test})\n",
" acc_test = accuracy.eval(feed_dict={X: mnist.test.images,\n",
" y: mnist.test.labels})\n",
" print(\"Final accuracy on test set:\", acc_test)\n",
" save_path = saver.save(sess, \"./my_mnist_model\")"
]
@ -918,7 +912,7 @@
},
{
"cell_type": "code",
"execution_count": 30,
"execution_count": 28,
"metadata": {
"collapsed": true,
"deletable": true,
@ -933,7 +927,7 @@
},
{
"cell_type": "code",
"execution_count": 31,
"execution_count": 29,
"metadata": {
"collapsed": false,
"deletable": true,
@ -961,7 +955,7 @@
},
{
"cell_type": "code",
"execution_count": 32,
"execution_count": 30,
"metadata": {
"collapsed": true,
"deletable": true,
@ -997,7 +991,7 @@
},
{
"cell_type": "code",
"execution_count": 33,
"execution_count": 31,
"metadata": {
"collapsed": false,
"deletable": true,
@ -1010,7 +1004,7 @@
},
{
"cell_type": "code",
"execution_count": 34,
"execution_count": 32,
"metadata": {
"collapsed": true,
"deletable": true,
@ -1030,7 +1024,7 @@
},
{
"cell_type": "code",
"execution_count": 35,
"execution_count": 33,
"metadata": {
"collapsed": false,
"deletable": true,
@ -1043,7 +1037,7 @@
},
{
"cell_type": "code",
"execution_count": 36,
"execution_count": 34,
"metadata": {
"collapsed": false,
"deletable": true,
@ -1067,18 +1061,7 @@
},
{
"cell_type": "code",
"execution_count": 37,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"tf.reset_default_graph()"
]
},
{
"cell_type": "code",
"execution_count": 38,
"execution_count": 35,
"metadata": {
"collapsed": false,
"deletable": true,
@ -1089,6 +1072,8 @@
"from tensorflow.contrib.slim.nets import inception\n",
"import tensorflow.contrib.slim as slim\n",
"\n",
"reset_graph()\n",
"\n",
"X = tf.placeholder(tf.float32, shape=[None, 299, 299, 3], name=\"X\")\n",
"with slim.arg_scope(inception.inception_v3_arg_scope()):\n",
" logits, end_points = inception.inception_v3(\n",
@ -1110,7 +1095,7 @@
},
{
"cell_type": "code",
"execution_count": 39,
"execution_count": 36,
"metadata": {
"collapsed": false,
"deletable": true,
@ -1136,7 +1121,7 @@
},
{
"cell_type": "code",
"execution_count": 40,
"execution_count": 37,
"metadata": {
"collapsed": false,
"deletable": true,
@ -1153,7 +1138,7 @@
},
{
"cell_type": "code",
"execution_count": 41,
"execution_count": 38,
"metadata": {
"collapsed": false,
"deletable": true,
@ -1167,7 +1152,7 @@
},
{
"cell_type": "code",
"execution_count": 42,
"execution_count": 39,
"metadata": {
"collapsed": false,
"deletable": true,
@ -1180,7 +1165,7 @@
},
{
"cell_type": "code",
"execution_count": 43,
"execution_count": 40,
"metadata": {
"collapsed": false,
"deletable": true,
@ -1230,7 +1215,7 @@
},
{
"cell_type": "code",
"execution_count": 44,
"execution_count": 41,
"metadata": {
"collapsed": false,
"deletable": true,
@ -1259,7 +1244,7 @@
},
{
"cell_type": "code",
"execution_count": 45,
"execution_count": 42,
"metadata": {
"collapsed": false,
"deletable": true,
@ -1282,7 +1267,7 @@
},
{
"cell_type": "code",
"execution_count": 46,
"execution_count": 43,
"metadata": {
"collapsed": false,
"deletable": true,
@ -1308,7 +1293,7 @@
},
{
"cell_type": "code",
"execution_count": 47,
"execution_count": 44,
"metadata": {
"collapsed": true,
"deletable": true,
@ -1339,7 +1324,7 @@
},
{
"cell_type": "code",
"execution_count": 48,
"execution_count": 45,
"metadata": {
"collapsed": true,
"deletable": true,
@ -1363,7 +1348,7 @@
},
{
"cell_type": "code",
"execution_count": 49,
"execution_count": 46,
"metadata": {
"collapsed": false,
"deletable": true,
@ -1427,7 +1412,7 @@
},
{
"cell_type": "code",
"execution_count": 50,
"execution_count": 47,
"metadata": {
"collapsed": true,
"deletable": true,
@ -1451,13 +1436,13 @@
" \n",
" # Now let's shrink this bounding box by a random factor (dividing the dimensions by a random number\n",
" # between 1.0 and 1.0 + `max_zoom`.\n",
" resize_factor = rnd.rand() * max_zoom + 1.0\n",
" resize_factor = np.random.rand() * max_zoom + 1.0\n",
" crop_width = int(crop_width / resize_factor)\n",
" crop_height = int(crop_height / resize_factor)\n",
" \n",
" # Next, we can select a random location on the image for this bounding box.\n",
" x0 = rnd.randint(0, width - crop_width)\n",
" y0 = rnd.randint(0, height - crop_height)\n",
" x0 = np.random.randint(0, width - crop_width)\n",
" y0 = np.random.randint(0, height - crop_height)\n",
" x1 = x0 + crop_width\n",
" y1 = y0 + crop_height\n",
" \n",
@ -1465,7 +1450,7 @@
" image = image[y0:y1, x0:x1]\n",
"\n",
" # Let's also flip the image horizontally with 50% probability:\n",
" if rnd.rand() < 0.5:\n",
" if np.random.rand() < 0.5:\n",
" image = np.fliplr(image)\n",
"\n",
" # Now, let's resize the image to the target dimensions.\n",
@ -1498,7 +1483,7 @@
},
{
"cell_type": "code",
"execution_count": 51,
"execution_count": 48,
"metadata": {
"collapsed": false,
"deletable": true,
@ -1525,7 +1510,7 @@
},
{
"cell_type": "code",
"execution_count": 52,
"execution_count": 49,
"metadata": {
"collapsed": false,
"deletable": true,
@ -1554,7 +1539,7 @@
},
{
"cell_type": "code",
"execution_count": 53,
"execution_count": 50,
"metadata": {
"collapsed": false,
"deletable": true,
@ -1597,7 +1582,7 @@
},
{
"cell_type": "code",
"execution_count": 54,
"execution_count": 51,
"metadata": {
"collapsed": false,
"deletable": true,
@ -1658,7 +1643,7 @@
},
{
"cell_type": "code",
"execution_count": 55,
"execution_count": 52,
"metadata": {
"collapsed": false,
"deletable": true,
@ -1666,7 +1651,7 @@
},
"outputs": [],
"source": [
"tf.reset_default_graph()\n",
"reset_graph()\n",
"\n",
"input_image = tf.placeholder(tf.uint8, shape=[None, None, 3])\n",
"prepared_image_op = prepare_image_with_tensorflow(input_image)\n",
@ -1714,7 +1699,7 @@
},
{
"cell_type": "code",
"execution_count": 56,
"execution_count": 53,
"metadata": {
"collapsed": false,
"deletable": true,
@ -1725,7 +1710,7 @@
"from tensorflow.contrib.slim.nets import inception\n",
"import tensorflow.contrib.slim as slim\n",
"\n",
"tf.reset_default_graph()\n",
"reset_graph()\n",
"\n",
"X = tf.placeholder(tf.float32, shape=[None, height, width, channels], name=\"X\")\n",
"training = tf.placeholder_with_default(False, shape=[])\n",
@ -1747,7 +1732,7 @@
},
{
"cell_type": "code",
"execution_count": 57,
"execution_count": 54,
"metadata": {
"collapsed": false,
"deletable": true,
@ -1770,7 +1755,7 @@
},
{
"cell_type": "code",
"execution_count": 58,
"execution_count": 55,
"metadata": {
"collapsed": false,
"deletable": true,
@ -1793,7 +1778,7 @@
},
{
"cell_type": "code",
"execution_count": 59,
"execution_count": 56,
"metadata": {
"collapsed": false,
"deletable": true,
@ -1816,7 +1801,7 @@
},
{
"cell_type": "code",
"execution_count": 60,
"execution_count": 57,
"metadata": {
"collapsed": false,
"deletable": true,
@ -1839,7 +1824,7 @@
},
{
"cell_type": "code",
"execution_count": 61,
"execution_count": 58,
"metadata": {
"collapsed": false,
"deletable": true,
@ -1862,7 +1847,7 @@
},
{
"cell_type": "code",
"execution_count": 62,
"execution_count": 59,
"metadata": {
"collapsed": true,
"deletable": true,
@ -1885,7 +1870,7 @@
},
{
"cell_type": "code",
"execution_count": 63,
"execution_count": 60,
"metadata": {
"collapsed": false,
"deletable": true,
@ -1920,7 +1905,7 @@
},
{
"cell_type": "code",
"execution_count": 64,
"execution_count": 61,
"metadata": {
"collapsed": false,
"deletable": true,
@ -1948,7 +1933,7 @@
},
{
"cell_type": "code",
"execution_count": 65,
"execution_count": 62,
"metadata": {
"collapsed": false,
"deletable": true,
@ -1992,7 +1977,7 @@
},
{
"cell_type": "code",
"execution_count": 66,
"execution_count": 63,
"metadata": {
"collapsed": false,
"deletable": true,
@ -2016,7 +2001,7 @@
},
{
"cell_type": "code",
"execution_count": 67,
"execution_count": 64,
"metadata": {
"collapsed": false,
"deletable": true,
@ -2042,7 +2027,7 @@
},
{
"cell_type": "code",
"execution_count": 68,
"execution_count": 65,
"metadata": {
"collapsed": false,
"deletable": true,
@ -2053,7 +2038,7 @@
"test_ratio = 0.2\n",
"train_size = int(len(flower_paths_and_classes) * (1 - test_ratio))\n",
"\n",
"rnd.shuffle(flower_paths_and_classes)\n",
"np.random.shuffle(flower_paths_and_classes)\n",
"\n",
"flower_paths_and_classes_train = flower_paths_and_classes[:train_size]\n",
"flower_paths_and_classes_test = flower_paths_and_classes[train_size:]"
@ -2071,7 +2056,7 @@
},
{
"cell_type": "code",
"execution_count": 69,
"execution_count": 66,
"metadata": {
"collapsed": false,
"deletable": true,
@ -2094,7 +2079,7 @@
},
{
"cell_type": "code",
"execution_count": 70,
"execution_count": 67,
"metadata": {
"collapsed": true,
"deletable": true,
@ -2115,7 +2100,7 @@
},
{
"cell_type": "code",
"execution_count": 71,
"execution_count": 68,
"metadata": {
"collapsed": false,
"deletable": true,
@ -2128,7 +2113,7 @@
},
{
"cell_type": "code",
"execution_count": 72,
"execution_count": 69,
"metadata": {
"collapsed": false,
"deletable": true,
@ -2141,7 +2126,7 @@
},
{
"cell_type": "code",
"execution_count": 73,
"execution_count": 70,
"metadata": {
"collapsed": false,
"deletable": true,
@ -2154,7 +2139,7 @@
},
{
"cell_type": "code",
"execution_count": 74,
"execution_count": 71,
"metadata": {
"collapsed": false,
"deletable": true,
@ -2167,7 +2152,7 @@
},
{
"cell_type": "code",
"execution_count": 75,
"execution_count": 72,
"metadata": {
"collapsed": false,
"deletable": true,
@ -2190,7 +2175,7 @@
},
{
"cell_type": "code",
"execution_count": 76,
"execution_count": 73,
"metadata": {
"collapsed": false,
"deletable": true,
@ -2203,7 +2188,7 @@
},
{
"cell_type": "code",
"execution_count": 77,
"execution_count": 74,
"metadata": {
"collapsed": false,
"deletable": true,
@ -2236,7 +2221,7 @@
},
{
"cell_type": "code",
"execution_count": 78,
"execution_count": 75,
"metadata": {
"collapsed": false,
"deletable": true,
@ -2249,7 +2234,7 @@
},
{
"cell_type": "code",
"execution_count": 79,
"execution_count": null,
"metadata": {
"collapsed": false,
"deletable": true,
@ -2282,7 +2267,7 @@
},
{
"cell_type": "code",
"execution_count": 80,
"execution_count": null,
"metadata": {
"collapsed": false,
"deletable": true,
@ -2317,9 +2302,12 @@
},
{
"cell_type": "markdown",
"metadata": {},
"metadata": {
"deletable": true,
"editable": true
},
"source": [
"Okay, 68% accuracy is not great (in fact, it's really bad), but this is only after 10 epochs, and freezing all layers except for the output layer. If you have a GPU, you can try again and let training run for much longer (e.g., using early stopping to decide when to stop). You can also improve the image preprocessing function to make more tweaks to the image (e.g., changing the brightness and hue, rotate the image slightly). You can reach above 95% accuracy on this task. If you want to dig deeper, this [great blog post](https://kwotsin.github.io/tech/2017/02/11/transfer-learning.html) goes into more details and reaches 96% accuracy."
"Okay, 72.3% accuracy is not great (in fact, it's really bad), but this is only after 10 epochs, and freezing all layers except for the output layer. If you have a GPU, you can try again and let training run for much longer (e.g., using early stopping to decide when to stop). You can also improve the image preprocessing function to make more tweaks to the image (e.g., changing the brightness and hue, rotate the image slightly). You can reach above 95% accuracy on this task. If you want to dig deeper, this [great blog post](https://kwotsin.github.io/tech/2017/02/11/transfer-learning.html) goes into more details and reaches 96% accuracy."
]
},
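As a side note on the augmentation suggestion above: a hedged sketch of what extra preprocessing tweaks could look like, using TensorFlow 1.x `tf.image` ops (the helper name `augment_image` is made up for illustration and is not part of this commit):

    def augment_image(image):
        # Randomly perturb brightness and hue, then flip horizontally half the time.
        image = tf.image.random_brightness(image, max_delta=0.2)
        image = tf.image.random_hue(image, max_delta=0.05)   # expects a 3-channel RGB image
        image = tf.image.random_flip_left_right(image)
        # Slight rotations could be added as well (e.g., tf.contrib.image.rotate in TF 1.x).
        return image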
{
@ -2346,7 +2334,7 @@
},
{
"cell_type": "code",
"execution_count": 81,
"execution_count": null,
"metadata": {
"collapsed": true,
"deletable": true,