diff --git a/11_deep_learning.ipynb b/11_deep_learning.ipynb
index 27d8e7e..5c440ec 100644
--- a/11_deep_learning.ipynb
+++ b/11_deep_learning.ipynb
@@ -504,7 +504,7 @@
 "    xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)\n",
 "    reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)\n",
 "    base_loss = tf.reduce_mean(xentropy, name=\"base_loss\")\n",
-"    loss = tf.add(base_loss, reg_losses, name=\"loss\")\n",
+"    loss = tf.add_n([base_loss] + reg_losses, name=\"loss\")\n",
 "\n",
 "with tf.name_scope(\"train\"):\n",
 "    optimizer = tf.train.MomentumOptimizer(learning_rate, momentum)\n",
@@ -749,6 +749,7 @@
 "        for iteration in range(len(mnist.test.labels)//batch_size):\n",
 "            X_batch, y_batch = mnist.train.next_batch(batch_size)\n",
 "            sess.run(training_op, feed_dict={is_training: True, X: X_batch, y: y_batch})\n",
+"            sess.run(clip_all_weights)\n",
 "        acc_train = accuracy.eval(feed_dict={is_training: False, X: X_batch, y: y_batch})\n",
 "        acc_test = accuracy.eval(feed_dict={is_training: False, X: mnist.test.images, y: mnist.test.labels})\n",
 "        print(epoch, \"Train accuracy:\", acc_train, \"Test accuracy:\", acc_test)\n",
diff --git a/15_autoencoders.ipynb b/15_autoencoders.ipynb
index 8868cc5..e1e20f5 100644
--- a/15_autoencoders.ipynb
+++ b/15_autoencoders.ipynb
@@ -393,7 +393,7 @@
 "mse = tf.reduce_mean(tf.square(outputs - X))\n",
 "\n",
 "reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)\n",
-"loss = mse + reg_losses\n",
+"loss = tf.add_n([mse] + reg_losses)\n",
 "\n",
 "optimizer = tf.train.AdamOptimizer(learning_rate)\n",
 "training_op = optimizer.minimize(loss)\n",
@@ -545,7 +545,7 @@
 "    mse = tf.reduce_mean(tf.square(outputs - X))\n",
 "\n",
 "    reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)\n",
-"    loss = mse + reg_losses\n",
+"    loss = tf.add_n([mse] + reg_losses)\n",
 "\n",
 "    optimizer = tf.train.AdamOptimizer(learning_rate)\n",
 "    training_op = optimizer.minimize(loss)\n",
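
Note on the tf.add_n changes: tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
returns a Python list of scalar tensors, one per regularized variable. Neither
"mse + reg_losses" nor tf.add(base_loss, reg_losses) raises an error; TensorFlow
converts the list to a 1-D tensor and broadcasts, so the "loss" silently becomes
a vector in which the base loss is counted once per regularization term.
tf.add_n sums the list into the intended scalar. A minimal sketch of the
corrected pattern (TF 1.x; the layer sizes and the 0.01 L2 scale are
illustrative, not taken from the notebooks):

    import tensorflow as tf  # TensorFlow 1.x graph-mode API

    X = tf.placeholder(tf.float32, shape=(None, 4), name="X")
    hidden = tf.layers.dense(X, 8, activation=tf.nn.relu,
                             kernel_regularizer=tf.contrib.layers.l2_regularizer(0.01))
    outputs = tf.layers.dense(hidden, 4)

    mse = tf.reduce_mean(tf.square(outputs - X))

    # A Python list of scalar tensors, one per regularized variable:
    reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)

    # tf.add_n sums a list of same-shaped tensors, giving a true scalar loss:
    loss = tf.add_n([mse] + reg_losses, name="loss")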
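
The second 11_deep_learning.ipynb hunk runs a clip_all_weights op after every
training step, so the max-norm constraint is re-applied to the weights right
after each update. The diff does not show how clip_all_weights is built; a
common way to construct such an op (an assumption, not visible in the patch)
is one tf.assign per kernel variable, grouped into a single op:

    import tensorflow as tf  # TensorFlow 1.x

    threshold = 1.0  # illustrative max-norm threshold

    # Clip each weight matrix row-wise: rescale any row whose L2 norm exceeds
    # `threshold`, then write the clipped values back into the variable.
    clip_ops = [tf.assign(w, tf.clip_by_norm(w, clip_norm=threshold, axes=1))
                for w in tf.trainable_variables() if "kernel" in w.name]

    # A single op the training loop can run once per step, as in the diff:
    #     sess.run(training_op, feed_dict={...})
    #     sess.run(clip_all_weights)
    clip_all_weights = tf.group(*clip_ops)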