Replace keras with tf.keras

main
Aurélien Geron 2021-10-17 15:04:08 +13:00
parent 0253e950ea
commit 9145dfbab1
10 changed files with 1122 additions and 1122 deletions


@@ -310,14 +310,14 @@
"metadata": {},
"outputs": [],
"source": [
"keras.__version__"
"tf.keras.__version__"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Let's start by loading the fashion MNIST dataset. Keras has a number of functions to load popular datasets in `keras.datasets`. The dataset is already split for you between a training set and a test set, but it can be useful to split the training set further to have a validation set:"
"Let's start by loading the fashion MNIST dataset. Keras has a number of functions to load popular datasets in `tf.keras.datasets`. The dataset is already split for you between a training set and a test set, but it can be useful to split the training set further to have a validation set:"
]
},
{
@@ -326,7 +326,7 @@
"metadata": {},
"outputs": [],
"source": [
"fashion_mnist = keras.datasets.fashion_mnist\n",
"fashion_mnist = tf.keras.datasets.fashion_mnist\n",
"(X_train_full, y_train_full), (X_test, y_test) = fashion_mnist.load_data()"
]
},
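The hunk above only loads the data; the validation split mentioned in the markdown cell is outside this diff. A minimal sketch of that split, assuming the book's usual 5,000-sample hold-out and 0-255 pixel scaling (neither is part of this commit):

```python
# Assumed validation split (unchanged by this commit): hold out the first
# 5,000 training images and scale pixel intensities to the 0-1 range.
X_valid, X_train = X_train_full[:5000] / 255.0, X_train_full[5000:] / 255.0
y_valid, y_train = y_train_full[:5000], y_train_full[5000:]
```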
@@ -507,11 +507,11 @@
"metadata": {},
"outputs": [],
"source": [
"model = keras.models.Sequential()\n",
"model.add(keras.layers.Flatten(input_shape=[28, 28]))\n",
"model.add(keras.layers.Dense(300, activation=\"relu\"))\n",
"model.add(keras.layers.Dense(100, activation=\"relu\"))\n",
"model.add(keras.layers.Dense(10, activation=\"softmax\"))"
"model = tf.keras.Sequential()\n",
"model.add(tf.keras.layers.Flatten(input_shape=[28, 28]))\n",
"model.add(tf.keras.layers.Dense(300, activation=\"relu\"))\n",
"model.add(tf.keras.layers.Dense(100, activation=\"relu\"))\n",
"model.add(tf.keras.layers.Dense(10, activation=\"softmax\"))"
]
},
{
@@ -520,7 +520,7 @@
"metadata": {},
"outputs": [],
"source": [
"keras.backend.clear_session()\n",
"tf.keras.backend.clear_session()\n",
"np.random.seed(42)\n",
"tf.random.set_seed(42)"
]
@@ -531,11 +531,11 @@
"metadata": {},
"outputs": [],
"source": [
"model = keras.models.Sequential([\n",
" keras.layers.Flatten(input_shape=[28, 28]),\n",
" keras.layers.Dense(300, activation=\"relu\"),\n",
" keras.layers.Dense(100, activation=\"relu\"),\n",
" keras.layers.Dense(10, activation=\"softmax\")\n",
"model = tf.keras.Sequential([\n",
" tf.keras.layers.Flatten(input_shape=[28, 28]),\n",
" tf.keras.layers.Dense(300, activation=\"relu\"),\n",
" tf.keras.layers.Dense(100, activation=\"relu\"),\n",
" tf.keras.layers.Dense(10, activation=\"softmax\")\n",
"])"
]
},
@@ -563,7 +563,7 @@
"metadata": {},
"outputs": [],
"source": [
"keras.utils.plot_model(model, \"my_fashion_mnist_model.png\", show_shapes=True)"
"tf.keras.utils.plot_model(model, \"my_fashion_mnist_model.png\", show_shapes=True)"
]
},
{
@@ -653,9 +653,9 @@
"metadata": {},
"source": [
"```python\n",
"model.compile(loss=keras.losses.sparse_categorical_crossentropy,\n",
" optimizer=keras.optimizers.SGD(),\n",
" metrics=[keras.metrics.sparse_categorical_accuracy])\n",
"model.compile(loss=tf.keras.losses.sparse_categorical_crossentropy,\n",
" optimizer=tf.keras.optimizers.SGD(),\n",
" metrics=[tf.keras.metrics.sparse_categorical_accuracy])\n",
"```"
]
},
@@ -836,11 +836,11 @@
"metadata": {},
"outputs": [],
"source": [
"model = keras.models.Sequential([\n",
" keras.layers.Dense(30, activation=\"relu\", input_shape=X_train.shape[1:]),\n",
" keras.layers.Dense(1)\n",
"model = tf.keras.Sequential([\n",
" tf.keras.layers.Dense(30, activation=\"relu\", input_shape=X_train.shape[1:]),\n",
" tf.keras.layers.Dense(1)\n",
"])\n",
"model.compile(loss=\"mean_squared_error\", optimizer=keras.optimizers.SGD(learning_rate=1e-3))\n",
"model.compile(loss=\"mean_squared_error\", optimizer=tf.keras.optimizers.SGD(learning_rate=1e-3))\n",
"history = model.fit(X_train, y_train, epochs=20, validation_data=(X_valid, y_valid))\n",
"mse_test = model.evaluate(X_test, y_test)\n",
"X_new = X_test[:3]\n",
@@ -898,12 +898,12 @@
"metadata": {},
"outputs": [],
"source": [
"input_ = keras.layers.Input(shape=X_train.shape[1:])\n",
"hidden1 = keras.layers.Dense(30, activation=\"relu\")(input_)\n",
"hidden2 = keras.layers.Dense(30, activation=\"relu\")(hidden1)\n",
"concat = keras.layers.concatenate([input_, hidden2])\n",
"output = keras.layers.Dense(1)(concat)\n",
"model = keras.models.Model(inputs=[input_], outputs=[output])"
"input_ = tf.keras.layers.Input(shape=X_train.shape[1:])\n",
"hidden1 = tf.keras.layers.Dense(30, activation=\"relu\")(input_)\n",
"hidden2 = tf.keras.layers.Dense(30, activation=\"relu\")(hidden1)\n",
"concat = tf.keras.layers.concatenate([input_, hidden2])\n",
"output = tf.keras.layers.Dense(1)(concat)\n",
"model = tf.keras.Model(inputs=[input_], outputs=[output])"
]
},
{
@@ -921,7 +921,7 @@
"metadata": {},
"outputs": [],
"source": [
"model.compile(loss=\"mean_squared_error\", optimizer=keras.optimizers.SGD(learning_rate=1e-3))\n",
"model.compile(loss=\"mean_squared_error\", optimizer=tf.keras.optimizers.SGD(learning_rate=1e-3))\n",
"history = model.fit(X_train, y_train, epochs=20,\n",
" validation_data=(X_valid, y_valid))\n",
"mse_test = model.evaluate(X_test, y_test)\n",
@@ -951,13 +951,13 @@
"metadata": {},
"outputs": [],
"source": [
"input_A = keras.layers.Input(shape=[5], name=\"wide_input\")\n",
"input_B = keras.layers.Input(shape=[6], name=\"deep_input\")\n",
"hidden1 = keras.layers.Dense(30, activation=\"relu\")(input_B)\n",
"hidden2 = keras.layers.Dense(30, activation=\"relu\")(hidden1)\n",
"concat = keras.layers.concatenate([input_A, hidden2])\n",
"output = keras.layers.Dense(1, name=\"output\")(concat)\n",
"model = keras.models.Model(inputs=[input_A, input_B], outputs=[output])"
"input_A = tf.keras.layers.Input(shape=[5], name=\"wide_input\")\n",
"input_B = tf.keras.layers.Input(shape=[6], name=\"deep_input\")\n",
"hidden1 = tf.keras.layers.Dense(30, activation=\"relu\")(input_B)\n",
"hidden2 = tf.keras.layers.Dense(30, activation=\"relu\")(hidden1)\n",
"concat = tf.keras.layers.concatenate([input_A, hidden2])\n",
"output = tf.keras.layers.Dense(1, name=\"output\")(concat)\n",
"model = tf.keras.Model(inputs=[input_A, input_B], outputs=[output])"
]
},
{
@@ -966,7 +966,7 @@
"metadata": {},
"outputs": [],
"source": [
"model.compile(loss=\"mse\", optimizer=keras.optimizers.SGD(learning_rate=1e-3))\n",
"model.compile(loss=\"mse\", optimizer=tf.keras.optimizers.SGD(learning_rate=1e-3))\n",
"\n",
"X_train_A, X_train_B = X_train[:, :5], X_train[:, 2:]\n",
"X_valid_A, X_valid_B = X_valid[:, :5], X_valid[:, 2:]\n",
@@ -1002,14 +1002,14 @@
"metadata": {},
"outputs": [],
"source": [
"input_A = keras.layers.Input(shape=[5], name=\"wide_input\")\n",
"input_B = keras.layers.Input(shape=[6], name=\"deep_input\")\n",
"hidden1 = keras.layers.Dense(30, activation=\"relu\")(input_B)\n",
"hidden2 = keras.layers.Dense(30, activation=\"relu\")(hidden1)\n",
"concat = keras.layers.concatenate([input_A, hidden2])\n",
"output = keras.layers.Dense(1, name=\"main_output\")(concat)\n",
"aux_output = keras.layers.Dense(1, name=\"aux_output\")(hidden2)\n",
"model = keras.models.Model(inputs=[input_A, input_B],\n",
"input_A = tf.keras.layers.Input(shape=[5], name=\"wide_input\")\n",
"input_B = tf.keras.layers.Input(shape=[6], name=\"deep_input\")\n",
"hidden1 = tf.keras.layers.Dense(30, activation=\"relu\")(input_B)\n",
"hidden2 = tf.keras.layers.Dense(30, activation=\"relu\")(hidden1)\n",
"concat = tf.keras.layers.concatenate([input_A, hidden2])\n",
"output = tf.keras.layers.Dense(1, name=\"main_output\")(concat)\n",
"aux_output = tf.keras.layers.Dense(1, name=\"aux_output\")(hidden2)\n",
"model = tf.keras.Model(inputs=[input_A, input_B],\n",
" outputs=[output, aux_output])"
]
},
@@ -1019,7 +1019,7 @@
"metadata": {},
"outputs": [],
"source": [
"model.compile(loss=[\"mse\", \"mse\"], loss_weights=[0.9, 0.1], optimizer=keras.optimizers.SGD(learning_rate=1e-3))"
"model.compile(loss=[\"mse\", \"mse\"], loss_weights=[0.9, 0.1], optimizer=tf.keras.optimizers.SGD(learning_rate=1e-3))"
]
},
{
@@ -1056,19 +1056,19 @@
"metadata": {},
"outputs": [],
"source": [
"class WideAndDeepModel(keras.models.Model):\n",
"class WideAndDeepModel(tf.keras.Model):\n",
" def __init__(self, units=30, activation=\"relu\", **kwargs):\n",
" super().__init__(**kwargs)\n",
" self.hidden1 = keras.layers.Dense(units, activation=activation)\n",
" self.hidden2 = keras.layers.Dense(units, activation=activation)\n",
" self.main_output = keras.layers.Dense(1)\n",
" self.aux_output = keras.layers.Dense(1)\n",
" self.hidden1 = tf.keras.layers.Dense(units, activation=activation)\n",
" self.hidden2 = tf.keras.layers.Dense(units, activation=activation)\n",
" self.main_output = tf.keras.layers.Dense(1)\n",
" self.aux_output = tf.keras.layers.Dense(1)\n",
" \n",
" def call(self, inputs):\n",
" input_A, input_B = inputs\n",
" hidden1 = self.hidden1(input_B)\n",
" hidden2 = self.hidden2(hidden1)\n",
" concat = keras.layers.concatenate([input_A, hidden2])\n",
" concat = tf.keras.layers.concatenate([input_A, hidden2])\n",
" main_output = self.main_output(concat)\n",
" aux_output = self.aux_output(hidden2)\n",
" return main_output, aux_output\n",
@@ -1082,7 +1082,7 @@
"metadata": {},
"outputs": [],
"source": [
"model.compile(loss=\"mse\", loss_weights=[0.9, 0.1], optimizer=keras.optimizers.SGD(learning_rate=1e-3))\n",
"model.compile(loss=\"mse\", loss_weights=[0.9, 0.1], optimizer=tf.keras.optimizers.SGD(learning_rate=1e-3))\n",
"history = model.fit((X_train_A, X_train_B), (y_train, y_train), epochs=10,\n",
" validation_data=((X_valid_A, X_valid_B), (y_valid, y_valid)))\n",
"total_loss, main_loss, aux_loss = model.evaluate((X_test_A, X_test_B), (y_test, y_test))\n",
@@ -1112,10 +1112,10 @@
"metadata": {},
"outputs": [],
"source": [
"model = keras.models.Sequential([\n",
" keras.layers.Dense(30, activation=\"relu\", input_shape=[8]),\n",
" keras.layers.Dense(30, activation=\"relu\"),\n",
" keras.layers.Dense(1)\n",
"model = tf.keras.Sequential([\n",
" tf.keras.layers.Dense(30, activation=\"relu\", input_shape=[8]),\n",
" tf.keras.layers.Dense(30, activation=\"relu\"),\n",
" tf.keras.layers.Dense(1)\n",
"]) "
]
},
@@ -1125,7 +1125,7 @@
"metadata": {},
"outputs": [],
"source": [
"model.compile(loss=\"mse\", optimizer=keras.optimizers.SGD(learning_rate=1e-3))\n",
"model.compile(loss=\"mse\", optimizer=tf.keras.optimizers.SGD(learning_rate=1e-3))\n",
"history = model.fit(X_train, y_train, epochs=10, validation_data=(X_valid, y_valid))\n",
"mse_test = model.evaluate(X_test, y_test)"
]
@@ -1145,7 +1145,7 @@
"metadata": {},
"outputs": [],
"source": [
"model = keras.models.load_model(\"my_keras_model.h5\")"
"model = tf.keras.models.load_model(\"my_keras_model.h5\")"
]
},
{
@@ -1188,7 +1188,7 @@
"metadata": {},
"outputs": [],
"source": [
"keras.backend.clear_session()\n",
"tf.keras.backend.clear_session()\n",
"np.random.seed(42)\n",
"tf.random.set_seed(42)"
]
@@ -1199,10 +1199,10 @@
"metadata": {},
"outputs": [],
"source": [
"model = keras.models.Sequential([\n",
" keras.layers.Dense(30, activation=\"relu\", input_shape=[8]),\n",
" keras.layers.Dense(30, activation=\"relu\"),\n",
" keras.layers.Dense(1)\n",
"model = tf.keras.Sequential([\n",
" tf.keras.layers.Dense(30, activation=\"relu\", input_shape=[8]),\n",
" tf.keras.layers.Dense(30, activation=\"relu\"),\n",
" tf.keras.layers.Dense(1)\n",
"]) "
]
},
@@ -1212,12 +1212,12 @@
"metadata": {},
"outputs": [],
"source": [
"model.compile(loss=\"mse\", optimizer=keras.optimizers.SGD(learning_rate=1e-3))\n",
"checkpoint_cb = keras.callbacks.ModelCheckpoint(\"my_keras_model.h5\", save_best_only=True)\n",
"model.compile(loss=\"mse\", optimizer=tf.keras.optimizers.SGD(learning_rate=1e-3))\n",
"checkpoint_cb = tf.keras.callbacks.ModelCheckpoint(\"my_keras_model.h5\", save_best_only=True)\n",
"history = model.fit(X_train, y_train, epochs=10,\n",
" validation_data=(X_valid, y_valid),\n",
" callbacks=[checkpoint_cb])\n",
"model = keras.models.load_model(\"my_keras_model.h5\") # rollback to best model\n",
"model = tf.keras.models.load_model(\"my_keras_model.h5\") # rollback to best model\n",
"mse_test = model.evaluate(X_test, y_test)"
]
},
@@ -1227,8 +1227,8 @@
"metadata": {},
"outputs": [],
"source": [
"model.compile(loss=\"mse\", optimizer=keras.optimizers.SGD(learning_rate=1e-3))\n",
"early_stopping_cb = keras.callbacks.EarlyStopping(patience=10,\n",
"model.compile(loss=\"mse\", optimizer=tf.keras.optimizers.SGD(learning_rate=1e-3))\n",
"early_stopping_cb = tf.keras.callbacks.EarlyStopping(patience=10,\n",
" restore_best_weights=True)\n",
"history = model.fit(X_train, y_train, epochs=100,\n",
" validation_data=(X_valid, y_valid),\n",
@@ -1242,7 +1242,7 @@
"metadata": {},
"outputs": [],
"source": [
"class PrintValTrainRatioCallback(keras.callbacks.Callback):\n",
"class PrintValTrainRatioCallback(tf.keras.callbacks.Callback):\n",
" def on_epoch_end(self, epoch, logs):\n",
" print(\"\\nval/train: {:.2f}\".format(logs[\"val_loss\"] / logs[\"loss\"]))"
]
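A usage sketch for this custom callback (the model and data names are assumed from the earlier cells, not part of this hunk):

```python
# The custom callback is passed to fit() like any built-in callback.
val_train_ratio_cb = PrintValTrainRatioCallback()
history = model.fit(X_train, y_train, epochs=1,
                    validation_data=(X_valid, y_valid),
                    callbacks=[val_train_ratio_cb])
```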
@@ -1296,7 +1296,7 @@
"metadata": {},
"outputs": [],
"source": [
"keras.backend.clear_session()\n",
"tf.keras.backend.clear_session()\n",
"np.random.seed(42)\n",
"tf.random.set_seed(42)"
]
@@ -1307,12 +1307,12 @@
"metadata": {},
"outputs": [],
"source": [
"model = keras.models.Sequential([\n",
" keras.layers.Dense(30, activation=\"relu\", input_shape=[8]),\n",
" keras.layers.Dense(30, activation=\"relu\"),\n",
" keras.layers.Dense(1)\n",
"model = tf.keras.Sequential([\n",
" tf.keras.layers.Dense(30, activation=\"relu\", input_shape=[8]),\n",
" tf.keras.layers.Dense(30, activation=\"relu\"),\n",
" tf.keras.layers.Dense(1)\n",
"]) \n",
"model.compile(loss=\"mse\", optimizer=keras.optimizers.SGD(learning_rate=1e-3))"
"model.compile(loss=\"mse\", optimizer=tf.keras.optimizers.SGD(learning_rate=1e-3))"
]
},
{
@@ -1321,7 +1321,7 @@
"metadata": {},
"outputs": [],
"source": [
"tensorboard_cb = keras.callbacks.TensorBoard(run_logdir)\n",
"tensorboard_cb = tf.keras.callbacks.TensorBoard(run_logdir)\n",
"history = model.fit(X_train, y_train, epochs=30,\n",
" validation_data=(X_valid, y_valid),\n",
" callbacks=[checkpoint_cb, tensorboard_cb])"
@@ -1368,7 +1368,7 @@
"metadata": {},
"outputs": [],
"source": [
"keras.backend.clear_session()\n",
"tf.keras.backend.clear_session()\n",
"np.random.seed(42)\n",
"tf.random.set_seed(42)"
]
@@ -1379,12 +1379,12 @@
"metadata": {},
"outputs": [],
"source": [
"model = keras.models.Sequential([\n",
" keras.layers.Dense(30, activation=\"relu\", input_shape=[8]),\n",
" keras.layers.Dense(30, activation=\"relu\"),\n",
" keras.layers.Dense(1)\n",
"model = tf.keras.Sequential([\n",
" tf.keras.layers.Dense(30, activation=\"relu\", input_shape=[8]),\n",
" tf.keras.layers.Dense(30, activation=\"relu\"),\n",
" tf.keras.layers.Dense(1)\n",
"]) \n",
"model.compile(loss=\"mse\", optimizer=keras.optimizers.SGD(learning_rate=0.05))"
"model.compile(loss=\"mse\", optimizer=tf.keras.optimizers.SGD(learning_rate=0.05))"
]
},
{
@@ -1393,7 +1393,7 @@
"metadata": {},
"outputs": [],
"source": [
"tensorboard_cb = keras.callbacks.TensorBoard(run_logdir2)\n",
"tensorboard_cb = tf.keras.callbacks.TensorBoard(run_logdir2)\n",
"history = model.fit(X_train, y_train, epochs=30,\n",
" validation_data=(X_valid, y_valid),\n",
" callbacks=[checkpoint_cb, tensorboard_cb])"
@@ -1419,7 +1419,7 @@
"metadata": {},
"outputs": [],
"source": [
"help(keras.callbacks.TensorBoard.__init__)"
"help(tf.keras.callbacks.TensorBoard.__init__)"
]
},
{
@@ -1435,7 +1435,7 @@
"metadata": {},
"outputs": [],
"source": [
"keras.backend.clear_session()\n",
"tf.keras.backend.clear_session()\n",
"np.random.seed(42)\n",
"tf.random.set_seed(42)"
]
@@ -1447,12 +1447,12 @@
"outputs": [],
"source": [
"def build_model(n_hidden=1, n_neurons=30, learning_rate=3e-3, input_shape=[8]):\n",
" model = keras.models.Sequential()\n",
" model.add(keras.layers.InputLayer(input_shape=input_shape))\n",
" model = tf.keras.Sequential()\n",
" model.add(tf.keras.layers.InputLayer(input_shape=input_shape))\n",
" for layer in range(n_hidden):\n",
" model.add(keras.layers.Dense(n_neurons, activation=\"relu\"))\n",
" model.add(keras.layers.Dense(1))\n",
" optimizer = keras.optimizers.SGD(learning_rate=learning_rate)\n",
" model.add(tf.keras.layers.Dense(n_neurons, activation=\"relu\"))\n",
" model.add(tf.keras.layers.Dense(1))\n",
" optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate)\n",
" model.compile(loss=\"mse\", optimizer=optimizer)\n",
" return model"
]
@@ -1463,7 +1463,7 @@
"metadata": {},
"outputs": [],
"source": [
"keras_reg = keras.wrappers.scikit_learn.KerasRegressor(build_model)"
"keras_reg = tf.keras.wrappers.scikit_learn.KerasRegressor(build_model)"
]
},
{
@@ -1474,7 +1474,7 @@
"source": [
"keras_reg.fit(X_train, y_train, epochs=100,\n",
" validation_data=(X_valid, y_valid),\n",
" callbacks=[keras.callbacks.EarlyStopping(patience=10)])"
" callbacks=[tf.keras.callbacks.EarlyStopping(patience=10)])"
]
},
{
@@ -1530,7 +1530,7 @@
"rnd_search_cv = RandomizedSearchCV(keras_reg, param_distribs, n_iter=10, cv=3, verbose=2)\n",
"rnd_search_cv.fit(X_train, y_train, epochs=100,\n",
" validation_data=(X_valid, y_valid),\n",
" callbacks=[keras.callbacks.EarlyStopping(patience=10)])"
" callbacks=[tf.keras.callbacks.EarlyStopping(patience=10)])"
]
},
{
@@ -1622,7 +1622,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"*Exercise: Train a deep MLP on the MNIST dataset (you can load it using `keras.datasets.mnist.load_data()`. See if you can get over 98% precision. Try searching for the optimal learning rate by using the approach presented in this chapter (i.e., by growing the learning rate exponentially, plotting the loss, and finding the point where the loss shoots up). Try adding all the bells and whistles—save checkpoints, use early stopping, and plot learning curves using TensorBoard.*"
"*Exercise: Train a deep MLP on the MNIST dataset (you can load it using `tf.keras.datasets.mnist.load_data()`. See if you can get over 98% precision. Try searching for the optimal learning rate by using the approach presented in this chapter (i.e., by growing the learning rate exponentially, plotting the loss, and finding the point where the loss shoots up). Try adding all the bells and whistles—save checkpoints, use early stopping, and plot learning curves using TensorBoard.*"
]
},
{
@@ -1638,7 +1638,7 @@
"metadata": {},
"outputs": [],
"source": [
"(X_train_full, y_train_full), (X_test, y_test) = keras.datasets.mnist.load_data()"
"(X_train_full, y_train_full), (X_test, y_test) = tf.keras.datasets.mnist.load_data()"
]
},
{
@@ -1791,9 +1791,9 @@
"metadata": {},
"outputs": [],
"source": [
"K = keras.backend\n",
"K = tf.keras.backend\n",
"\n",
"class ExponentialLearningRate(keras.callbacks.Callback):\n",
"class ExponentialLearningRate(tf.keras.callbacks.Callback):\n",
" def __init__(self, factor):\n",
" self.factor = factor\n",
" self.rates = []\n",
@@ -1810,7 +1810,7 @@
"metadata": {},
"outputs": [],
"source": [
"keras.backend.clear_session()\n",
"tf.keras.backend.clear_session()\n",
"np.random.seed(42)\n",
"tf.random.set_seed(42)"
]
@@ -1821,11 +1821,11 @@
"metadata": {},
"outputs": [],
"source": [
"model = keras.models.Sequential([\n",
" keras.layers.Flatten(input_shape=[28, 28]),\n",
" keras.layers.Dense(300, activation=\"relu\"),\n",
" keras.layers.Dense(100, activation=\"relu\"),\n",
" keras.layers.Dense(10, activation=\"softmax\")\n",
"model = tf.keras.Sequential([\n",
" tf.keras.layers.Flatten(input_shape=[28, 28]),\n",
" tf.keras.layers.Dense(300, activation=\"relu\"),\n",
" tf.keras.layers.Dense(100, activation=\"relu\"),\n",
" tf.keras.layers.Dense(10, activation=\"softmax\")\n",
"])"
]
},
@@ -1843,7 +1843,7 @@
"outputs": [],
"source": [
"model.compile(loss=\"sparse_categorical_crossentropy\",\n",
" optimizer=keras.optimizers.SGD(learning_rate=1e-3),\n",
" optimizer=tf.keras.optimizers.SGD(learning_rate=1e-3),\n",
" metrics=[\"accuracy\"])\n",
"expon_lr = ExponentialLearningRate(factor=1.005)"
]
@@ -1901,7 +1901,7 @@
"metadata": {},
"outputs": [],
"source": [
"keras.backend.clear_session()\n",
"tf.keras.backend.clear_session()\n",
"np.random.seed(42)\n",
"tf.random.set_seed(42)"
]
@@ -1912,11 +1912,11 @@
"metadata": {},
"outputs": [],
"source": [
"model = keras.models.Sequential([\n",
" keras.layers.Flatten(input_shape=[28, 28]),\n",
" keras.layers.Dense(300, activation=\"relu\"),\n",
" keras.layers.Dense(100, activation=\"relu\"),\n",
" keras.layers.Dense(10, activation=\"softmax\")\n",
"model = tf.keras.Sequential([\n",
" tf.keras.layers.Flatten(input_shape=[28, 28]),\n",
" tf.keras.layers.Dense(300, activation=\"relu\"),\n",
" tf.keras.layers.Dense(100, activation=\"relu\"),\n",
" tf.keras.layers.Dense(10, activation=\"softmax\")\n",
"])"
]
},
@@ -1927,7 +1927,7 @@
"outputs": [],
"source": [
"model.compile(loss=\"sparse_categorical_crossentropy\",\n",
" optimizer=keras.optimizers.SGD(learning_rate=3e-1),\n",
" optimizer=tf.keras.optimizers.SGD(learning_rate=3e-1),\n",
" metrics=[\"accuracy\"])"
]
},
@@ -1948,9 +1948,9 @@
"metadata": {},
"outputs": [],
"source": [
"early_stopping_cb = keras.callbacks.EarlyStopping(patience=20)\n",
"checkpoint_cb = keras.callbacks.ModelCheckpoint(\"my_mnist_model.h5\", save_best_only=True)\n",
"tensorboard_cb = keras.callbacks.TensorBoard(run_logdir)\n",
"early_stopping_cb = tf.keras.callbacks.EarlyStopping(patience=20)\n",
"checkpoint_cb = tf.keras.callbacks.ModelCheckpoint(\"my_mnist_model.h5\", save_best_only=True)\n",
"tensorboard_cb = tf.keras.callbacks.TensorBoard(run_logdir)\n",
"\n",
"history = model.fit(X_train, y_train, epochs=100,\n",
" validation_data=(X_valid, y_valid),\n",
@@ -1963,7 +1963,7 @@
"metadata": {},
"outputs": [],
"source": [
"model = keras.models.load_model(\"my_mnist_model.h5\") # rollback to best model\n",
"model = tf.keras.models.load_model(\"my_mnist_model.h5\") # rollback to best model\n",
"model.evaluate(X_test, y_test)"
]
},

File diff suppressed because it is too large


@@ -200,7 +200,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"### Using `keras.backend`"
"### Using `tf.keras.backend`"
]
},
{
@@ -210,7 +210,7 @@
"outputs": [],
"source": [
"from tensorflow import keras\n",
"K = keras.backend\n",
"K = tf.keras.backend\n",
"K.square(K.transpose(t)) + 10"
]
},
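In TF 2 these backend functions are thin wrappers around ordinary TensorFlow ops, so the same expression can be written directly (an equivalent sketch, assuming `t` is the tensor defined earlier in the notebook):

```python
tf.square(tf.transpose(t)) + 10  # same result without the backend API
```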
@@ -818,10 +818,10 @@
"source": [
"input_shape = X_train.shape[1:]\n",
"\n",
"model = keras.models.Sequential([\n",
" keras.layers.Dense(30, activation=\"selu\", kernel_initializer=\"lecun_normal\",\n",
"model = tf.keras.Sequential([\n",
" tf.keras.layers.Dense(30, activation=\"selu\", kernel_initializer=\"lecun_normal\",\n",
" input_shape=input_shape),\n",
" keras.layers.Dense(1),\n",
" tf.keras.layers.Dense(1),\n",
"])"
]
},
@@ -868,7 +868,7 @@
"metadata": {},
"outputs": [],
"source": [
"model = keras.models.load_model(\"my_model_with_a_custom_loss.h5\",\n",
"model = tf.keras.models.load_model(\"my_model_with_a_custom_loss.h5\",\n",
" custom_objects={\"huber_fn\": huber_fn})"
]
},
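`huber_fn` itself is unchanged by this commit, so it never appears in the diff. For reference, a sketch of the Huber loss as the chapter defines it (threshold fixed at 1.0):

```python
# Huber loss: quadratic for small errors, linear for large ones.
def huber_fn(y_true, y_pred):
    error = y_true - y_pred
    is_small_error = tf.abs(error) < 1
    squared_loss = tf.square(error) / 2
    linear_loss = tf.abs(error) - 0.5
    return tf.where(is_small_error, squared_loss, linear_loss)
```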
@@ -934,7 +934,7 @@
"metadata": {},
"outputs": [],
"source": [
"model = keras.models.load_model(\"my_model_with_a_custom_loss_threshold_2.h5\",\n",
"model = tf.keras.models.load_model(\"my_model_with_a_custom_loss_threshold_2.h5\",\n",
" custom_objects={\"huber_fn\": create_huber(2.0)})"
]
},
@@ -954,7 +954,7 @@
"metadata": {},
"outputs": [],
"source": [
"class HuberLoss(keras.losses.Loss):\n",
"class HuberLoss(tf.keras.losses.Loss):\n",
" def __init__(self, threshold=1.0, **kwargs):\n",
" self.threshold = threshold\n",
" super().__init__(**kwargs)\n",
@@ -975,10 +975,10 @@
"metadata": {},
"outputs": [],
"source": [
"model = keras.models.Sequential([\n",
" keras.layers.Dense(30, activation=\"selu\", kernel_initializer=\"lecun_normal\",\n",
"model = tf.keras.Sequential([\n",
" tf.keras.layers.Dense(30, activation=\"selu\", kernel_initializer=\"lecun_normal\",\n",
" input_shape=input_shape),\n",
" keras.layers.Dense(1),\n",
" tf.keras.layers.Dense(1),\n",
"])"
]
},
@@ -1016,7 +1016,7 @@
"metadata": {},
"outputs": [],
"source": [
"model = keras.models.load_model(\"my_model_with_a_custom_loss_class.h5\",\n",
"model = tf.keras.models.load_model(\"my_model_with_a_custom_loss_class.h5\",\n",
" custom_objects={\"HuberLoss\": HuberLoss})"
]
},
@@ -1052,7 +1052,7 @@
"metadata": {},
"outputs": [],
"source": [
"keras.backend.clear_session()\n",
"tf.keras.backend.clear_session()\n",
"np.random.seed(42)\n",
"tf.random.set_seed(42)"
]
@@ -1083,7 +1083,7 @@
"metadata": {},
"outputs": [],
"source": [
"layer = keras.layers.Dense(1, activation=my_softplus,\n",
"layer = tf.keras.layers.Dense(1, activation=my_softplus,\n",
" kernel_initializer=my_glorot_initializer,\n",
" kernel_regularizer=my_l1_regularizer,\n",
" kernel_constraint=my_positive_weights)"
@@ -1095,7 +1095,7 @@
"metadata": {},
"outputs": [],
"source": [
"keras.backend.clear_session()\n",
"tf.keras.backend.clear_session()\n",
"np.random.seed(42)\n",
"tf.random.set_seed(42)"
]
@@ -1106,10 +1106,10 @@
"metadata": {},
"outputs": [],
"source": [
"model = keras.models.Sequential([\n",
" keras.layers.Dense(30, activation=\"selu\", kernel_initializer=\"lecun_normal\",\n",
"model = tf.keras.Sequential([\n",
" tf.keras.layers.Dense(30, activation=\"selu\", kernel_initializer=\"lecun_normal\",\n",
" input_shape=input_shape),\n",
" keras.layers.Dense(1, activation=my_softplus,\n",
" tf.keras.layers.Dense(1, activation=my_softplus,\n",
" kernel_regularizer=my_l1_regularizer,\n",
" kernel_constraint=my_positive_weights,\n",
" kernel_initializer=my_glorot_initializer),\n",
@@ -1150,7 +1150,7 @@
"metadata": {},
"outputs": [],
"source": [
"model = keras.models.load_model(\n",
"model = tf.keras.models.load_model(\n",
" \"my_model_with_many_custom_parts.h5\",\n",
" custom_objects={\n",
" \"my_l1_regularizer\": my_l1_regularizer,\n",
@@ -1166,7 +1166,7 @@
"metadata": {},
"outputs": [],
"source": [
"class MyL1Regularizer(keras.regularizers.Regularizer):\n",
"class MyL1Regularizer(tf.keras.regularizers.Regularizer):\n",
" def __init__(self, factor):\n",
" self.factor = factor\n",
" def __call__(self, weights):\n",
@@ -1181,7 +1181,7 @@
"metadata": {},
"outputs": [],
"source": [
"keras.backend.clear_session()\n",
"tf.keras.backend.clear_session()\n",
"np.random.seed(42)\n",
"tf.random.set_seed(42)"
]
@@ -1192,10 +1192,10 @@
"metadata": {},
"outputs": [],
"source": [
"model = keras.models.Sequential([\n",
" keras.layers.Dense(30, activation=\"selu\", kernel_initializer=\"lecun_normal\",\n",
"model = tf.keras.Sequential([\n",
" tf.keras.layers.Dense(30, activation=\"selu\", kernel_initializer=\"lecun_normal\",\n",
" input_shape=input_shape),\n",
" keras.layers.Dense(1, activation=my_softplus,\n",
" tf.keras.layers.Dense(1, activation=my_softplus,\n",
" kernel_regularizer=MyL1Regularizer(0.01),\n",
" kernel_constraint=my_positive_weights,\n",
" kernel_initializer=my_glorot_initializer),\n",
@@ -1236,7 +1236,7 @@
"metadata": {},
"outputs": [],
"source": [
"model = keras.models.load_model(\n",
"model = tf.keras.models.load_model(\n",
" \"my_model_with_many_custom_parts.h5\",\n",
" custom_objects={\n",
" \"MyL1Regularizer\": MyL1Regularizer,\n",
@@ -1259,7 +1259,7 @@
"metadata": {},
"outputs": [],
"source": [
"keras.backend.clear_session()\n",
"tf.keras.backend.clear_session()\n",
"np.random.seed(42)\n",
"tf.random.set_seed(42)"
]
@@ -1270,10 +1270,10 @@
"metadata": {},
"outputs": [],
"source": [
"model = keras.models.Sequential([\n",
" keras.layers.Dense(30, activation=\"selu\", kernel_initializer=\"lecun_normal\",\n",
"model = tf.keras.Sequential([\n",
" tf.keras.layers.Dense(30, activation=\"selu\", kernel_initializer=\"lecun_normal\",\n",
" input_shape=input_shape),\n",
" keras.layers.Dense(1),\n",
" tf.keras.layers.Dense(1),\n",
"])"
]
},
@@ -1347,7 +1347,7 @@
"metadata": {},
"outputs": [],
"source": [
"precision = keras.metrics.Precision()\n",
"precision = tf.keras.metrics.Precision()\n",
"precision([0, 1, 1, 1, 0, 1, 0, 1], [1, 1, 0, 1, 0, 1, 0, 1])"
]
},
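`Precision` is a streaming metric: each call updates internal counters rather than computing a one-off value. A short usage sketch (standard `tf.keras.metrics` API of this TF generation; `reset_states()` was later renamed `reset_state()`):

```python
precision.result()        # running precision over all batches seen so far
precision.variables       # the true/false positive counters it maintains
precision.reset_states()  # clear the counters, e.g. between epochs
```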
@@ -1400,7 +1400,7 @@
"metadata": {},
"outputs": [],
"source": [
"class HuberMetric(keras.metrics.Metric):\n",
"class HuberMetric(tf.keras.metrics.Metric):\n",
" def __init__(self, threshold=1.0, **kwargs):\n",
" super().__init__(**kwargs) # handles base args (e.g., dtype)\n",
" self.threshold = threshold\n",
@@ -1478,7 +1478,7 @@
"metadata": {},
"outputs": [],
"source": [
"keras.backend.clear_session()\n",
"tf.keras.backend.clear_session()\n",
"np.random.seed(42)\n",
"tf.random.set_seed(42)"
]
@@ -1489,10 +1489,10 @@
"metadata": {},
"outputs": [],
"source": [
"model = keras.models.Sequential([\n",
" keras.layers.Dense(30, activation=\"selu\", kernel_initializer=\"lecun_normal\",\n",
"model = tf.keras.Sequential([\n",
" tf.keras.layers.Dense(30, activation=\"selu\", kernel_initializer=\"lecun_normal\",\n",
" input_shape=input_shape),\n",
" keras.layers.Dense(1),\n",
" tf.keras.layers.Dense(1),\n",
"])"
]
},
@@ -1529,7 +1529,7 @@
"metadata": {},
"outputs": [],
"source": [
"model = keras.models.load_model(\"my_model_with_a_custom_metric.h5\",\n",
"model = tf.keras.models.load_model(\"my_model_with_a_custom_metric.h5\",\n",
" custom_objects={\"huber_fn\": create_huber(2.0),\n",
" \"HuberMetric\": HuberMetric})"
]
@@ -1572,7 +1572,7 @@
"metadata": {},
"outputs": [],
"source": [
"class HuberMetric(keras.metrics.Mean):\n",
"class HuberMetric(tf.keras.metrics.Mean):\n",
" def __init__(self, threshold=1.0, name='HuberMetric', dtype=None):\n",
" self.threshold = threshold\n",
" self.huber_fn = create_huber(threshold)\n",
@@ -1598,7 +1598,7 @@
"metadata": {},
"outputs": [],
"source": [
"keras.backend.clear_session()\n",
"tf.keras.backend.clear_session()\n",
"np.random.seed(42)\n",
"tf.random.set_seed(42)"
]
@@ -1609,10 +1609,10 @@
"metadata": {},
"outputs": [],
"source": [
"model = keras.models.Sequential([\n",
" keras.layers.Dense(30, activation=\"selu\", kernel_initializer=\"lecun_normal\",\n",
"model = tf.keras.Sequential([\n",
" tf.keras.layers.Dense(30, activation=\"selu\", kernel_initializer=\"lecun_normal\",\n",
" input_shape=input_shape),\n",
" keras.layers.Dense(1),\n",
" tf.keras.layers.Dense(1),\n",
"])"
]
},
@@ -1622,7 +1622,7 @@
"metadata": {},
"outputs": [],
"source": [
"model.compile(loss=keras.losses.Huber(2.0), optimizer=\"nadam\", weighted_metrics=[HuberMetric(2.0)])"
"model.compile(loss=tf.keras.losses.Huber(2.0), optimizer=\"nadam\", weighted_metrics=[HuberMetric(2.0)])"
]
},
{
@@ -1662,7 +1662,7 @@
"metadata": {},
"outputs": [],
"source": [
"model = keras.models.load_model(\"my_model_with_a_custom_metric_v2.h5\",\n",
"model = tf.keras.models.load_model(\"my_model_with_a_custom_metric_v2.h5\",\n",
" custom_objects={\"HuberMetric\": HuberMetric})"
]
},
@@ -1699,7 +1699,7 @@
"metadata": {},
"outputs": [],
"source": [
"exponential_layer = keras.layers.Lambda(lambda x: tf.exp(x))"
"exponential_layer = tf.keras.layers.Lambda(lambda x: tf.exp(x))"
]
},
{
@@ -1724,7 +1724,7 @@
"metadata": {},
"outputs": [],
"source": [
"keras.backend.clear_session()\n",
"tf.keras.backend.clear_session()\n",
"np.random.seed(42)\n",
"tf.random.set_seed(42)"
]
@@ -1735,9 +1735,9 @@
"metadata": {},
"outputs": [],
"source": [
"model = keras.models.Sequential([\n",
" keras.layers.Dense(30, activation=\"relu\", input_shape=input_shape),\n",
" keras.layers.Dense(1),\n",
"model = tf.keras.Sequential([\n",
" tf.keras.layers.Dense(30, activation=\"relu\", input_shape=input_shape),\n",
" tf.keras.layers.Dense(1),\n",
" exponential_layer\n",
"])\n",
"model.compile(loss=\"mse\", optimizer=\"sgd\")\n",
@@ -1752,11 +1752,11 @@
"metadata": {},
"outputs": [],
"source": [
"class MyDense(keras.layers.Layer):\n",
"class MyDense(tf.keras.layers.Layer):\n",
" def __init__(self, units, activation=None, **kwargs):\n",
" super().__init__(**kwargs)\n",
" self.units = units\n",
" self.activation = keras.activations.get(activation)\n",
" self.activation = tf.keras.activations.get(activation)\n",
"\n",
" def build(self, batch_input_shape):\n",
" self.kernel = self.add_weight(\n",
@@ -1775,7 +1775,7 @@
" def get_config(self):\n",
" base_config = super().get_config()\n",
" return {**base_config, \"units\": self.units,\n",
" \"activation\": keras.activations.serialize(self.activation)}"
" \"activation\": tf.keras.activations.serialize(self.activation)}"
]
},
{
@@ -1784,7 +1784,7 @@
"metadata": {},
"outputs": [],
"source": [
"keras.backend.clear_session()\n",
"tf.keras.backend.clear_session()\n",
"np.random.seed(42)\n",
"tf.random.set_seed(42)"
]
@@ -1795,7 +1795,7 @@
"metadata": {},
"outputs": [],
"source": [
"model = keras.models.Sequential([\n",
"model = tf.keras.Sequential([\n",
" MyDense(30, activation=\"relu\", input_shape=input_shape),\n",
" MyDense(1)\n",
"])"
@@ -1828,7 +1828,7 @@
"metadata": {},
"outputs": [],
"source": [
"model = keras.models.load_model(\"my_model_with_a_custom_layer.h5\",\n",
"model = tf.keras.models.load_model(\"my_model_with_a_custom_layer.h5\",\n",
" custom_objects={\"MyDense\": MyDense})"
]
},
@@ -1838,7 +1838,7 @@
"metadata": {},
"outputs": [],
"source": [
"class MyMultiLayer(keras.layers.Layer):\n",
"class MyMultiLayer(tf.keras.layers.Layer):\n",
" def call(self, X):\n",
" X1, X2 = X\n",
" print(\"X1.shape: \", X1.shape ,\" X2.shape: \", X2.shape) # Debugging of custom layer\n",
@@ -1862,8 +1862,8 @@
"metadata": {},
"outputs": [],
"source": [
"inputs1 = keras.layers.Input(shape=[2])\n",
"inputs2 = keras.layers.Input(shape=[2])\n",
"inputs1 = tf.keras.layers.Input(shape=[2])\n",
"inputs2 = tf.keras.layers.Input(shape=[2])\n",
"outputs1, outputs2 = MyMultiLayer()((inputs1, inputs2))"
]
},
@@ -1924,18 +1924,18 @@
"metadata": {},
"outputs": [],
"source": [
"keras.backend.clear_session()\n",
"tf.keras.backend.clear_session()\n",
"np.random.seed(42)\n",
"tf.random.set_seed(42)\n",
"\n",
"input_A = keras.layers.Input(shape=X_train_scaled_A.shape[-1])\n",
"input_B = keras.layers.Input(shape=X_train_scaled_B.shape[-1])\n",
"input_A = tf.keras.layers.Input(shape=X_train_scaled_A.shape[-1])\n",
"input_B = tf.keras.layers.Input(shape=X_train_scaled_B.shape[-1])\n",
"hidden_A, hidden_B = MyMultiLayer()((input_A, input_B))\n",
"hidden_A = keras.layers.Dense(30, activation='selu')(hidden_A)\n",
"hidden_B = keras.layers.Dense(30, activation='selu')(hidden_B)\n",
"concat = keras.layers.Concatenate()((hidden_A, hidden_B))\n",
"output = keras.layers.Dense(1)(concat)\n",
"model = keras.models.Model(inputs=[input_A, input_B], outputs=[output])"
"hidden_A = tf.keras.layers.Dense(30, activation='selu')(hidden_A)\n",
"hidden_B = tf.keras.layers.Dense(30, activation='selu')(hidden_B)\n",
"concat = tf.keras.layers.Concatenate()((hidden_A, hidden_B))\n",
"output = tf.keras.layers.Dense(1)(concat)\n",
"model = tf.keras.Model(inputs=[input_A, input_B], outputs=[output])"
]
},
{
@@ -1970,7 +1970,7 @@
"metadata": {},
"outputs": [],
"source": [
"class AddGaussianNoise(keras.layers.Layer):\n",
"class AddGaussianNoise(tf.keras.layers.Layer):\n",
" def __init__(self, stddev, **kwargs):\n",
" super().__init__(**kwargs)\n",
" self.stddev = stddev\n",
@@ -1999,14 +1999,14 @@
"metadata": {},
"outputs": [],
"source": [
"keras.backend.clear_session()\n",
"tf.keras.backend.clear_session()\n",
"np.random.seed(42)\n",
"tf.random.set_seed(42)\n",
"\n",
"model = keras.models.Sequential([\n",
"model = tf.keras.Sequential([\n",
" AddGaussianNoise(stddev=1.0),\n",
" keras.layers.Dense(30, activation=\"selu\"),\n",
" keras.layers.Dense(1)\n",
" tf.keras.layers.Dense(30, activation=\"selu\"),\n",
" tf.keras.layers.Dense(1)\n",
"])"
]
},
@@ -2044,10 +2044,10 @@
"metadata": {},
"outputs": [],
"source": [
"class ResidualBlock(keras.layers.Layer):\n",
"class ResidualBlock(tf.keras.layers.Layer):\n",
" def __init__(self, n_layers, n_neurons, **kwargs):\n",
" super().__init__(**kwargs)\n",
" self.hidden = [keras.layers.Dense(n_neurons, activation=\"elu\",\n",
" self.hidden = [tf.keras.layers.Dense(n_neurons, activation=\"elu\",\n",
" kernel_initializer=\"he_normal\")\n",
" for _ in range(n_layers)]\n",
"\n",
@@ -2064,14 +2064,14 @@
"metadata": {},
"outputs": [],
"source": [
"class ResidualRegressor(keras.models.Model):\n",
"class ResidualRegressor(tf.keras.Model):\n",
" def __init__(self, output_dim, **kwargs):\n",
" super().__init__(**kwargs)\n",
" self.hidden1 = keras.layers.Dense(30, activation=\"elu\",\n",
" self.hidden1 = tf.keras.layers.Dense(30, activation=\"elu\",\n",
" kernel_initializer=\"he_normal\")\n",
" self.block1 = ResidualBlock(2, 30)\n",
" self.block2 = ResidualBlock(2, 30)\n",
" self.out = keras.layers.Dense(output_dim)\n",
" self.out = tf.keras.layers.Dense(output_dim)\n",
"\n",
" def call(self, inputs):\n",
" Z = self.hidden1(inputs)\n",
@@ -2087,7 +2087,7 @@
"metadata": {},
"outputs": [],
"source": [
"keras.backend.clear_session()\n",
"tf.keras.backend.clear_session()\n",
"np.random.seed(42)\n",
"tf.random.set_seed(42)"
]
@@ -2120,7 +2120,7 @@
"metadata": {},
"outputs": [],
"source": [
"model = keras.models.load_model(\"my_custom_model.ckpt\")"
"model = tf.keras.models.load_model(\"my_custom_model.ckpt\")"
]
},
{
@@ -2145,7 +2145,7 @@
"metadata": {},
"outputs": [],
"source": [
"keras.backend.clear_session()\n",
"tf.keras.backend.clear_session()\n",
"np.random.seed(42)\n",
"tf.random.set_seed(42)"
]
@@ -2157,11 +2157,11 @@
"outputs": [],
"source": [
"block1 = ResidualBlock(2, 30)\n",
"model = keras.models.Sequential([\n",
" keras.layers.Dense(30, activation=\"elu\", kernel_initializer=\"he_normal\"),\n",
"model = tf.keras.Sequential([\n",
" tf.keras.layers.Dense(30, activation=\"elu\", kernel_initializer=\"he_normal\"),\n",
" block1, block1, block1, block1,\n",
" ResidualBlock(2, 30),\n",
" keras.layers.Dense(1)\n",
" tf.keras.layers.Dense(1)\n",
"])"
]
},
@@ -2189,7 +2189,7 @@
"metadata": {},
"source": [
"**Note**: the following code has two differences with the code in the book:\n",
"1. It creates a `keras.metrics.Mean()` metric in the constructor and uses it in the `call()` method to track the mean reconstruction loss. Since we only want to do this during training, we add a `training` argument to the `call()` method, and if `training` is `True`, then we update `reconstruction_mean` and we call `self.add_metric()` to ensure it's displayed properly.\n",
"1. It creates a `tf.keras.metrics.Mean()` metric in the constructor and uses it in the `call()` method to track the mean reconstruction loss. Since we only want to do this during training, we add a `training` argument to the `call()` method, and if `training` is `True`, then we update `reconstruction_mean` and we call `self.add_metric()` to ensure it's displayed properly.\n",
"2. Due to an issue introduced in TF 2.2 ([#46858](https://github.com/tensorflow/tensorflow/issues/46858)), we must not call `super().build()` inside the `build()` method."
]
},
@@ -2199,18 +2199,18 @@
"metadata": {},
"outputs": [],
"source": [
"class ReconstructingRegressor(keras.Model):\n",
"class ReconstructingRegressor(tf.keras.Model):\n",
" def __init__(self, output_dim, **kwargs):\n",
" super().__init__(**kwargs)\n",
" self.hidden = [keras.layers.Dense(30, activation=\"selu\",\n",
" self.hidden = [tf.keras.layers.Dense(30, activation=\"selu\",\n",
" kernel_initializer=\"lecun_normal\")\n",
" for _ in range(5)]\n",
" self.out = keras.layers.Dense(output_dim)\n",
" self.reconstruction_mean = keras.metrics.Mean(name=\"reconstruction_error\")\n",
" self.out = tf.keras.layers.Dense(output_dim)\n",
" self.reconstruction_mean = tf.keras.metrics.Mean(name=\"reconstruction_error\")\n",
"\n",
" def build(self, batch_input_shape):\n",
" n_inputs = batch_input_shape[-1]\n",
" self.reconstruct = keras.layers.Dense(n_inputs)\n",
" self.reconstruct = tf.keras.layers.Dense(n_inputs)\n",
" #super().build(batch_input_shape)\n",
"\n",
" def call(self, inputs, training=None):\n",
@@ -2232,7 +2232,7 @@
"metadata": {},
"outputs": [],
"source": [
"keras.backend.clear_session()\n",
"tf.keras.backend.clear_session()\n",
"np.random.seed(42)\n",
"tf.random.set_seed(42)"
]
@@ -2554,7 +2554,7 @@
"metadata": {},
"outputs": [],
"source": [
"keras.backend.clear_session()\n",
"tf.keras.backend.clear_session()\n",
"np.random.seed(42)\n",
"tf.random.set_seed(42)"
]
@@ -2565,11 +2565,11 @@
"metadata": {},
"outputs": [],
"source": [
"l2_reg = keras.regularizers.l2(0.05)\n",
"model = keras.models.Sequential([\n",
" keras.layers.Dense(30, activation=\"elu\", kernel_initializer=\"he_normal\",\n",
"l2_reg = tf.keras.regularizers.l2(0.05)\n",
"model = tf.keras.Sequential([\n",
" tf.keras.layers.Dense(30, activation=\"elu\", kernel_initializer=\"he_normal\",\n",
" kernel_regularizer=l2_reg),\n",
" keras.layers.Dense(1, kernel_regularizer=l2_reg)\n",
" tf.keras.layers.Dense(1, kernel_regularizer=l2_reg)\n",
"])"
]
},
@@ -2606,8 +2606,8 @@
"source": [
"import time\n",
"\n",
"mean_loss = keras.metrics.Mean(name=\"loss\")\n",
"mean_square = keras.metrics.Mean(name=\"mean_square\")\n",
"mean_loss = tf.keras.metrics.Mean(name=\"loss\")\n",
"mean_square = tf.keras.metrics.Mean(name=\"mean_square\")\n",
"for i in range(1, 50 + 1):\n",
" loss = 1 / i\n",
" mean_loss(loss)\n",
@@ -2666,8 +2666,8 @@
"metadata": {},
"outputs": [],
"source": [
"mean_loss = keras.metrics.Mean(name=\"loss\")\n",
"mean_square = keras.metrics.Mean(name=\"mean_square\")\n",
"mean_loss = tf.keras.metrics.Mean(name=\"loss\")\n",
"mean_square = tf.keras.metrics.Mean(name=\"mean_square\")\n",
"for i in range(1, 50 + 1):\n",
" loss = 1 / i\n",
" mean_loss(loss)\n",
@@ -2682,7 +2682,7 @@
"metadata": {},
"outputs": [],
"source": [
"keras.backend.clear_session()\n",
"tf.keras.backend.clear_session()\n",
"np.random.seed(42)\n",
"tf.random.set_seed(42)"
]
@@ -2696,10 +2696,10 @@
"n_epochs = 5\n",
"batch_size = 32\n",
"n_steps = len(X_train) // batch_size\n",
"optimizer = keras.optimizers.Nadam(learning_rate=0.01)\n",
"loss_fn = keras.losses.mean_squared_error\n",
"mean_loss = keras.metrics.Mean()\n",
"metrics = [keras.metrics.MeanAbsoluteError()]"
"optimizer = tf.keras.optimizers.Nadam(learning_rate=0.01)\n",
"loss_fn = tf.keras.losses.mean_squared_error\n",
"mean_loss = tf.keras.metrics.Mean()\n",
"metrics = [tf.keras.metrics.MeanAbsoluteError()]"
]
},
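These objects feed the chapter's custom training loop, which this commit leaves untouched. A condensed sketch of such a loop (the batch sampling and the progress display are simplified; `model`, `X_train`, and `y_train` are assumed from earlier cells):

```python
for epoch in range(1, n_epochs + 1):
    for step in range(1, n_steps + 1):
        idx = np.random.randint(len(X_train), size=batch_size)  # random batch
        X_batch, y_batch = X_train[idx], y_train[idx]
        with tf.GradientTape() as tape:
            y_pred = model(X_batch, training=True)
            main_loss = tf.reduce_mean(loss_fn(y_batch, y_pred))
            loss = tf.add_n([main_loss] + model.losses)  # add regularization
        grads = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))
        mean_loss(loss)
        for metric in metrics:
            metric(y_batch, y_pred)
```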
{
@@ -3013,7 +3013,7 @@
"metadata": {},
"outputs": [],
"source": [
"keras.backend.clear_session()\n",
"tf.keras.backend.clear_session()\n",
"np.random.seed(42)\n",
"tf.random.set_seed(42)"
]
@@ -3341,11 +3341,11 @@
"outputs": [],
"source": [
"# Custom layer\n",
"class MyDense(keras.layers.Layer):\n",
"class MyDense(tf.keras.layers.Layer):\n",
" def __init__(self, units, activation=None, **kwargs):\n",
" super().__init__(**kwargs)\n",
" self.units = units\n",
" self.activation = keras.activations.get(activation)\n",
" self.activation = tf.keras.activations.get(activation)\n",
"\n",
" def build(self, input_shape):\n",
" self.kernel = self.add_weight(name='kernel', \n",
@@ -3369,7 +3369,7 @@
"metadata": {},
"outputs": [],
"source": [
"keras.backend.clear_session()\n",
"tf.keras.backend.clear_session()\n",
"np.random.seed(42)\n",
"tf.random.set_seed(42)"
]
@@ -3381,7 +3381,7 @@
"outputs": [],
"source": [
"# Custom model\n",
"class MyModel(keras.models.Model):\n",
"class MyModel(tf.keras.Model):\n",
" def __init__(self, **kwargs):\n",
" super().__init__(**kwargs)\n",
" self.hidden1 = MyDense(30, activation=\"relu\")\n",
@@ -3392,7 +3392,7 @@
" print(\"Tracing MyModel.call()\")\n",
" hidden1 = self.hidden1(input)\n",
" hidden2 = self.hidden2(hidden1)\n",
" concat = keras.layers.concatenate([input, hidden2])\n",
" concat = tf.keras.layers.concatenate([input, hidden2])\n",
" output = self.output_(concat)\n",
" return output\n",
"\n",
@@ -3432,7 +3432,7 @@
"metadata": {},
"outputs": [],
"source": [
"keras.backend.clear_session()\n",
"tf.keras.backend.clear_session()\n",
"np.random.seed(42)\n",
"tf.random.set_seed(42)"
]
@@ -3486,7 +3486,7 @@
"metadata": {},
"outputs": [],
"source": [
"keras.backend.clear_session()\n",
"tf.keras.backend.clear_session()\n",
"np.random.seed(42)\n",
"tf.random.set_seed(42)"
]
@@ -3540,7 +3540,7 @@
"metadata": {},
"outputs": [],
"source": [
"class MyMomentumOptimizer(keras.optimizers.Optimizer):\n",
"class MyMomentumOptimizer(tf.keras.optimizers.Optimizer):\n",
" def __init__(self, learning_rate=0.001, momentum=0.9, name=\"MyMomentumOptimizer\", **kwargs):\n",
" \"\"\"Call super().__init__() and use _set_hyper() to store hyperparameters\"\"\"\n",
" super().__init__(name, **kwargs)\n",
@@ -3586,7 +3586,7 @@
"metadata": {},
"outputs": [],
"source": [
"keras.backend.clear_session()\n",
"tf.keras.backend.clear_session()\n",
"np.random.seed(42)\n",
"tf.random.set_seed(42)"
]
@@ -3597,7 +3597,7 @@
"metadata": {},
"outputs": [],
"source": [
"model = keras.models.Sequential([keras.layers.Dense(1, input_shape=[8])])\n",
"model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=[8])])\n",
"model.compile(loss=\"mse\", optimizer=MyMomentumOptimizer())\n",
"model.fit(X_train_scaled, y_train, epochs=5)"
]
@@ -3654,7 +3654,7 @@
"metadata": {},
"outputs": [],
"source": [
"class LayerNormalization(keras.layers.Layer):\n",
"class LayerNormalization(tf.keras.layers.Layer):\n",
" def __init__(self, eps=0.001, **kwargs):\n",
" super().__init__(**kwargs)\n",
" self.eps = eps\n",
@@ -3692,7 +3692,7 @@
"metadata": {},
"source": [
"### c.\n",
"_Exercise: Ensure that your custom layer produces the same (or very nearly the same) output as the `keras.layers.LayerNormalization` layer._"
"_Exercise: Ensure that your custom layer produces the same (or very nearly the same) output as the `tf.keras.layers.LayerNormalization` layer._"
]
},
{
@@ -3711,9 +3711,9 @@
"X = X_train.astype(np.float32)\n",
"\n",
"custom_layer_norm = LayerNormalization()\n",
"keras_layer_norm = keras.layers.LayerNormalization()\n",
"keras_layer_norm = tf.keras.layers.LayerNormalization()\n",
"\n",
"tf.reduce_mean(keras.losses.mean_absolute_error(\n",
"tf.reduce_mean(tf.keras.losses.mean_absolute_error(\n",
" keras_layer_norm(X), custom_layer_norm(X)))"
]
},
@@ -3736,7 +3736,7 @@
"custom_layer_norm.set_weights([random_alpha, random_beta])\n",
"keras_layer_norm.set_weights([random_alpha, random_beta])\n",
"\n",
"tf.reduce_mean(keras.losses.mean_absolute_error(\n",
"tf.reduce_mean(tf.keras.losses.mean_absolute_error(\n",
" keras_layer_norm(X), custom_layer_norm(X)))"
]
},
@@ -3769,7 +3769,7 @@
"metadata": {},
"outputs": [],
"source": [
"(X_train_full, y_train_full), (X_test, y_test) = keras.datasets.fashion_mnist.load_data()\n",
"(X_train_full, y_train_full), (X_test, y_test) = tf.keras.datasets.fashion_mnist.load_data()\n",
"X_train_full = X_train_full.astype(np.float32) / 255.\n",
"X_valid, X_train = X_train_full[:5000], X_train_full[5000:]\n",
"y_valid, y_train = y_train_full[:5000], y_train_full[5000:]\n",
@@ -3782,7 +3782,7 @@
"metadata": {},
"outputs": [],
"source": [
"keras.backend.clear_session()\n",
"tf.keras.backend.clear_session()\n",
"np.random.seed(42)\n",
"tf.random.set_seed(42)"
]
@@ -3793,10 +3793,10 @@
"metadata": {},
"outputs": [],
"source": [
"model = keras.models.Sequential([\n",
" keras.layers.Flatten(input_shape=[28, 28]),\n",
" keras.layers.Dense(100, activation=\"relu\"),\n",
" keras.layers.Dense(10, activation=\"softmax\"),\n",
"model = tf.keras.Sequential([\n",
" tf.keras.layers.Flatten(input_shape=[28, 28]),\n",
" tf.keras.layers.Dense(100, activation=\"relu\"),\n",
" tf.keras.layers.Dense(10, activation=\"softmax\"),\n",
"])"
]
},
@@ -3809,10 +3809,10 @@
"n_epochs = 5\n",
"batch_size = 32\n",
"n_steps = len(X_train) // batch_size\n",
"optimizer = keras.optimizers.Nadam(learning_rate=0.01)\n",
"loss_fn = keras.losses.sparse_categorical_crossentropy\n",
"mean_loss = keras.metrics.Mean()\n",
"metrics = [keras.metrics.SparseCategoricalAccuracy()]"
"optimizer = tf.keras.optimizers.Nadam(learning_rate=0.01)\n",
"loss_fn = tf.keras.losses.sparse_categorical_crossentropy\n",
"mean_loss = tf.keras.metrics.Mean()\n",
"metrics = [tf.keras.metrics.SparseCategoricalAccuracy()]"
]
},
{
@@ -3844,7 +3844,7 @@
" steps.set_postfix(status)\n",
" y_pred = model(X_valid)\n",
" status[\"val_loss\"] = np.mean(loss_fn(y_valid, y_pred))\n",
" status[\"val_accuracy\"] = np.mean(keras.metrics.sparse_categorical_accuracy(\n",
" status[\"val_accuracy\"] = np.mean(tf.keras.metrics.sparse_categorical_accuracy(\n",
" tf.constant(y_valid, dtype=np.float32), y_pred))\n",
" steps.set_postfix(status)\n",
" for metric in [mean_loss] + metrics:\n",
@@ -3865,7 +3865,7 @@
"metadata": {},
"outputs": [],
"source": [
"keras.backend.clear_session()\n",
"tf.keras.backend.clear_session()\n",
"np.random.seed(42)\n",
"tf.random.set_seed(42)"
]
@@ -3876,14 +3876,14 @@
"metadata": {},
"outputs": [],
"source": [
"lower_layers = keras.models.Sequential([\n",
" keras.layers.Flatten(input_shape=[28, 28]),\n",
" keras.layers.Dense(100, activation=\"relu\"),\n",
"lower_layers = tf.keras.Sequential([\n",
" tf.keras.layers.Flatten(input_shape=[28, 28]),\n",
" tf.keras.layers.Dense(100, activation=\"relu\"),\n",
"])\n",
"upper_layers = keras.models.Sequential([\n",
" keras.layers.Dense(10, activation=\"softmax\"),\n",
"upper_layers = tf.keras.Sequential([\n",
" tf.keras.layers.Dense(10, activation=\"softmax\"),\n",
"])\n",
"model = keras.models.Sequential([\n",
"model = tf.keras.Sequential([\n",
" lower_layers, upper_layers\n",
"])"
]
@@ -3894,8 +3894,8 @@
"metadata": {},
"outputs": [],
"source": [
"lower_optimizer = keras.optimizers.SGD(learning_rate=1e-4)\n",
"upper_optimizer = keras.optimizers.Nadam(learning_rate=1e-3)"
"lower_optimizer = tf.keras.optimizers.SGD(learning_rate=1e-4)\n",
"upper_optimizer = tf.keras.optimizers.Nadam(learning_rate=1e-3)"
]
},
{
@ -3907,9 +3907,9 @@
"n_epochs = 5\n",
"batch_size = 32\n",
"n_steps = len(X_train) // batch_size\n",
"loss_fn = keras.losses.sparse_categorical_crossentropy\n",
"mean_loss = keras.metrics.Mean()\n",
"metrics = [keras.metrics.SparseCategoricalAccuracy()]"
"loss_fn = tf.keras.losses.sparse_categorical_crossentropy\n",
"mean_loss = tf.keras.metrics.Mean()\n",
"metrics = [tf.keras.metrics.SparseCategoricalAccuracy()]"
]
},
{
@ -3944,7 +3944,7 @@
" steps.set_postfix(status)\n",
" y_pred = model(X_valid)\n",
" status[\"val_loss\"] = np.mean(loss_fn(y_valid, y_pred))\n",
" status[\"val_accuracy\"] = np.mean(keras.metrics.sparse_categorical_accuracy(\n",
" status[\"val_accuracy\"] = np.mean(tf.keras.metrics.sparse_categorical_accuracy(\n",
" tf.constant(y_valid, dtype=np.float32), y_pred))\n",
" steps.set_postfix(status)\n",
" for metric in [mean_loss] + metrics:\n",


@@ -548,13 +548,13 @@
"metadata": {},
"outputs": [],
"source": [
"keras.backend.clear_session()\n",
"tf.keras.backend.clear_session()\n",
"np.random.seed(42)\n",
"tf.random.set_seed(42)\n",
"\n",
"model = keras.models.Sequential([\n",
" keras.layers.Dense(30, activation=\"relu\", input_shape=X_train.shape[1:]),\n",
" keras.layers.Dense(1),\n",
"model = tf.keras.Sequential([\n",
" tf.keras.layers.Dense(30, activation=\"relu\", input_shape=X_train.shape[1:]),\n",
" tf.keras.layers.Dense(1),\n",
"])"
]
},
@@ -564,7 +564,7 @@
"metadata": {},
"outputs": [],
"source": [
"model.compile(loss=\"mse\", optimizer=keras.optimizers.SGD(learning_rate=1e-3))"
"model.compile(loss=\"mse\", optimizer=tf.keras.optimizers.SGD(learning_rate=1e-3))"
]
},
{
@@ -606,8 +606,8 @@
"metadata": {},
"outputs": [],
"source": [
"optimizer = keras.optimizers.Nadam(learning_rate=0.01)\n",
"loss_fn = keras.losses.mean_squared_error\n",
"optimizer = tf.keras.optimizers.Nadam(learning_rate=0.01)\n",
"loss_fn = tf.keras.losses.mean_squared_error\n",
"\n",
"n_epochs = 5\n",
"batch_size = 32\n",
@@ -631,7 +631,7 @@
"metadata": {},
"outputs": [],
"source": [
"keras.backend.clear_session()\n",
"tf.keras.backend.clear_session()\n",
"np.random.seed(42)\n",
"tf.random.set_seed(42)"
]
@@ -642,8 +642,8 @@
"metadata": {},
"outputs": [],
"source": [
"optimizer = keras.optimizers.Nadam(learning_rate=0.01)\n",
"loss_fn = keras.losses.mean_squared_error\n",
"optimizer = tf.keras.optimizers.Nadam(learning_rate=0.01)\n",
"loss_fn = tf.keras.losses.mean_squared_error\n",
"\n",
"@tf.function\n",
"def train(model, n_epochs, batch_size=32,\n",
@@ -668,7 +668,7 @@
"metadata": {},
"outputs": [],
"source": [
"keras.backend.clear_session()\n",
"tf.keras.backend.clear_session()\n",
"np.random.seed(42)\n",
"tf.random.set_seed(42)"
]
@@ -679,8 +679,8 @@
"metadata": {},
"outputs": [],
"source": [
"optimizer = keras.optimizers.Nadam(learning_rate=0.01)\n",
"loss_fn = keras.losses.mean_squared_error\n",
"optimizer = tf.keras.optimizers.Nadam(learning_rate=0.01)\n",
"loss_fn = tf.keras.losses.mean_squared_error\n",
"\n",
"@tf.function\n",
"def train(model, n_epochs, batch_size=32,\n",
@@ -1632,7 +1632,7 @@
"metadata": {},
"outputs": [],
"source": [
"keras.backend.clear_session()\n",
"tf.keras.backend.clear_session()\n",
"np.random.seed(42)\n",
"tf.random.set_seed(42)"
]
@@ -1667,12 +1667,12 @@
"outputs": [],
"source": [
"columns_without_target = columns[:-1]\n",
"model = keras.models.Sequential([\n",
" keras.layers.DenseFeatures(feature_columns=columns_without_target),\n",
" keras.layers.Dense(1)\n",
"model = tf.keras.Sequential([\n",
" tf.keras.layers.DenseFeatures(feature_columns=columns_without_target),\n",
" tf.keras.layers.Dense(1)\n",
"])\n",
"model.compile(loss=\"mse\",\n",
" optimizer=keras.optimizers.SGD(learning_rate=1e-3),\n",
" optimizer=tf.keras.optimizers.SGD(learning_rate=1e-3),\n",
" metrics=[\"accuracy\"])\n",
"model.fit(dataset, steps_per_epoch=len(X_train) // batch_size, epochs=5)"
]
@@ -1684,7 +1684,7 @@
"outputs": [],
"source": [
"some_columns = [ocean_proximity_embed, bucketized_income]\n",
"dense_features = keras.layers.DenseFeatures(some_columns)\n",
"dense_features = tf.keras.layers.DenseFeatures(some_columns)\n",
"dense_features({\n",
" \"ocean_proximity\": [[\"NEAR OCEAN\"], [\"INLAND\"], [\"INLAND\"]],\n",
" \"median_income\": [[3.], [7.2], [1.]]\n",
@@ -1791,7 +1791,7 @@
"metadata": {},
"outputs": [],
"source": [
"keras.backend.clear_session()\n",
"tf.keras.backend.clear_session()\n",
"np.random.seed(42)\n",
"tf.random.set_seed(42)"
]
@@ -1804,12 +1804,12 @@
"source": [
"datasets = tfds.load(name=\"mnist\", batch_size=32, as_supervised=True)\n",
"mnist_train = datasets[\"train\"].repeat().prefetch(1)\n",
"model = keras.models.Sequential([\n",
" keras.layers.Flatten(input_shape=[28, 28, 1]),\n",
" keras.layers.Lambda(lambda images: tf.cast(images, tf.float32)),\n",
" keras.layers.Dense(10, activation=\"softmax\")])\n",
"model = tf.keras.Sequential([\n",
" tf.keras.layers.Flatten(input_shape=[28, 28, 1]),\n",
" tf.keras.layers.Lambda(lambda images: tf.cast(images, tf.float32)),\n",
" tf.keras.layers.Dense(10, activation=\"softmax\")])\n",
"model.compile(loss=\"sparse_categorical_crossentropy\",\n",
" optimizer=keras.optimizers.SGD(learning_rate=1e-3),\n",
" optimizer=tf.keras.optimizers.SGD(learning_rate=1e-3),\n",
" metrics=[\"accuracy\"])\n",
"model.fit(mnist_train, steps_per_epoch=60000 // 32, epochs=5)"
]
@@ -1827,7 +1827,7 @@
"metadata": {},
"outputs": [],
"source": [
"keras.backend.clear_session()\n",
"tf.keras.backend.clear_session()\n",
"np.random.seed(42)\n",
"tf.random.set_seed(42)"
]
@@ -1843,10 +1843,10 @@
"hub_layer = hub.KerasLayer(\"https://tfhub.dev/google/nnlm-en-dim50/2\",\n",
" output_shape=[50], input_shape=[], dtype=tf.string)\n",
"\n",
"model = keras.Sequential()\n",
"model = tf.keras.Sequential()\n",
"model.add(hub_layer)\n",
"model.add(keras.layers.Dense(16, activation='relu'))\n",
"model.add(keras.layers.Dense(1, activation='sigmoid'))\n",
"model.add(tf.keras.layers.Dense(16, activation='relu'))\n",
"model.add(tf.keras.layers.Dense(1, activation='sigmoid'))\n",
"\n",
"model.summary()"
]
@@ -1890,7 +1890,7 @@
"metadata": {},
"outputs": [],
"source": [
"(X_train_full, y_train_full), (X_test, y_test) = keras.datasets.fashion_mnist.load_data()\n",
"(X_train_full, y_train_full), (X_test, y_test) = tf.keras.datasets.fashion_mnist.load_data()\n",
"X_valid, X_train = X_train_full[:5000], X_train_full[5000:]\n",
"y_valid, y_train = y_train_full[:5000], y_train_full[5000:]"
]
@@ -1901,7 +1901,7 @@
"metadata": {},
"outputs": [],
"source": [
"keras.backend.clear_session()\n",
"tf.keras.backend.clear_session()\n",
"np.random.seed(42)\n",
"tf.random.set_seed(42)"
]
@@ -2052,31 +2052,31 @@
"metadata": {},
"outputs": [],
"source": [
"keras.backend.clear_session()\n",
"tf.keras.backend.clear_session()\n",
"tf.random.set_seed(42)\n",
"np.random.seed(42)\n",
"\n",
"class Standardization(keras.layers.Layer):\n",
"class Standardization(tf.keras.layers.Layer):\n",
" def adapt(self, data_sample):\n",
" self.means_ = np.mean(data_sample, axis=0, keepdims=True)\n",
" self.stds_ = np.std(data_sample, axis=0, keepdims=True)\n",
" def call(self, inputs):\n",
" return (inputs - self.means_) / (self.stds_ + keras.backend.epsilon())\n",
" return (inputs - self.means_) / (self.stds_ + tf.keras.backend.epsilon())\n",
"\n",
"standardization = Standardization(input_shape=[28, 28])\n",
"# or perhaps soon:\n",
"#standardization = keras.layers.Normalization()\n",
"#standardization = tf.keras.layers.Normalization()\n",
"\n",
"sample_image_batches = train_set.take(100).map(lambda image, label: image)\n",
"sample_images = np.concatenate(list(sample_image_batches.as_numpy_iterator()),\n",
" axis=0).astype(np.float32)\n",
"standardization.adapt(sample_images)\n",
"\n",
"model = keras.models.Sequential([\n",
"model = tf.keras.Sequential([\n",
" standardization,\n",
" keras.layers.Flatten(),\n",
" keras.layers.Dense(100, activation=\"relu\"),\n",
" keras.layers.Dense(10, activation=\"softmax\")\n",
" tf.keras.layers.Flatten(),\n",
" tf.keras.layers.Dense(100, activation=\"relu\"),\n",
" tf.keras.layers.Dense(10, activation=\"softmax\")\n",
"])\n",
"model.compile(loss=\"sparse_categorical_crossentropy\",\n",
" optimizer=\"nadam\", metrics=[\"accuracy\"])"
@ -2137,7 +2137,7 @@
"\n",
"root = \"http://ai.stanford.edu/~amaas/data/sentiment/\"\n",
"filename = \"aclImdb_v1.tar.gz\"\n",
"filepath = keras.utils.get_file(filename, root + filename, extract=True)\n",
"filepath = tf.keras.utils.get_file(filename, root + filename, extract=True)\n",
"path = Path(filepath).with_name(\"aclImdb\")\n",
"path"
]
@ -2416,7 +2416,7 @@
"metadata": {},
"outputs": [],
"source": [
"class TextVectorization(keras.layers.Layer):\n",
"class TextVectorization(tf.keras.layers.Layer):\n",
" def __init__(self, max_vocabulary_size=1000, n_oov_buckets=100, dtype=tf.string, **kwargs):\n",
" super().__init__(dtype=dtype, **kwargs)\n",
" self.max_vocabulary_size = max_vocabulary_size\n",
@ -2549,7 +2549,7 @@
"metadata": {},
"outputs": [],
"source": [
"class BagOfWords(keras.layers.Layer):\n",
"class BagOfWords(tf.keras.layers.Layer):\n",
" def __init__(self, n_tokens, dtype=tf.int32, **kwargs):\n",
" super().__init__(dtype=dtype, **kwargs)\n",
" self.n_tokens = n_tokens\n",
@ -2605,11 +2605,11 @@
"metadata": {},
"outputs": [],
"source": [
"model = keras.models.Sequential([\n",
"model = tf.keras.Sequential([\n",
" text_vectorization,\n",
" bag_of_words,\n",
" keras.layers.Dense(100, activation=\"relu\"),\n",
" keras.layers.Dense(1, activation=\"sigmoid\"),\n",
" tf.keras.layers.Dense(100, activation=\"relu\"),\n",
" tf.keras.layers.Dense(1, activation=\"sigmoid\"),\n",
"])\n",
"model.compile(loss=\"binary_crossentropy\", optimizer=\"nadam\",\n",
" metrics=[\"accuracy\"])\n",
@ -2702,14 +2702,14 @@
"source": [
"embedding_size = 20\n",
"\n",
"model = keras.models.Sequential([\n",
"model = tf.keras.Sequential([\n",
" text_vectorization,\n",
" keras.layers.Embedding(input_dim=n_tokens,\n",
" tf.keras.layers.Embedding(input_dim=n_tokens,\n",
" output_dim=embedding_size,\n",
" mask_zero=True), # <pad> tokens => zero vectors\n",
" keras.layers.Lambda(compute_mean_embedding),\n",
" keras.layers.Dense(100, activation=\"relu\"),\n",
" keras.layers.Dense(1, activation=\"sigmoid\"),\n",
" tf.keras.layers.Lambda(compute_mean_embedding),\n",
" tf.keras.layers.Dense(100, activation=\"relu\"),\n",
" tf.keras.layers.Dense(1, activation=\"sigmoid\"),\n",
"])"
]
},

View File

@ -217,7 +217,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"Let's create a 2D convolutional layer, using `keras.layers.Conv2D()`:"
"Let's create a 2D convolutional layer, using `tf.keras.layers.Conv2D()`:"
]
},
{
@ -229,7 +229,7 @@
"np.random.seed(42)\n",
"tf.random.set_seed(42)\n",
"\n",
"conv = keras.layers.Conv2D(filters=2, kernel_size=7, strides=1,\n",
"conv = tf.keras.layers.Conv2D(filters=2, kernel_size=7, strides=1,\n",
" padding=\"SAME\", activation=\"relu\", input_shape=outputs.shape)"
]
},
@ -399,8 +399,8 @@
"kernel_size = 7\n",
"strides = 2\n",
"\n",
"conv_valid = keras.layers.Conv2D(filters=1, kernel_size=kernel_size, strides=strides, padding=\"VALID\")\n",
"conv_same = keras.layers.Conv2D(filters=1, kernel_size=kernel_size, strides=strides, padding=\"SAME\")\n",
"conv_valid = tf.keras.layers.Conv2D(filters=1, kernel_size=kernel_size, strides=strides, padding=\"VALID\")\n",
"conv_same = tf.keras.layers.Conv2D(filters=1, kernel_size=kernel_size, strides=strides, padding=\"SAME\")\n",
"\n",
"valid_output = conv_valid(manual_same_padding(images, kernel_size, strides))\n",
"\n",
@ -435,7 +435,7 @@
"metadata": {},
"outputs": [],
"source": [
"max_pool = keras.layers.MaxPool2D(pool_size=2)"
"max_pool = tf.keras.layers.MaxPool2D(pool_size=2)"
]
},
{
@ -482,7 +482,7 @@
"metadata": {},
"outputs": [],
"source": [
"class DepthMaxPool(keras.layers.Layer):\n",
"class DepthMaxPool(tf.keras.layers.Layer):\n",
" def __init__(self, pool_size, strides=None, padding=\"VALID\", **kwargs):\n",
" super().__init__(**kwargs)\n",
" if strides is None:\n",
@ -522,7 +522,7 @@
"metadata": {},
"outputs": [],
"source": [
"depth_pool = keras.layers.Lambda(lambda X: tf.nn.max_pool(\n",
"depth_pool = tf.keras.layers.Lambda(lambda X: tf.nn.max_pool(\n",
" X, ksize=(1, 1, 1, 3), strides=(1, 1, 1, 3), padding=\"VALID\"))\n",
"with tf.device(\"/cpu:0\"): # there is no GPU-kernel yet\n",
" depth_output = depth_pool(cropped_images)\n",
@ -559,7 +559,7 @@
"metadata": {},
"outputs": [],
"source": [
"avg_pool = keras.layers.AvgPool2D(pool_size=2)"
"avg_pool = tf.keras.layers.AvgPool2D(pool_size=2)"
]
},
{
@ -604,7 +604,7 @@
"metadata": {},
"outputs": [],
"source": [
"global_avg_pool = keras.layers.GlobalAvgPool2D()\n",
"global_avg_pool = tf.keras.layers.GlobalAvgPool2D()\n",
"global_avg_pool(cropped_images)"
]
},
@ -614,7 +614,7 @@
"metadata": {},
"outputs": [],
"source": [
"output_global_avg2 = keras.layers.Lambda(lambda X: tf.reduce_mean(X, axis=[1, 2]))\n",
"output_global_avg2 = tf.keras.layers.Lambda(lambda X: tf.reduce_mean(X, axis=[1, 2]))\n",
"output_global_avg2(cropped_images)"
]
},
@ -631,7 +631,7 @@
"metadata": {},
"outputs": [],
"source": [
"(X_train_full, y_train_full), (X_test, y_test) = keras.datasets.fashion_mnist.load_data()\n",
"(X_train_full, y_train_full), (X_test, y_test) = tf.keras.datasets.fashion_mnist.load_data()\n",
"X_train, X_valid = X_train_full[:-5000], X_train_full[-5000:]\n",
"y_train, y_valid = y_train_full[:-5000], y_train_full[-5000:]\n",
"\n",
@ -654,24 +654,24 @@
"source": [
"from functools import partial\n",
"\n",
"DefaultConv2D = partial(keras.layers.Conv2D,\n",
"DefaultConv2D = partial(tf.keras.layers.Conv2D,\n",
" kernel_size=3, activation='relu', padding=\"SAME\")\n",
"\n",
"model = keras.models.Sequential([\n",
"model = tf.keras.Sequential([\n",
" DefaultConv2D(filters=64, kernel_size=7, input_shape=[28, 28, 1]),\n",
" keras.layers.MaxPooling2D(pool_size=2),\n",
" tf.keras.layers.MaxPooling2D(pool_size=2),\n",
" DefaultConv2D(filters=128),\n",
" DefaultConv2D(filters=128),\n",
" keras.layers.MaxPooling2D(pool_size=2),\n",
" tf.keras.layers.MaxPooling2D(pool_size=2),\n",
" DefaultConv2D(filters=256),\n",
" DefaultConv2D(filters=256),\n",
" keras.layers.MaxPooling2D(pool_size=2),\n",
" keras.layers.Flatten(),\n",
" keras.layers.Dense(units=128, activation='relu'),\n",
" keras.layers.Dropout(0.5),\n",
" keras.layers.Dense(units=64, activation='relu'),\n",
" keras.layers.Dropout(0.5),\n",
" keras.layers.Dense(units=10, activation='softmax'),\n",
" tf.keras.layers.MaxPooling2D(pool_size=2),\n",
" tf.keras.layers.Flatten(),\n",
" tf.keras.layers.Dense(units=128, activation='relu'),\n",
" tf.keras.layers.Dropout(0.5),\n",
" tf.keras.layers.Dense(units=64, activation='relu'),\n",
" tf.keras.layers.Dropout(0.5),\n",
" tf.keras.layers.Dense(units=10, activation='softmax'),\n",
"])"
]
},
@ -701,24 +701,24 @@
"metadata": {},
"outputs": [],
"source": [
"DefaultConv2D = partial(keras.layers.Conv2D, kernel_size=3, strides=1,\n",
"DefaultConv2D = partial(tf.keras.layers.Conv2D, kernel_size=3, strides=1,\n",
" padding=\"SAME\", use_bias=False)\n",
"\n",
"class ResidualUnit(keras.layers.Layer):\n",
"class ResidualUnit(tf.keras.layers.Layer):\n",
" def __init__(self, filters, strides=1, activation=\"relu\", **kwargs):\n",
" super().__init__(**kwargs)\n",
" self.activation = keras.activations.get(activation)\n",
" self.activation = tf.keras.activations.get(activation)\n",
" self.main_layers = [\n",
" DefaultConv2D(filters, strides=strides),\n",
" keras.layers.BatchNormalization(),\n",
" tf.keras.layers.BatchNormalization(),\n",
" self.activation,\n",
" DefaultConv2D(filters),\n",
" keras.layers.BatchNormalization()]\n",
" tf.keras.layers.BatchNormalization()]\n",
" self.skip_layers = []\n",
" if strides > 1:\n",
" self.skip_layers = [\n",
" DefaultConv2D(filters, kernel_size=1, strides=strides),\n",
" keras.layers.BatchNormalization()]\n",
" tf.keras.layers.BatchNormalization()]\n",
"\n",
" def call(self, inputs):\n",
" Z = inputs\n",
@ -736,20 +736,20 @@
"metadata": {},
"outputs": [],
"source": [
"model = keras.models.Sequential()\n",
"model = tf.keras.Sequential()\n",
"model.add(DefaultConv2D(64, kernel_size=7, strides=2,\n",
" input_shape=[224, 224, 3]))\n",
"model.add(keras.layers.BatchNormalization())\n",
"model.add(keras.layers.Activation(\"relu\"))\n",
"model.add(keras.layers.MaxPool2D(pool_size=3, strides=2, padding=\"SAME\"))\n",
"model.add(tf.keras.layers.BatchNormalization())\n",
"model.add(tf.keras.layers.Activation(\"relu\"))\n",
"model.add(tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding=\"SAME\"))\n",
"prev_filters = 64\n",
"for filters in [64] * 3 + [128] * 4 + [256] * 6 + [512] * 3:\n",
" strides = 1 if filters == prev_filters else 2\n",
" model.add(ResidualUnit(filters, strides=strides))\n",
" prev_filters = filters\n",
"model.add(keras.layers.GlobalAvgPool2D())\n",
"model.add(keras.layers.Flatten())\n",
"model.add(keras.layers.Dense(10, activation=\"softmax\"))"
"model.add(tf.keras.layers.GlobalAvgPool2D())\n",
"model.add(tf.keras.layers.Flatten())\n",
"model.add(tf.keras.layers.Dense(10, activation=\"softmax\"))"
]
},
{
@ -774,7 +774,7 @@
"metadata": {},
"outputs": [],
"source": [
"model = keras.applications.resnet50.ResNet50(weights=\"imagenet\")"
"model = tf.keras.applications.resnet50.ResNet50(weights=\"imagenet\")"
]
},
{
@ -830,7 +830,7 @@
"metadata": {},
"outputs": [],
"source": [
"inputs = keras.applications.resnet50.preprocess_input(images_resized * 255)\n",
"inputs = tf.keras.applications.resnet50.preprocess_input(images_resized * 255)\n",
"Y_proba = model.predict(inputs)"
]
},
@ -849,7 +849,7 @@
"metadata": {},
"outputs": [],
"source": [
"top_K = keras.applications.resnet50.decode_predictions(Y_proba, top=3)\n",
"top_K = tf.keras.applications.resnet50.decode_predictions(Y_proba, top=3)\n",
"for image_index in range(len(images)):\n",
" print(\"Image #{}\".format(image_index))\n",
" for class_id, name, y_proba in top_K[image_index]:\n",
@ -974,7 +974,7 @@
"source": [
"def preprocess(image, label):\n",
" resized_image = tf.image.resize(image, [224, 224])\n",
" final_image = keras.applications.xception.preprocess_input(resized_image)\n",
" final_image = tf.keras.applications.xception.preprocess_input(resized_image)\n",
" return final_image, label"
]
},
@ -1012,7 +1012,7 @@
" else:\n",
" cropped_image = central_crop(image)\n",
" resized_image = tf.image.resize(cropped_image, [224, 224])\n",
" final_image = keras.applications.xception.preprocess_input(resized_image)\n",
" final_image = tf.keras.applications.xception.preprocess_input(resized_image)\n",
" return final_image, label\n",
"\n",
"batch_size = 32\n",
@ -1062,11 +1062,11 @@
"metadata": {},
"outputs": [],
"source": [
"base_model = keras.applications.xception.Xception(weights=\"imagenet\",\n",
"base_model = tf.keras.applications.xception.Xception(weights=\"imagenet\",\n",
" include_top=False)\n",
"avg = keras.layers.GlobalAveragePooling2D()(base_model.output)\n",
"output = keras.layers.Dense(n_classes, activation=\"softmax\")(avg)\n",
"model = keras.models.Model(inputs=base_model.input, outputs=output)"
"avg = tf.keras.layers.GlobalAveragePooling2D()(base_model.output)\n",
"output = tf.keras.layers.Dense(n_classes, activation=\"softmax\")(avg)\n",
"model = tf.keras.Model(inputs=base_model.input, outputs=output)"
]
},
{
@ -1088,7 +1088,7 @@
"for layer in base_model.layers:\n",
" layer.trainable = False\n",
"\n",
"optimizer = keras.optimizers.SGD(learning_rate=0.2, momentum=0.9, decay=0.01)\n",
"optimizer = tf.keras.optimizers.SGD(learning_rate=0.2, momentum=0.9, decay=0.01)\n",
"model.compile(loss=\"sparse_categorical_crossentropy\", optimizer=optimizer,\n",
" metrics=[\"accuracy\"])\n",
"history = model.fit(train_set,\n",
@ -1107,7 +1107,7 @@
"for layer in base_model.layers:\n",
" layer.trainable = True\n",
"\n",
"optimizer = keras.optimizers.SGD(learning_rate=0.01, momentum=0.9,\n",
"optimizer = tf.keras.optimizers.SGD(learning_rate=0.01, momentum=0.9,\n",
" nesterov=True, decay=0.001)\n",
"model.compile(loss=\"sparse_categorical_crossentropy\", optimizer=optimizer,\n",
" metrics=[\"accuracy\"])\n",
@ -1131,12 +1131,12 @@
"metadata": {},
"outputs": [],
"source": [
"base_model = keras.applications.xception.Xception(weights=\"imagenet\",\n",
"base_model = tf.keras.applications.xception.Xception(weights=\"imagenet\",\n",
" include_top=False)\n",
"avg = keras.layers.GlobalAveragePooling2D()(base_model.output)\n",
"class_output = keras.layers.Dense(n_classes, activation=\"softmax\")(avg)\n",
"loc_output = keras.layers.Dense(4)(avg)\n",
"model = keras.models.Model(inputs=base_model.input,\n",
"avg = tf.keras.layers.GlobalAveragePooling2D()(base_model.output)\n",
"class_output = tf.keras.layers.Dense(n_classes, activation=\"softmax\")(avg)\n",
"loc_output = tf.keras.layers.Dense(4)(avg)\n",
"model = tf.keras.Model(inputs=base_model.input,\n",
" outputs=[class_output, loc_output])\n",
"model.compile(loss=[\"sparse_categorical_crossentropy\", \"mse\"],\n",
" loss_weights=[0.8, 0.2], # depends on what you care most about\n",
@ -1220,7 +1220,7 @@
"tf.random.set_seed(42)\n",
"X = images_resized.numpy()\n",
"\n",
"conv_transpose = keras.layers.Conv2DTranspose(filters=5, kernel_size=3, strides=2, padding=\"VALID\")\n",
"conv_transpose = tf.keras.layers.Conv2DTranspose(filters=5, kernel_size=3, strides=2, padding=\"VALID\")\n",
"output = conv_transpose(X)\n",
"output.shape"
]
@ -1360,7 +1360,7 @@
"metadata": {},
"outputs": [],
"source": [
"(X_train_full, y_train_full), (X_test, y_test) = keras.datasets.mnist.load_data()\n",
"(X_train_full, y_train_full), (X_test, y_test) = tf.keras.datasets.mnist.load_data()\n",
"X_train_full = X_train_full / 255.\n",
"X_test = X_test / 255.\n",
"X_train, X_valid = X_train_full[:-5000], X_train_full[-5000:]\n",
@ -1377,19 +1377,19 @@
"metadata": {},
"outputs": [],
"source": [
"keras.backend.clear_session()\n",
"tf.keras.backend.clear_session()\n",
"tf.random.set_seed(42)\n",
"np.random.seed(42)\n",
"\n",
"model = keras.models.Sequential([\n",
" keras.layers.Conv2D(32, kernel_size=3, padding=\"same\", activation=\"relu\"),\n",
" keras.layers.Conv2D(64, kernel_size=3, padding=\"same\", activation=\"relu\"),\n",
" keras.layers.MaxPool2D(),\n",
" keras.layers.Flatten(),\n",
" keras.layers.Dropout(0.25),\n",
" keras.layers.Dense(128, activation=\"relu\"),\n",
" keras.layers.Dropout(0.5),\n",
" keras.layers.Dense(10, activation=\"softmax\")\n",
"model = tf.keras.Sequential([\n",
" tf.keras.layers.Conv2D(32, kernel_size=3, padding=\"same\", activation=\"relu\"),\n",
" tf.keras.layers.Conv2D(64, kernel_size=3, padding=\"same\", activation=\"relu\"),\n",
" tf.keras.layers.MaxPool2D(),\n",
" tf.keras.layers.Flatten(),\n",
" tf.keras.layers.Dropout(0.25),\n",
" tf.keras.layers.Dense(128, activation=\"relu\"),\n",
" tf.keras.layers.Dropout(0.5),\n",
" tf.keras.layers.Dense(10, activation=\"softmax\")\n",
"])\n",
"model.compile(loss=\"sparse_categorical_crossentropy\", optimizer=\"nadam\",\n",
" metrics=[\"accuracy\"])\n",

View File

@ -211,7 +211,7 @@
"outputs": [],
"source": [
"y_pred = X_valid[:, -1]\n",
"np.mean(keras.losses.mean_squared_error(y_valid, y_pred))"
"np.mean(tf.keras.losses.mean_squared_error(y_valid, y_pred))"
]
},
{
@ -240,9 +240,9 @@
"np.random.seed(42)\n",
"tf.random.set_seed(42)\n",
"\n",
"model = keras.models.Sequential([\n",
" keras.layers.Flatten(input_shape=[50, 1]),\n",
" keras.layers.Dense(1)\n",
"model = tf.keras.Sequential([\n",
" tf.keras.layers.Flatten(input_shape=[50, 1]),\n",
" tf.keras.layers.Dense(1)\n",
"])\n",
"\n",
"model.compile(loss=\"mse\", optimizer=\"adam\")\n",
@ -306,11 +306,11 @@
"np.random.seed(42)\n",
"tf.random.set_seed(42)\n",
"\n",
"model = keras.models.Sequential([\n",
" keras.layers.SimpleRNN(1, input_shape=[None, 1])\n",
"model = tf.keras.Sequential([\n",
" tf.keras.layers.SimpleRNN(1, input_shape=[None, 1])\n",
"])\n",
"\n",
"optimizer = keras.optimizers.Adam(learning_rate=0.005)\n",
"optimizer = tf.keras.optimizers.Adam(learning_rate=0.005)\n",
"model.compile(loss=\"mse\", optimizer=optimizer)\n",
"history = model.fit(X_train, y_train, epochs=20,\n",
" validation_data=(X_valid, y_valid))"
@ -362,10 +362,10 @@
"np.random.seed(42)\n",
"tf.random.set_seed(42)\n",
"\n",
"model = keras.models.Sequential([\n",
" keras.layers.SimpleRNN(20, return_sequences=True, input_shape=[None, 1]),\n",
" keras.layers.SimpleRNN(20, return_sequences=True),\n",
" keras.layers.SimpleRNN(1)\n",
"model = tf.keras.Sequential([\n",
" tf.keras.layers.SimpleRNN(20, return_sequences=True, input_shape=[None, 1]),\n",
" tf.keras.layers.SimpleRNN(20, return_sequences=True),\n",
" tf.keras.layers.SimpleRNN(1)\n",
"])\n",
"\n",
"model.compile(loss=\"mse\", optimizer=\"adam\")\n",
@ -419,10 +419,10 @@
"np.random.seed(42)\n",
"tf.random.set_seed(42)\n",
"\n",
"model = keras.models.Sequential([\n",
" keras.layers.SimpleRNN(20, return_sequences=True, input_shape=[None, 1]),\n",
" keras.layers.SimpleRNN(20),\n",
" keras.layers.Dense(1)\n",
"model = tf.keras.Sequential([\n",
" tf.keras.layers.SimpleRNN(20, return_sequences=True, input_shape=[None, 1]),\n",
" tf.keras.layers.SimpleRNN(20),\n",
" tf.keras.layers.Dense(1)\n",
"])\n",
"\n",
"model.compile(loss=\"mse\", optimizer=\"adam\")\n",
@ -572,7 +572,7 @@
"metadata": {},
"outputs": [],
"source": [
"np.mean(keras.metrics.mean_squared_error(Y_valid, Y_pred))"
"np.mean(tf.keras.metrics.mean_squared_error(Y_valid, Y_pred))"
]
},
{
@ -589,7 +589,7 @@
"outputs": [],
"source": [
"Y_naive_pred = np.tile(X_valid[:, -1], 10) # take the last time step value, and repeat it 10 times\n",
"np.mean(keras.metrics.mean_squared_error(Y_valid, Y_naive_pred))"
"np.mean(tf.keras.metrics.mean_squared_error(Y_valid, Y_naive_pred))"
]
},
{
@ -601,9 +601,9 @@
"np.random.seed(42)\n",
"tf.random.set_seed(42)\n",
"\n",
"model = keras.models.Sequential([\n",
" keras.layers.Flatten(input_shape=[50, 1]),\n",
" keras.layers.Dense(10)\n",
"model = tf.keras.Sequential([\n",
" tf.keras.layers.Flatten(input_shape=[50, 1]),\n",
" tf.keras.layers.Dense(10)\n",
"])\n",
"\n",
"model.compile(loss=\"mse\", optimizer=\"adam\")\n",
@ -627,10 +627,10 @@
"np.random.seed(42)\n",
"tf.random.set_seed(42)\n",
"\n",
"model = keras.models.Sequential([\n",
" keras.layers.SimpleRNN(20, return_sequences=True, input_shape=[None, 1]),\n",
" keras.layers.SimpleRNN(20),\n",
" keras.layers.Dense(10)\n",
"model = tf.keras.Sequential([\n",
" tf.keras.layers.SimpleRNN(20, return_sequences=True, input_shape=[None, 1]),\n",
" tf.keras.layers.SimpleRNN(20),\n",
" tf.keras.layers.Dense(10)\n",
"])\n",
"\n",
"model.compile(loss=\"mse\", optimizer=\"adam\")\n",
@ -707,16 +707,16 @@
"np.random.seed(42)\n",
"tf.random.set_seed(42)\n",
"\n",
"model = keras.models.Sequential([\n",
" keras.layers.SimpleRNN(20, return_sequences=True, input_shape=[None, 1]),\n",
" keras.layers.SimpleRNN(20, return_sequences=True),\n",
" keras.layers.TimeDistributed(keras.layers.Dense(10))\n",
"model = tf.keras.Sequential([\n",
" tf.keras.layers.SimpleRNN(20, return_sequences=True, input_shape=[None, 1]),\n",
" tf.keras.layers.SimpleRNN(20, return_sequences=True),\n",
" tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(10))\n",
"])\n",
"\n",
"def last_time_step_mse(Y_true, Y_pred):\n",
" return keras.metrics.mean_squared_error(Y_true[:, -1], Y_pred[:, -1])\n",
" return tf.keras.metrics.mean_squared_error(Y_true[:, -1], Y_pred[:, -1])\n",
"\n",
"model.compile(loss=\"mse\", optimizer=keras.optimizers.Adam(learning_rate=0.01), metrics=[last_time_step_mse])\n",
"model.compile(loss=\"mse\", optimizer=tf.keras.optimizers.Adam(learning_rate=0.01), metrics=[last_time_step_mse])\n",
"history = model.fit(X_train, Y_train, epochs=20,\n",
" validation_data=(X_valid, Y_valid))"
]
@ -760,12 +760,12 @@
"np.random.seed(42)\n",
"tf.random.set_seed(42)\n",
"\n",
"model = keras.models.Sequential([\n",
" keras.layers.SimpleRNN(20, return_sequences=True, input_shape=[None, 1]),\n",
" keras.layers.BatchNormalization(),\n",
" keras.layers.SimpleRNN(20, return_sequences=True),\n",
" keras.layers.BatchNormalization(),\n",
" keras.layers.TimeDistributed(keras.layers.Dense(10))\n",
"model = tf.keras.Sequential([\n",
" tf.keras.layers.SimpleRNN(20, return_sequences=True, input_shape=[None, 1]),\n",
" tf.keras.layers.BatchNormalization(),\n",
" tf.keras.layers.SimpleRNN(20, return_sequences=True),\n",
" tf.keras.layers.BatchNormalization(),\n",
" tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(10))\n",
"])\n",
"\n",
"model.compile(loss=\"mse\", optimizer=\"adam\", metrics=[last_time_step_mse])\n",
@ -795,15 +795,15 @@
"metadata": {},
"outputs": [],
"source": [
"class LNSimpleRNNCell(keras.layers.Layer):\n",
"class LNSimpleRNNCell(tf.keras.layers.Layer):\n",
" def __init__(self, units, activation=\"tanh\", **kwargs):\n",
" super().__init__(**kwargs)\n",
" self.state_size = units\n",
" self.output_size = units\n",
" self.simple_rnn_cell = keras.layers.SimpleRNNCell(units,\n",
" self.simple_rnn_cell = tf.keras.layers.SimpleRNNCell(units,\n",
" activation=None)\n",
" self.layer_norm = LayerNormalization()\n",
" self.activation = keras.activations.get(activation)\n",
" self.activation = tf.keras.activations.get(activation)\n",
" def get_initial_state(self, inputs=None, batch_size=None, dtype=None):\n",
" if inputs is not None:\n",
" batch_size = tf.shape(inputs)[0]\n",
@ -824,11 +824,11 @@
"np.random.seed(42)\n",
"tf.random.set_seed(42)\n",
"\n",
"model = keras.models.Sequential([\n",
" keras.layers.RNN(LNSimpleRNNCell(20), return_sequences=True,\n",
"model = tf.keras.Sequential([\n",
" tf.keras.layers.RNN(LNSimpleRNNCell(20), return_sequences=True,\n",
" input_shape=[None, 1]),\n",
" keras.layers.RNN(LNSimpleRNNCell(20), return_sequences=True),\n",
" keras.layers.TimeDistributed(keras.layers.Dense(10))\n",
" tf.keras.layers.RNN(LNSimpleRNNCell(20), return_sequences=True),\n",
" tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(10))\n",
"])\n",
"\n",
"model.compile(loss=\"mse\", optimizer=\"adam\", metrics=[last_time_step_mse])\n",
@ -849,7 +849,7 @@
"metadata": {},
"outputs": [],
"source": [
"class MyRNN(keras.layers.Layer):\n",
"class MyRNN(tf.keras.layers.Layer):\n",
" def __init__(self, cell, return_sequences=False, **kwargs):\n",
" super().__init__(**kwargs)\n",
" self.cell = cell\n",
@ -887,11 +887,11 @@
"np.random.seed(42)\n",
"tf.random.set_seed(42)\n",
"\n",
"model = keras.models.Sequential([\n",
"model = tf.keras.Sequential([\n",
" MyRNN(LNSimpleRNNCell(20), return_sequences=True,\n",
" input_shape=[None, 1]),\n",
" MyRNN(LNSimpleRNNCell(20), return_sequences=True),\n",
" keras.layers.TimeDistributed(keras.layers.Dense(10))\n",
" tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(10))\n",
"])\n",
"\n",
"model.compile(loss=\"mse\", optimizer=\"adam\", metrics=[last_time_step_mse])\n",
@ -917,10 +917,10 @@
"np.random.seed(42)\n",
"tf.random.set_seed(42)\n",
"\n",
"model = keras.models.Sequential([\n",
" keras.layers.LSTM(20, return_sequences=True, input_shape=[None, 1]),\n",
" keras.layers.LSTM(20, return_sequences=True),\n",
" keras.layers.TimeDistributed(keras.layers.Dense(10))\n",
"model = tf.keras.Sequential([\n",
" tf.keras.layers.LSTM(20, return_sequences=True, input_shape=[None, 1]),\n",
" tf.keras.layers.LSTM(20, return_sequences=True),\n",
" tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(10))\n",
"])\n",
"\n",
"model.compile(loss=\"mse\", optimizer=\"adam\", metrics=[last_time_step_mse])\n",
@ -988,10 +988,10 @@
"np.random.seed(42)\n",
"tf.random.set_seed(42)\n",
"\n",
"model = keras.models.Sequential([\n",
" keras.layers.GRU(20, return_sequences=True, input_shape=[None, 1]),\n",
" keras.layers.GRU(20, return_sequences=True),\n",
" keras.layers.TimeDistributed(keras.layers.Dense(10))\n",
"model = tf.keras.Sequential([\n",
" tf.keras.layers.GRU(20, return_sequences=True, input_shape=[None, 1]),\n",
" tf.keras.layers.GRU(20, return_sequences=True),\n",
" tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(10))\n",
"])\n",
"\n",
"model.compile(loss=\"mse\", optimizer=\"adam\", metrics=[last_time_step_mse])\n",
@ -1080,12 +1080,12 @@
"np.random.seed(42)\n",
"tf.random.set_seed(42)\n",
"\n",
"model = keras.models.Sequential([\n",
" keras.layers.Conv1D(filters=20, kernel_size=4, strides=2, padding=\"valid\",\n",
"model = tf.keras.Sequential([\n",
" tf.keras.layers.Conv1D(filters=20, kernel_size=4, strides=2, padding=\"valid\",\n",
" input_shape=[None, 1]),\n",
" keras.layers.GRU(20, return_sequences=True),\n",
" keras.layers.GRU(20, return_sequences=True),\n",
" keras.layers.TimeDistributed(keras.layers.Dense(10))\n",
" tf.keras.layers.GRU(20, return_sequences=True),\n",
" tf.keras.layers.GRU(20, return_sequences=True),\n",
" tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(10))\n",
"])\n",
"\n",
"model.compile(loss=\"mse\", optimizer=\"adam\", metrics=[last_time_step_mse])\n",
@ -1124,12 +1124,12 @@
"np.random.seed(42)\n",
"tf.random.set_seed(42)\n",
"\n",
"model = keras.models.Sequential()\n",
"model.add(keras.layers.InputLayer(input_shape=[None, 1]))\n",
"model = tf.keras.Sequential()\n",
"model.add(tf.keras.layers.InputLayer(input_shape=[None, 1]))\n",
"for rate in (1, 2, 4, 8) * 2:\n",
" model.add(keras.layers.Conv1D(filters=20, kernel_size=2, padding=\"causal\",\n",
" model.add(tf.keras.layers.Conv1D(filters=20, kernel_size=2, padding=\"causal\",\n",
" activation=\"relu\", dilation_rate=rate))\n",
"model.add(keras.layers.Conv1D(filters=10, kernel_size=1))\n",
"model.add(tf.keras.layers.Conv1D(filters=10, kernel_size=1))\n",
"model.compile(loss=\"mse\", optimizer=\"adam\", metrics=[last_time_step_mse])\n",
"history = model.fit(X_train, Y_train, epochs=20,\n",
" validation_data=(X_valid, Y_valid))"
@ -1148,14 +1148,14 @@
"metadata": {},
"outputs": [],
"source": [
"class GatedActivationUnit(keras.layers.Layer):\n",
"class GatedActivationUnit(tf.keras.layers.Layer):\n",
" def __init__(self, activation=\"tanh\", **kwargs):\n",
" super().__init__(**kwargs)\n",
" self.activation = keras.activations.get(activation)\n",
" self.activation = tf.keras.activations.get(activation)\n",
" def call(self, inputs):\n",
" n_filters = inputs.shape[-1] // 2\n",
" linear_output = self.activation(inputs[..., :n_filters])\n",
" gate = keras.activations.sigmoid(inputs[..., n_filters:])\n",
" gate = tf.keras.activations.sigmoid(inputs[..., n_filters:])\n",
" return self.activation(linear_output) * gate"
]
},
@ -1166,11 +1166,11 @@
"outputs": [],
"source": [
"def wavenet_residual_block(inputs, n_filters, dilation_rate):\n",
" z = keras.layers.Conv1D(2 * n_filters, kernel_size=2, padding=\"causal\",\n",
" z = tf.keras.layers.Conv1D(2 * n_filters, kernel_size=2, padding=\"causal\",\n",
" dilation_rate=dilation_rate)(inputs)\n",
" z = GatedActivationUnit()(z)\n",
" z = keras.layers.Conv1D(n_filters, kernel_size=1)(z)\n",
" return keras.layers.Add()([z, inputs]), z"
" z = tf.keras.layers.Conv1D(n_filters, kernel_size=1)(z)\n",
" return tf.keras.layers.Add()([z, inputs]), z"
]
},
{
@ -1179,7 +1179,7 @@
"metadata": {},
"outputs": [],
"source": [
"keras.backend.clear_session()\n",
"tf.keras.backend.clear_session()\n",
"np.random.seed(42)\n",
"tf.random.set_seed(42)\n",
"\n",
@ -1188,17 +1188,17 @@
"n_filters = 32 # 128 in the paper\n",
"n_outputs = 10 # 256 in the paper\n",
"\n",
"inputs = keras.layers.Input(shape=[None, 1])\n",
"z = keras.layers.Conv1D(n_filters, kernel_size=2, padding=\"causal\")(inputs)\n",
"inputs = tf.keras.layers.Input(shape=[None, 1])\n",
"z = tf.keras.layers.Conv1D(n_filters, kernel_size=2, padding=\"causal\")(inputs)\n",
"skip_to_last = []\n",
"for dilation_rate in [2**i for i in range(n_layers_per_block)] * n_blocks:\n",
" z, skip = wavenet_residual_block(z, n_filters, dilation_rate)\n",
" skip_to_last.append(skip)\n",
"z = keras.activations.relu(keras.layers.Add()(skip_to_last))\n",
"z = keras.layers.Conv1D(n_filters, kernel_size=1, activation=\"relu\")(z)\n",
"Y_proba = keras.layers.Conv1D(n_outputs, kernel_size=1, activation=\"softmax\")(z)\n",
"z = tf.keras.activations.relu(tf.keras.layers.Add()(skip_to_last))\n",
"z = tf.keras.layers.Conv1D(n_filters, kernel_size=1, activation=\"relu\")(z)\n",
"Y_proba = tf.keras.layers.Conv1D(n_outputs, kernel_size=1, activation=\"softmax\")(z)\n",
"\n",
"model = keras.models.Model(inputs=[inputs], outputs=[Y_proba])"
"model = tf.keras.Model(inputs=[inputs], outputs=[Y_proba])"
]
},
{
@ -1269,7 +1269,7 @@
"source": [
"DOWNLOAD_ROOT = \"http://download.tensorflow.org/data/\"\n",
"FILENAME = \"quickdraw_tutorial_dataset_v1.tar.gz\"\n",
"filepath = keras.utils.get_file(FILENAME,\n",
"filepath = tf.keras.utils.get_file(FILENAME,\n",
" DOWNLOAD_ROOT + FILENAME,\n",
" cache_subdir=\"datasets/quickdraw\",\n",
" extract=True)"
@ -1472,18 +1472,18 @@
"metadata": {},
"outputs": [],
"source": [
"model = keras.models.Sequential([\n",
" keras.layers.Conv1D(32, kernel_size=5, strides=2, activation=\"relu\"),\n",
" keras.layers.BatchNormalization(),\n",
" keras.layers.Conv1D(64, kernel_size=5, strides=2, activation=\"relu\"),\n",
" keras.layers.BatchNormalization(),\n",
" keras.layers.Conv1D(128, kernel_size=3, strides=2, activation=\"relu\"),\n",
" keras.layers.BatchNormalization(),\n",
" keras.layers.LSTM(128, return_sequences=True),\n",
" keras.layers.LSTM(128),\n",
" keras.layers.Dense(len(class_names), activation=\"softmax\")\n",
"model = tf.keras.Sequential([\n",
" tf.keras.layers.Conv1D(32, kernel_size=5, strides=2, activation=\"relu\"),\n",
" tf.keras.layers.BatchNormalization(),\n",
" tf.keras.layers.Conv1D(64, kernel_size=5, strides=2, activation=\"relu\"),\n",
" tf.keras.layers.BatchNormalization(),\n",
" tf.keras.layers.Conv1D(128, kernel_size=3, strides=2, activation=\"relu\"),\n",
" tf.keras.layers.BatchNormalization(),\n",
" tf.keras.layers.LSTM(128, return_sequences=True),\n",
" tf.keras.layers.LSTM(128),\n",
" tf.keras.layers.Dense(len(class_names), activation=\"softmax\")\n",
"])\n",
"optimizer = keras.optimizers.SGD(learning_rate=1e-2, clipnorm=1.)\n",
"optimizer = tf.keras.optimizers.SGD(learning_rate=1e-2, clipnorm=1.)\n",
"model.compile(loss=\"sparse_categorical_crossentropy\",\n",
" optimizer=optimizer,\n",
" metrics=[\"accuracy\", \"sparse_top_k_categorical_accuracy\"])\n",
@ -1507,7 +1507,7 @@
"metadata": {},
"outputs": [],
"source": [
"np.mean(keras.metrics.sparse_top_k_categorical_accuracy(y_test, y_probas))"
"np.mean(tf.keras.metrics.sparse_top_k_categorical_accuracy(y_test, y_probas))"
]
},
{
@ -1557,7 +1557,7 @@
"source": [
"DOWNLOAD_ROOT = \"https://github.com/ageron/handson-ml2/raw/master/datasets/jsb_chorales/\"\n",
"FILENAME = \"jsb_chorales.tgz\"\n",
"filepath = keras.utils.get_file(FILENAME,\n",
"filepath = tf.keras.utils.get_file(FILENAME,\n",
" DOWNLOAD_ROOT + FILENAME,\n",
" cache_subdir=\"datasets/jsb_chorales\",\n",
" extract=True)"
@ -1792,19 +1792,19 @@
"source": [
"n_embedding_dims = 5\n",
"\n",
"model = keras.models.Sequential([\n",
" keras.layers.Embedding(input_dim=n_notes, output_dim=n_embedding_dims,\n",
"model = tf.keras.Sequential([\n",
" tf.keras.layers.Embedding(input_dim=n_notes, output_dim=n_embedding_dims,\n",
" input_shape=[None]),\n",
" keras.layers.Conv1D(32, kernel_size=2, padding=\"causal\", activation=\"relu\"),\n",
" keras.layers.BatchNormalization(),\n",
" keras.layers.Conv1D(48, kernel_size=2, padding=\"causal\", activation=\"relu\", dilation_rate=2),\n",
" keras.layers.BatchNormalization(),\n",
" keras.layers.Conv1D(64, kernel_size=2, padding=\"causal\", activation=\"relu\", dilation_rate=4),\n",
" keras.layers.BatchNormalization(),\n",
" keras.layers.Conv1D(96, kernel_size=2, padding=\"causal\", activation=\"relu\", dilation_rate=8),\n",
" keras.layers.BatchNormalization(),\n",
" keras.layers.LSTM(256, return_sequences=True),\n",
" keras.layers.Dense(n_notes, activation=\"softmax\")\n",
" tf.keras.layers.Conv1D(32, kernel_size=2, padding=\"causal\", activation=\"relu\"),\n",
" tf.keras.layers.BatchNormalization(),\n",
" tf.keras.layers.Conv1D(48, kernel_size=2, padding=\"causal\", activation=\"relu\", dilation_rate=2),\n",
" tf.keras.layers.BatchNormalization(),\n",
" tf.keras.layers.Conv1D(64, kernel_size=2, padding=\"causal\", activation=\"relu\", dilation_rate=4),\n",
" tf.keras.layers.BatchNormalization(),\n",
" tf.keras.layers.Conv1D(96, kernel_size=2, padding=\"causal\", activation=\"relu\", dilation_rate=8),\n",
" tf.keras.layers.BatchNormalization(),\n",
" tf.keras.layers.LSTM(256, return_sequences=True),\n",
" tf.keras.layers.Dense(n_notes, activation=\"softmax\")\n",
"])\n",
"\n",
"model.summary()"
@ -1823,7 +1823,7 @@
"metadata": {},
"outputs": [],
"source": [
"optimizer = keras.optimizers.Nadam(learning_rate=1e-3)\n",
"optimizer = tf.keras.optimizers.Nadam(learning_rate=1e-3)\n",
"model.compile(loss=\"sparse_categorical_crossentropy\", optimizer=optimizer,\n",
" metrics=[\"accuracy\"])\n",
"model.fit(train_set, epochs=20, validation_data=valid_set)"

View File

@ -161,7 +161,7 @@
"outputs": [],
"source": [
"shakespeare_url = \"https://raw.githubusercontent.com/karpathy/char-rnn/master/data/tinyshakespeare/input.txt\"\n",
"filepath = keras.utils.get_file(\"shakespeare.txt\", shakespeare_url)\n",
"filepath = tf.keras.utils.get_file(\"shakespeare.txt\", shakespeare_url)\n",
"with open(filepath) as f:\n",
" shakespeare_text = f.read()"
]
@ -190,7 +190,7 @@
"metadata": {},
"outputs": [],
"source": [
"tokenizer = keras.preprocessing.text.Tokenizer(char_level=True)\n",
"tokenizer = tf.keras.preprocessing.text.Tokenizer(char_level=True)\n",
"tokenizer.fit_on_texts(shakespeare_text)"
]
},
@ -337,14 +337,14 @@
"metadata": {},
"outputs": [],
"source": [
"model = keras.models.Sequential([\n",
" keras.layers.GRU(128, return_sequences=True, input_shape=[None, max_id],\n",
"model = tf.keras.Sequential([\n",
" tf.keras.layers.GRU(128, return_sequences=True, input_shape=[None, max_id],\n",
" #dropout=0.2, recurrent_dropout=0.2),\n",
" dropout=0.2),\n",
" keras.layers.GRU(128, return_sequences=True,\n",
" tf.keras.layers.GRU(128, return_sequences=True,\n",
" #dropout=0.2, recurrent_dropout=0.2),\n",
" dropout=0.2),\n",
" keras.layers.TimeDistributed(keras.layers.Dense(max_id,\n",
" tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(max_id,\n",
" activation=\"softmax\"))\n",
"])\n",
"model.compile(loss=\"sparse_categorical_crossentropy\", optimizer=\"adam\")\n",
@ -531,15 +531,15 @@
"metadata": {},
"outputs": [],
"source": [
"model = keras.models.Sequential([\n",
" keras.layers.GRU(128, return_sequences=True, stateful=True,\n",
"model = tf.keras.Sequential([\n",
" tf.keras.layers.GRU(128, return_sequences=True, stateful=True,\n",
" #dropout=0.2, recurrent_dropout=0.2,\n",
" dropout=0.2,\n",
" batch_input_shape=[batch_size, None, max_id]),\n",
" keras.layers.GRU(128, return_sequences=True, stateful=True,\n",
" tf.keras.layers.GRU(128, return_sequences=True, stateful=True,\n",
" #dropout=0.2, recurrent_dropout=0.2),\n",
" dropout=0.2),\n",
" keras.layers.TimeDistributed(keras.layers.Dense(max_id,\n",
" tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(max_id,\n",
" activation=\"softmax\"))\n",
"])"
]
@ -550,7 +550,7 @@
"metadata": {},
"outputs": [],
"source": [
"class ResetStatesCallback(keras.callbacks.Callback):\n",
"class ResetStatesCallback(tf.keras.callbacks.Callback):\n",
" def on_epoch_begin(self, epoch, logs):\n",
" self.model.reset_states()"
]
@ -579,10 +579,10 @@
"metadata": {},
"outputs": [],
"source": [
"stateless_model = keras.models.Sequential([\n",
" keras.layers.GRU(128, return_sequences=True, input_shape=[None, max_id]),\n",
" keras.layers.GRU(128, return_sequences=True),\n",
" keras.layers.TimeDistributed(keras.layers.Dense(max_id,\n",
"stateless_model = tf.keras.Sequential([\n",
" tf.keras.layers.GRU(128, return_sequences=True, input_shape=[None, max_id]),\n",
" tf.keras.layers.GRU(128, return_sequences=True),\n",
" tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(max_id,\n",
" activation=\"softmax\"))\n",
"])"
]
@ -653,7 +653,7 @@
"metadata": {},
"outputs": [],
"source": [
"(X_train, y_train), (X_test, y_test) = keras.datasets.imdb.load_data()"
"(X_train, y_train), (X_test, y_test) = tf.keras.datasets.imdb.load_data()"
]
},
{
@ -671,7 +671,7 @@
"metadata": {},
"outputs": [],
"source": [
"word_index = keras.datasets.imdb.get_word_index()\n",
"word_index = tf.keras.datasets.imdb.get_word_index()\n",
"id_to_word = {id_ + 3: word for word, id_ in word_index.items()}\n",
"for id_, token in enumerate((\"<pad>\", \"<sos>\", \"<unk>\")):\n",
" id_to_word[id_] = token\n",
@ -860,13 +860,13 @@
"outputs": [],
"source": [
"embed_size = 128\n",
"model = keras.models.Sequential([\n",
" keras.layers.Embedding(vocab_size + num_oov_buckets, embed_size,\n",
"model = tf.keras.Sequential([\n",
" tf.keras.layers.Embedding(vocab_size + num_oov_buckets, embed_size,\n",
" mask_zero=True, # not shown in the book\n",
" input_shape=[None]),\n",
" keras.layers.GRU(128, return_sequences=True),\n",
" keras.layers.GRU(128),\n",
" keras.layers.Dense(1, activation=\"sigmoid\")\n",
" tf.keras.layers.GRU(128, return_sequences=True),\n",
" tf.keras.layers.GRU(128),\n",
" tf.keras.layers.Dense(1, activation=\"sigmoid\")\n",
"])\n",
"model.compile(loss=\"binary_crossentropy\", optimizer=\"adam\", metrics=[\"accuracy\"])\n",
"history = model.fit(train_set, epochs=5)"
@ -885,15 +885,15 @@
"metadata": {},
"outputs": [],
"source": [
"K = keras.backend\n",
"K = tf.keras.backend\n",
"embed_size = 128\n",
"inputs = keras.layers.Input(shape=[None])\n",
"mask = keras.layers.Lambda(lambda inputs: K.not_equal(inputs, 0))(inputs)\n",
"z = keras.layers.Embedding(vocab_size + num_oov_buckets, embed_size)(inputs)\n",
"z = keras.layers.GRU(128, return_sequences=True)(z, mask=mask)\n",
"z = keras.layers.GRU(128)(z, mask=mask)\n",
"outputs = keras.layers.Dense(1, activation=\"sigmoid\")(z)\n",
"model = keras.models.Model(inputs=[inputs], outputs=[outputs])\n",
"inputs = tf.keras.layers.Input(shape=[None])\n",
"mask = tf.keras.layers.Lambda(lambda inputs: K.not_equal(inputs, 0))(inputs)\n",
"z = tf.keras.layers.Embedding(vocab_size + num_oov_buckets, embed_size)(inputs)\n",
"z = tf.keras.layers.GRU(128, return_sequences=True)(z, mask=mask)\n",
"z = tf.keras.layers.GRU(128)(z, mask=mask)\n",
"outputs = tf.keras.layers.Dense(1, activation=\"sigmoid\")(z)\n",
"model = tf.keras.Model(inputs=[inputs], outputs=[outputs])\n",
"model.compile(loss=\"binary_crossentropy\", optimizer=\"adam\", metrics=[\"accuracy\"])\n",
"history = model.fit(train_set, epochs=5)"
]
@ -932,11 +932,11 @@
"source": [
"import tensorflow_hub as hub\n",
"\n",
"model = keras.Sequential([\n",
"model = tf.keras.Sequential([\n",
" hub.KerasLayer(\"https://tfhub.dev/google/tf2-preview/nnlm-en-dim50/1\",\n",
" dtype=tf.string, input_shape=[], output_shape=[50]),\n",
" keras.layers.Dense(128, activation=\"relu\"),\n",
" keras.layers.Dense(1, activation=\"sigmoid\")\n",
" tf.keras.layers.Dense(128, activation=\"relu\"),\n",
" tf.keras.layers.Dense(1, activation=\"sigmoid\")\n",
"])\n",
"model.compile(loss=\"binary_crossentropy\", optimizer=\"adam\",\n",
" metrics=[\"accuracy\"])"
@ -1028,22 +1028,22 @@
"source": [
"import tensorflow_addons as tfa\n",
"\n",
"encoder_inputs = keras.layers.Input(shape=[None], dtype=np.int32)\n",
"decoder_inputs = keras.layers.Input(shape=[None], dtype=np.int32)\n",
"sequence_lengths = keras.layers.Input(shape=[], dtype=np.int32)\n",
"encoder_inputs = tf.keras.layers.Input(shape=[None], dtype=np.int32)\n",
"decoder_inputs = tf.keras.layers.Input(shape=[None], dtype=np.int32)\n",
"sequence_lengths = tf.keras.layers.Input(shape=[], dtype=np.int32)\n",
"\n",
"embeddings = keras.layers.Embedding(vocab_size, embed_size)\n",
"embeddings = tf.keras.layers.Embedding(vocab_size, embed_size)\n",
"encoder_embeddings = embeddings(encoder_inputs)\n",
"decoder_embeddings = embeddings(decoder_inputs)\n",
"\n",
"encoder = keras.layers.LSTM(512, return_state=True)\n",
"encoder = tf.keras.layers.LSTM(512, return_state=True)\n",
"encoder_outputs, state_h, state_c = encoder(encoder_embeddings)\n",
"encoder_state = [state_h, state_c]\n",
"\n",
"sampler = tfa.seq2seq.sampler.TrainingSampler()\n",
"\n",
"decoder_cell = keras.layers.LSTMCell(512)\n",
"output_layer = keras.layers.Dense(vocab_size)\n",
"decoder_cell = tf.keras.layers.LSTMCell(512)\n",
"output_layer = tf.keras.layers.Dense(vocab_size)\n",
"decoder = tfa.seq2seq.basic_decoder.BasicDecoder(decoder_cell, sampler,\n",
" output_layer=output_layer)\n",
"final_outputs, final_state, final_sequence_lengths = decoder(\n",
@ -1051,7 +1051,7 @@
" sequence_length=sequence_lengths)\n",
"Y_proba = tf.nn.softmax(final_outputs.rnn_output)\n",
"\n",
"model = keras.models.Model(\n",
"model = tf.keras.Model(\n",
" inputs=[encoder_inputs, decoder_inputs, sequence_lengths],\n",
" outputs=[Y_proba])"
]
@ -1092,9 +1092,9 @@
"metadata": {},
"outputs": [],
"source": [
"model = keras.models.Sequential([\n",
" keras.layers.GRU(10, return_sequences=True, input_shape=[None, 10]),\n",
" keras.layers.Bidirectional(keras.layers.GRU(10, return_sequences=True))\n",
"model = tf.keras.Sequential([\n",
" tf.keras.layers.GRU(10, return_sequences=True, input_shape=[None, 10]),\n",
" tf.keras.layers.Bidirectional(tf.keras.layers.GRU(10, return_sequences=True))\n",
"])\n",
"\n",
"model.summary()"
@ -1113,7 +1113,7 @@
"metadata": {},
"outputs": [],
"source": [
"class PositionalEncoding(keras.layers.Layer):\n",
"class PositionalEncoding(tf.keras.layers.Layer):\n",
" def __init__(self, max_steps, max_dims, dtype=tf.float32, **kwargs):\n",
" super().__init__(dtype=dtype, **kwargs)\n",
" if max_dims % 2 == 1: max_dims += 1 # max_dims must be even\n",
@ -1182,9 +1182,9 @@
"outputs": [],
"source": [
"embed_size = 512; max_steps = 500; vocab_size = 10000\n",
"encoder_inputs = keras.layers.Input(shape=[None], dtype=np.int32)\n",
"decoder_inputs = keras.layers.Input(shape=[None], dtype=np.int32)\n",
"embeddings = keras.layers.Embedding(vocab_size, embed_size)\n",
"encoder_inputs = tf.keras.layers.Input(shape=[None], dtype=np.int32)\n",
"decoder_inputs = tf.keras.layers.Input(shape=[None], dtype=np.int32)\n",
"embeddings = tf.keras.layers.Embedding(vocab_size, embed_size)\n",
"encoder_embeddings = embeddings(encoder_inputs)\n",
"decoder_embeddings = embeddings(decoder_inputs)\n",
"positional_encoding = PositionalEncoding(max_steps, max_dims=embed_size)\n",
@ -1207,23 +1207,23 @@
"source": [
"Z = encoder_in\n",
"for N in range(6):\n",
" Z = keras.layers.Attention(use_scale=True)([Z, Z])\n",
" Z = tf.keras.layers.Attention(use_scale=True)([Z, Z])\n",
"\n",
"encoder_outputs = Z\n",
"Z = decoder_in\n",
"for N in range(6):\n",
" Z = keras.layers.Attention(use_scale=True, causal=True)([Z, Z])\n",
" Z = keras.layers.Attention(use_scale=True)([Z, encoder_outputs])\n",
" Z = tf.keras.layers.Attention(use_scale=True, causal=True)([Z, Z])\n",
" Z = tf.keras.layers.Attention(use_scale=True)([Z, encoder_outputs])\n",
"\n",
"outputs = keras.layers.TimeDistributed(\n",
" keras.layers.Dense(vocab_size, activation=\"softmax\"))(Z)"
"outputs = tf.keras.layers.TimeDistributed(\n",
" tf.keras.layers.Dense(vocab_size, activation=\"softmax\"))(Z)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Here's a basic implementation of the `MultiHeadAttention` layer. One will likely be added to `keras.layers` in the near future. Note that `Conv1D` layers with `kernel_size=1` (and the default `padding=\"valid\"` and `strides=1`) is equivalent to a `TimeDistributed(Dense(...))` layer."
"Here's a basic implementation of the `MultiHeadAttention` layer. One will likely be added to `tf.keras.layers` in the near future. Note that `Conv1D` layers with `kernel_size=1` (and the default `padding=\"valid\"` and `strides=1`) is equivalent to a `TimeDistributed(Dense(...))` layer."
]
},
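{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a quick sanity check of that last claim (a sketch, not from the book: the batch `X`, its shape, and the unit counts below are made up for the test), copying the `Conv1D` kernel into the wrapped `Dense` layer should make both layers produce identical outputs, since each applies the same projection independently at every time step:\n",
"\n",
"```python\n",
"X = tf.random.normal([2, 5, 8])  # hypothetical batch: 2 sequences, 5 steps, 8 dims\n",
"conv = tf.keras.layers.Conv1D(4, kernel_size=1)\n",
"td = tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(4))\n",
"_ = conv(X), td(X)  # call both layers once to build their weights\n",
"kernel, bias = conv.get_weights()  # Conv1D kernel has shape (1, 8, 4)\n",
"td.layer.set_weights([kernel.reshape(8, 4), bias])  # reuse the Conv1D weights\n",
"tf.debugging.assert_near(conv(X), td(X))  # passes: the outputs match\n",
"```"
]
},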
{
@ -1232,9 +1232,9 @@
"metadata": {},
"outputs": [],
"source": [
"K = keras.backend\n",
"K = tf.keras.backend\n",
"\n",
"class MultiHeadAttention(keras.layers.Layer):\n",
"class MultiHeadAttention(tf.keras.layers.Layer):\n",
" def __init__(self, n_heads, causal=False, use_scale=False, **kwargs):\n",
" self.n_heads = n_heads\n",
" self.causal = causal\n",
@ -1243,11 +1243,11 @@
" def build(self, batch_input_shape):\n",
" self.dims = batch_input_shape[0][-1]\n",
" self.q_dims, self.v_dims, self.k_dims = [self.dims // self.n_heads] * 3 # could be hyperparameters instead\n",
" self.q_linear = keras.layers.Conv1D(self.n_heads * self.q_dims, kernel_size=1, use_bias=False)\n",
" self.v_linear = keras.layers.Conv1D(self.n_heads * self.v_dims, kernel_size=1, use_bias=False)\n",
" self.k_linear = keras.layers.Conv1D(self.n_heads * self.k_dims, kernel_size=1, use_bias=False)\n",
" self.attention = keras.layers.Attention(causal=self.causal, use_scale=self.use_scale)\n",
" self.out_linear = keras.layers.Conv1D(self.dims, kernel_size=1, use_bias=False)\n",
" self.q_linear = tf.keras.layers.Conv1D(self.n_heads * self.q_dims, kernel_size=1, use_bias=False)\n",
" self.v_linear = tf.keras.layers.Conv1D(self.n_heads * self.v_dims, kernel_size=1, use_bias=False)\n",
" self.k_linear = tf.keras.layers.Conv1D(self.n_heads * self.k_dims, kernel_size=1, use_bias=False)\n",
" self.attention = tf.keras.layers.Attention(causal=self.causal, use_scale=self.use_scale)\n",
" self.out_linear = tf.keras.layers.Conv1D(self.dims, kernel_size=1, use_bias=False)\n",
" super().build(batch_input_shape)\n",
" def _multi_head_linear(self, inputs, linear):\n",
" shape = K.concatenate([K.shape(inputs)[:-1], [self.n_heads, -1]])\n",
@ -1547,13 +1547,13 @@
"\n",
"embedding_size = 5\n",
"\n",
"model = keras.models.Sequential([\n",
" keras.layers.InputLayer(input_shape=[None], dtype=tf.int32, ragged=True),\n",
" keras.layers.Embedding(input_dim=len(POSSIBLE_CHARS), output_dim=embedding_size),\n",
" keras.layers.GRU(30),\n",
" keras.layers.Dense(1, activation=\"sigmoid\")\n",
"model = tf.keras.Sequential([\n",
" tf.keras.layers.InputLayer(input_shape=[None], dtype=tf.int32, ragged=True),\n",
" tf.keras.layers.Embedding(input_dim=len(POSSIBLE_CHARS), output_dim=embedding_size),\n",
" tf.keras.layers.GRU(30),\n",
" tf.keras.layers.Dense(1, activation=\"sigmoid\")\n",
"])\n",
"optimizer = keras.optimizers.SGD(learning_rate=0.02, momentum = 0.95, nesterov=True)\n",
"optimizer = tf.keras.optimizers.SGD(learning_rate=0.02, momentum = 0.95, nesterov=True)\n",
"model.compile(loss=\"binary_crossentropy\", optimizer=optimizer, metrics=[\"accuracy\"])\n",
"history = model.fit(X_train, y_train, epochs=20, validation_data=(X_valid, y_valid))"
]
@ -1785,25 +1785,25 @@
"np.random.seed(42)\n",
"tf.random.set_seed(42)\n",
"\n",
"encoder = keras.models.Sequential([\n",
" keras.layers.Embedding(input_dim=len(INPUT_CHARS) + 1,\n",
"encoder = tf.keras.Sequential([\n",
" tf.keras.layers.Embedding(input_dim=len(INPUT_CHARS) + 1,\n",
" output_dim=embedding_size,\n",
" input_shape=[None]),\n",
" keras.layers.LSTM(128)\n",
" tf.keras.layers.LSTM(128)\n",
"])\n",
"\n",
"decoder = keras.models.Sequential([\n",
" keras.layers.LSTM(128, return_sequences=True),\n",
" keras.layers.Dense(len(OUTPUT_CHARS) + 1, activation=\"softmax\")\n",
"decoder = tf.keras.Sequential([\n",
" tf.keras.layers.LSTM(128, return_sequences=True),\n",
" tf.keras.layers.Dense(len(OUTPUT_CHARS) + 1, activation=\"softmax\")\n",
"])\n",
"\n",
"model = keras.models.Sequential([\n",
"model = tf.keras.Sequential([\n",
" encoder,\n",
" keras.layers.RepeatVector(max_output_length),\n",
" tf.keras.layers.RepeatVector(max_output_length),\n",
" decoder\n",
"])\n",
"\n",
"optimizer = keras.optimizers.Nadam()\n",
"optimizer = tf.keras.optimizers.Nadam()\n",
"model.compile(loss=\"sparse_categorical_crossentropy\", optimizer=optimizer,\n",
" metrics=[\"accuracy\"])\n",
"history = model.fit(X_train, Y_train, epochs=20,\n",
@ -2017,27 +2017,27 @@
"np.random.seed(42)\n",
"tf.random.set_seed(42)\n",
"\n",
"encoder_input = keras.layers.Input(shape=[None], dtype=tf.int32)\n",
"encoder_embedding = keras.layers.Embedding(\n",
"encoder_input = tf.keras.layers.Input(shape=[None], dtype=tf.int32)\n",
"encoder_embedding = tf.keras.layers.Embedding(\n",
" input_dim=len(INPUT_CHARS) + 1,\n",
" output_dim=encoder_embedding_size)(encoder_input)\n",
"_, encoder_state_h, encoder_state_c = keras.layers.LSTM(\n",
"_, encoder_state_h, encoder_state_c = tf.keras.layers.LSTM(\n",
" lstm_units, return_state=True)(encoder_embedding)\n",
"encoder_state = [encoder_state_h, encoder_state_c]\n",
"\n",
"decoder_input = keras.layers.Input(shape=[None], dtype=tf.int32)\n",
"decoder_embedding = keras.layers.Embedding(\n",
"decoder_input = tf.keras.layers.Input(shape=[None], dtype=tf.int32)\n",
"decoder_embedding = tf.keras.layers.Embedding(\n",
" input_dim=len(OUTPUT_CHARS) + 2,\n",
" output_dim=decoder_embedding_size)(decoder_input)\n",
"decoder_lstm_output = keras.layers.LSTM(lstm_units, return_sequences=True)(\n",
"decoder_lstm_output = tf.keras.layers.LSTM(lstm_units, return_sequences=True)(\n",
" decoder_embedding, initial_state=encoder_state)\n",
"decoder_output = keras.layers.Dense(len(OUTPUT_CHARS) + 1,\n",
"decoder_output = tf.keras.layers.Dense(len(OUTPUT_CHARS) + 1,\n",
" activation=\"softmax\")(decoder_lstm_output)\n",
"\n",
"model = keras.models.Model(inputs=[encoder_input, decoder_input],\n",
"model = tf.keras.Model(inputs=[encoder_input, decoder_input],\n",
" outputs=[decoder_output])\n",
"\n",
"optimizer = keras.optimizers.Nadam()\n",
"optimizer = tf.keras.optimizers.Nadam()\n",
"model.compile(loss=\"sparse_categorical_crossentropy\", optimizer=optimizer,\n",
" metrics=[\"accuracy\"])\n",
"history = model.fit([X_train, X_train_decoder], Y_train, epochs=10,\n",
@ -2123,25 +2123,25 @@
"decoder_embedding_size = 32\n",
"units = 128\n",
"\n",
"encoder_inputs = keras.layers.Input(shape=[None], dtype=np.int32)\n",
"decoder_inputs = keras.layers.Input(shape=[None], dtype=np.int32)\n",
"sequence_lengths = keras.layers.Input(shape=[], dtype=np.int32)\n",
"encoder_inputs = tf.keras.layers.Input(shape=[None], dtype=np.int32)\n",
"decoder_inputs = tf.keras.layers.Input(shape=[None], dtype=np.int32)\n",
"sequence_lengths = tf.keras.layers.Input(shape=[], dtype=np.int32)\n",
"\n",
"encoder_embeddings = keras.layers.Embedding(\n",
"encoder_embeddings = tf.keras.layers.Embedding(\n",
" len(INPUT_CHARS) + 1, encoder_embedding_size)(encoder_inputs)\n",
"\n",
"decoder_embedding_layer = keras.layers.Embedding(\n",
"decoder_embedding_layer = tf.keras.layers.Embedding(\n",
" len(OUTPUT_CHARS) + 2, decoder_embedding_size)\n",
"decoder_embeddings = decoder_embedding_layer(decoder_inputs)\n",
"\n",
"encoder = keras.layers.LSTM(units, return_state=True)\n",
"encoder = tf.keras.layers.LSTM(units, return_state=True)\n",
"encoder_outputs, state_h, state_c = encoder(encoder_embeddings)\n",
"encoder_state = [state_h, state_c]\n",
"\n",
"sampler = tfa.seq2seq.sampler.TrainingSampler()\n",
"\n",
"decoder_cell = keras.layers.LSTMCell(units)\n",
"output_layer = keras.layers.Dense(len(OUTPUT_CHARS) + 1)\n",
"decoder_cell = tf.keras.layers.LSTMCell(units)\n",
"output_layer = tf.keras.layers.Dense(len(OUTPUT_CHARS) + 1)\n",
"\n",
"decoder = tfa.seq2seq.basic_decoder.BasicDecoder(decoder_cell,\n",
" sampler,\n",
@ -2149,11 +2149,11 @@
"final_outputs, final_state, final_sequence_lengths = decoder(\n",
" decoder_embeddings,\n",
" initial_state=encoder_state)\n",
"Y_proba = keras.layers.Activation(\"softmax\")(final_outputs.rnn_output)\n",
"Y_proba = tf.keras.layers.Activation(\"softmax\")(final_outputs.rnn_output)\n",
"\n",
"model = keras.models.Model(inputs=[encoder_inputs, decoder_inputs],\n",
"model = tf.keras.Model(inputs=[encoder_inputs, decoder_inputs],\n",
" outputs=[Y_proba])\n",
"optimizer = keras.optimizers.Nadam()\n",
"optimizer = tf.keras.optimizers.Nadam()\n",
"model.compile(loss=\"sparse_categorical_crossentropy\", optimizer=optimizer,\n",
" metrics=[\"accuracy\"])\n",
"history = model.fit([X_train, X_train_decoder], Y_train, epochs=15,\n",
@ -2206,7 +2206,7 @@
" start_tokens=start_tokens,\n",
" end_token=0)\n",
"\n",
"inference_model = keras.models.Model(inputs=[encoder_inputs],\n",
"inference_model = tf.keras.Model(inputs=[encoder_inputs],\n",
" outputs=[final_outputs.sample_id])"
]
},
@ -2320,18 +2320,18 @@
"decoder_embedding_size = 32\n",
"units = 128\n",
"\n",
"encoder_inputs = keras.layers.Input(shape=[None], dtype=np.int32)\n",
"decoder_inputs = keras.layers.Input(shape=[None], dtype=np.int32)\n",
"sequence_lengths = keras.layers.Input(shape=[], dtype=np.int32)\n",
"encoder_inputs = tf.keras.layers.Input(shape=[None], dtype=np.int32)\n",
"decoder_inputs = tf.keras.layers.Input(shape=[None], dtype=np.int32)\n",
"sequence_lengths = tf.keras.layers.Input(shape=[], dtype=np.int32)\n",
"\n",
"encoder_embeddings = keras.layers.Embedding(\n",
"encoder_embeddings = tf.keras.layers.Embedding(\n",
" len(INPUT_CHARS) + 1, encoder_embedding_size)(encoder_inputs)\n",
"\n",
"decoder_embedding_layer = keras.layers.Embedding(\n",
"decoder_embedding_layer = tf.keras.layers.Embedding(\n",
" len(OUTPUT_CHARS) + 2, decoder_embedding_size)\n",
"decoder_embeddings = decoder_embedding_layer(decoder_inputs)\n",
"\n",
"encoder = keras.layers.LSTM(units, return_state=True)\n",
"encoder = tf.keras.layers.LSTM(units, return_state=True)\n",
"encoder_outputs, state_h, state_c = encoder(encoder_embeddings)\n",
"encoder_state = [state_h, state_c]\n",
"\n",
@ -2342,8 +2342,8 @@
"# (see https://github.com/tensorflow/addons/pull/1714)\n",
"sampler.sampling_probability = tf.Variable(0.)\n",
"\n",
"decoder_cell = keras.layers.LSTMCell(units)\n",
"output_layer = keras.layers.Dense(len(OUTPUT_CHARS) + 1)\n",
"decoder_cell = tf.keras.layers.LSTMCell(units)\n",
"output_layer = tf.keras.layers.Dense(len(OUTPUT_CHARS) + 1)\n",
"\n",
"decoder = tfa.seq2seq.basic_decoder.BasicDecoder(decoder_cell,\n",
" sampler,\n",
@ -2351,11 +2351,11 @@
"final_outputs, final_state, final_sequence_lengths = decoder(\n",
" decoder_embeddings,\n",
" initial_state=encoder_state)\n",
"Y_proba = keras.layers.Activation(\"softmax\")(final_outputs.rnn_output)\n",
"Y_proba = tf.keras.layers.Activation(\"softmax\")(final_outputs.rnn_output)\n",
"\n",
"model = keras.models.Model(inputs=[encoder_inputs, decoder_inputs],\n",
"model = tf.keras.Model(inputs=[encoder_inputs, decoder_inputs],\n",
" outputs=[Y_proba])\n",
"optimizer = keras.optimizers.Nadam()\n",
"optimizer = tf.keras.optimizers.Nadam()\n",
"model.compile(loss=\"sparse_categorical_crossentropy\", optimizer=optimizer,\n",
" metrics=[\"accuracy\"])\n",
"\n",
@ -2363,7 +2363,7 @@
" proba = min(1.0, epoch / (n_epochs - 10))\n",
" sampler.sampling_probability.assign(proba)\n",
"\n",
"sampling_probability_cb = keras.callbacks.LambdaCallback(\n",
"sampling_probability_cb = tf.keras.callbacks.LambdaCallback(\n",
" on_epoch_begin=update_sampling_probability)\n",
"history = model.fit([X_train, X_train_decoder], Y_train, epochs=n_epochs,\n",
" validation_data=([X_valid, X_valid_decoder], Y_valid),\n",
@ -2407,7 +2407,7 @@
" start_tokens=start_tokens,\n",
" end_token=0)\n",
"\n",
"inference_model = keras.models.Model(inputs=[encoder_inputs],\n",
"inference_model = tf.keras.Model(inputs=[encoder_inputs],\n",
" outputs=[final_outputs.sample_id])"
]
},
@ -2490,25 +2490,25 @@
"metadata": {},
"outputs": [],
"source": [
"class DateTranslation(keras.models.Model):\n",
"class DateTranslation(tf.keras.Model):\n",
" def __init__(self, units=128, encoder_embedding_size=32,\n",
" decoder_embedding_size=32, **kwargs):\n",
" super().__init__(**kwargs)\n",
" self.encoder_embedding = keras.layers.Embedding(\n",
" self.encoder_embedding = tf.keras.layers.Embedding(\n",
" input_dim=len(INPUT_CHARS) + 1,\n",
" output_dim=encoder_embedding_size)\n",
" self.encoder = keras.layers.LSTM(units,\n",
" self.encoder = tf.keras.layers.LSTM(units,\n",
" return_sequences=True,\n",
" return_state=True)\n",
" self.decoder_embedding = keras.layers.Embedding(\n",
" self.decoder_embedding = tf.keras.layers.Embedding(\n",
" input_dim=len(OUTPUT_CHARS) + 2,\n",
" output_dim=decoder_embedding_size)\n",
" self.attention = tfa.seq2seq.LuongAttention(units)\n",
" decoder_inner_cell = keras.layers.LSTMCell(units)\n",
" decoder_inner_cell = tf.keras.layers.LSTMCell(units)\n",
" self.decoder_cell = tfa.seq2seq.AttentionWrapper(\n",
" cell=decoder_inner_cell,\n",
" attention_mechanism=self.attention)\n",
" output_layer = keras.layers.Dense(len(OUTPUT_CHARS) + 1)\n",
" output_layer = tf.keras.layers.Dense(len(OUTPUT_CHARS) + 1)\n",
" self.decoder = tfa.seq2seq.BasicDecoder(\n",
" cell=self.decoder_cell,\n",
" sampler=tfa.seq2seq.sampler.TrainingSampler(),\n",
@ -2564,7 +2564,7 @@
"tf.random.set_seed(42)\n",
"\n",
"model = DateTranslation()\n",
"optimizer = keras.optimizers.Nadam()\n",
"optimizer = tf.keras.optimizers.Nadam()\n",
"model.compile(loss=\"sparse_categorical_crossentropy\", optimizer=optimizer,\n",
" metrics=[\"accuracy\"])\n",
"history = model.fit([X_train, X_train_decoder], Y_train, epochs=25,\n",

View File

@ -166,11 +166,11 @@
"np.random.seed(42)\n",
"tf.random.set_seed(42)\n",
"\n",
"encoder = keras.models.Sequential([keras.layers.Dense(2, input_shape=[3])])\n",
"decoder = keras.models.Sequential([keras.layers.Dense(3, input_shape=[2])])\n",
"autoencoder = keras.models.Sequential([encoder, decoder])\n",
"encoder = tf.keras.Sequential([tf.keras.layers.Dense(2, input_shape=[3])])\n",
"decoder = tf.keras.Sequential([tf.keras.layers.Dense(3, input_shape=[2])])\n",
"autoencoder = tf.keras.Sequential([encoder, decoder])\n",
"\n",
"autoencoder.compile(loss=\"mse\", optimizer=keras.optimizers.SGD(learning_rate=1.5))"
"autoencoder.compile(loss=\"mse\", optimizer=tf.keras.optimizers.SGD(learning_rate=1.5))"
]
},
{
@ -226,7 +226,7 @@
"metadata": {},
"outputs": [],
"source": [
"(X_train_full, y_train_full), (X_test, y_test) = keras.datasets.fashion_mnist.load_data()\n",
"(X_train_full, y_train_full), (X_test, y_test) = tf.keras.datasets.fashion_mnist.load_data()\n",
"X_train_full = X_train_full.astype(np.float32) / 255\n",
"X_test = X_test.astype(np.float32) / 255\n",
"X_train, X_valid = X_train_full[:-5000], X_train_full[-5000:]\n",
@ -254,7 +254,7 @@
"outputs": [],
"source": [
"def rounded_accuracy(y_true, y_pred):\n",
" return keras.metrics.binary_accuracy(tf.round(y_true), tf.round(y_pred))"
" return tf.keras.metrics.binary_accuracy(tf.round(y_true), tf.round(y_pred))"
]
},
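Note: `rounded_accuracy` rounds both the targets and the reconstructions to {0, 1} before comparing them, so it measures the fraction of pixels that land on the right side of 0.5. A minimal check (the values below are illustrative, not from the notebook):

```python
import tensorflow as tf

def rounded_accuracy(y_true, y_pred):
    return tf.keras.metrics.binary_accuracy(tf.round(y_true), tf.round(y_pred))

y_true = tf.constant([[0.0, 0.9, 0.4]])
y_pred = tf.constant([[0.1, 0.8, 0.6]])
# Rounded: [0, 1, 0] vs. [0, 1, 1] -> 2 of the 3 pixels agree
print(rounded_accuracy(y_true, y_pred).numpy())  # [0.6667]
```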
{
@ -266,19 +266,19 @@
"tf.random.set_seed(42)\n",
"np.random.seed(42)\n",
"\n",
"stacked_encoder = keras.models.Sequential([\n",
" keras.layers.Flatten(input_shape=[28, 28]),\n",
" keras.layers.Dense(100, activation=\"selu\"),\n",
" keras.layers.Dense(30, activation=\"selu\"),\n",
"stacked_encoder = tf.keras.Sequential([\n",
" tf.keras.layers.Flatten(input_shape=[28, 28]),\n",
" tf.keras.layers.Dense(100, activation=\"selu\"),\n",
" tf.keras.layers.Dense(30, activation=\"selu\"),\n",
"])\n",
"stacked_decoder = keras.models.Sequential([\n",
" keras.layers.Dense(100, activation=\"selu\", input_shape=[30]),\n",
" keras.layers.Dense(28 * 28, activation=\"sigmoid\"),\n",
" keras.layers.Reshape([28, 28])\n",
"stacked_decoder = tf.keras.Sequential([\n",
" tf.keras.layers.Dense(100, activation=\"selu\", input_shape=[30]),\n",
" tf.keras.layers.Dense(28 * 28, activation=\"sigmoid\"),\n",
" tf.keras.layers.Reshape([28, 28])\n",
"])\n",
"stacked_ae = keras.models.Sequential([stacked_encoder, stacked_decoder])\n",
"stacked_ae = tf.keras.Sequential([stacked_encoder, stacked_decoder])\n",
"stacked_ae.compile(loss=\"binary_crossentropy\",\n",
" optimizer=keras.optimizers.SGD(learning_rate=1.5), metrics=[rounded_accuracy])\n",
" optimizer=tf.keras.optimizers.SGD(learning_rate=1.5), metrics=[rounded_accuracy])\n",
"history = stacked_ae.fit(X_train, X_train, epochs=20,\n",
" validation_data=(X_valid, X_valid))"
]
@ -401,10 +401,10 @@
"metadata": {},
"outputs": [],
"source": [
"class DenseTranspose(keras.layers.Layer):\n",
"class DenseTranspose(tf.keras.layers.Layer):\n",
" def __init__(self, dense, activation=None, **kwargs):\n",
" self.dense = dense\n",
" self.activation = keras.activations.get(activation)\n",
" self.activation = tf.keras.activations.get(activation)\n",
" super().__init__(**kwargs)\n",
" def build(self, batch_input_shape):\n",
" self.biases = self.add_weight(name=\"bias\",\n",
@ -422,29 +422,29 @@
"metadata": {},
"outputs": [],
"source": [
"keras.backend.clear_session()\n",
"tf.keras.backend.clear_session()\n",
"tf.random.set_seed(42)\n",
"np.random.seed(42)\n",
"\n",
"dense_1 = keras.layers.Dense(100, activation=\"selu\")\n",
"dense_2 = keras.layers.Dense(30, activation=\"selu\")\n",
"dense_1 = tf.keras.layers.Dense(100, activation=\"selu\")\n",
"dense_2 = tf.keras.layers.Dense(30, activation=\"selu\")\n",
"\n",
"tied_encoder = keras.models.Sequential([\n",
" keras.layers.Flatten(input_shape=[28, 28]),\n",
"tied_encoder = tf.keras.Sequential([\n",
" tf.keras.layers.Flatten(input_shape=[28, 28]),\n",
" dense_1,\n",
" dense_2\n",
"])\n",
"\n",
"tied_decoder = keras.models.Sequential([\n",
"tied_decoder = tf.keras.Sequential([\n",
" DenseTranspose(dense_2, activation=\"selu\"),\n",
" DenseTranspose(dense_1, activation=\"sigmoid\"),\n",
" keras.layers.Reshape([28, 28])\n",
" tf.keras.layers.Reshape([28, 28])\n",
"])\n",
"\n",
"tied_ae = keras.models.Sequential([tied_encoder, tied_decoder])\n",
"tied_ae = tf.keras.Sequential([tied_encoder, tied_decoder])\n",
"\n",
"tied_ae.compile(loss=\"binary_crossentropy\",\n",
" optimizer=keras.optimizers.SGD(learning_rate=1.5), metrics=[rounded_accuracy])\n",
" optimizer=tf.keras.optimizers.SGD(learning_rate=1.5), metrics=[rounded_accuracy])\n",
"history = tied_ae.fit(X_train, X_train, epochs=10,\n",
" validation_data=(X_valid, X_valid))"
]
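Note: the hunk above cuts off the `build()` and `call()` bodies of `DenseTranspose`. For readers following along, here is a self-contained sketch consistent with the tied-weights technique used here (the elided code may differ in detail): the layer reuses the kernel of the `dense` layer it wraps, transposed, and only adds its own bias vector.

```python
import tensorflow as tf

class DenseTranspose(tf.keras.layers.Layer):
    def __init__(self, dense, activation=None, **kwargs):
        self.dense = dense
        self.activation = tf.keras.activations.get(activation)
        super().__init__(**kwargs)
    def build(self, batch_input_shape):
        # Only the biases are new weights; the kernel is shared (transposed)
        self.biases = self.add_weight(name="bias",
                                      shape=[self.dense.input_shape[-1]],
                                      initializer="zeros")
        super().build(batch_input_shape)
    def call(self, inputs):
        z = tf.matmul(inputs, self.dense.weights[0], transpose_b=True)
        return self.activation(z + self.biases)
```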
@ -477,13 +477,13 @@
"def train_autoencoder(n_neurons, X_train, X_valid, loss, optimizer,\n",
" n_epochs=10, output_activation=None, metrics=None):\n",
" n_inputs = X_train.shape[-1]\n",
" encoder = keras.models.Sequential([\n",
" keras.layers.Dense(n_neurons, activation=\"selu\", input_shape=[n_inputs])\n",
" encoder = tf.keras.Sequential([\n",
" tf.keras.layers.Dense(n_neurons, activation=\"selu\", input_shape=[n_inputs])\n",
" ])\n",
" decoder = keras.models.Sequential([\n",
" keras.layers.Dense(n_inputs, activation=output_activation),\n",
" decoder = tf.keras.Sequential([\n",
" tf.keras.layers.Dense(n_inputs, activation=output_activation),\n",
" ])\n",
" autoencoder = keras.models.Sequential([encoder, decoder])\n",
" autoencoder = tf.keras.Sequential([encoder, decoder])\n",
" autoencoder.compile(optimizer, loss, metrics=metrics)\n",
" autoencoder.fit(X_train, X_train, epochs=n_epochs,\n",
" validation_data=(X_valid, X_valid))\n",
@ -499,15 +499,15 @@
"tf.random.set_seed(42)\n",
"np.random.seed(42)\n",
"\n",
"K = keras.backend\n",
"K = tf.keras.backend\n",
"X_train_flat = K.batch_flatten(X_train) # equivalent to .reshape(-1, 28 * 28)\n",
"X_valid_flat = K.batch_flatten(X_valid)\n",
"enc1, dec1, X_train_enc1, X_valid_enc1 = train_autoencoder(\n",
" 100, X_train_flat, X_valid_flat, \"binary_crossentropy\",\n",
" keras.optimizers.SGD(learning_rate=1.5), output_activation=\"sigmoid\",\n",
" tf.keras.optimizers.SGD(learning_rate=1.5), output_activation=\"sigmoid\",\n",
" metrics=[rounded_accuracy])\n",
"enc2, dec2, _, _ = train_autoencoder(\n",
" 30, X_train_enc1, X_valid_enc1, \"mse\", keras.optimizers.SGD(learning_rate=0.05),\n",
" 30, X_train_enc1, X_valid_enc1, \"mse\", tf.keras.optimizers.SGD(learning_rate=0.05),\n",
" output_activation=\"selu\")"
]
},
@ -517,10 +517,10 @@
"metadata": {},
"outputs": [],
"source": [
"stacked_ae_1_by_1 = keras.models.Sequential([\n",
" keras.layers.Flatten(input_shape=[28, 28]),\n",
"stacked_ae_1_by_1 = tf.keras.Sequential([\n",
" tf.keras.layers.Flatten(input_shape=[28, 28]),\n",
" enc1, enc2, dec2, dec1,\n",
" keras.layers.Reshape([28, 28])\n",
" tf.keras.layers.Reshape([28, 28])\n",
"])"
]
},
@ -541,7 +541,7 @@
"outputs": [],
"source": [
"stacked_ae_1_by_1.compile(loss=\"binary_crossentropy\",\n",
" optimizer=keras.optimizers.SGD(learning_rate=0.1), metrics=[rounded_accuracy])\n",
" optimizer=tf.keras.optimizers.SGD(learning_rate=0.1), metrics=[rounded_accuracy])\n",
"history = stacked_ae_1_by_1.fit(X_train, X_train, epochs=10,\n",
" validation_data=(X_valid, X_valid))"
]
@ -579,25 +579,25 @@
"tf.random.set_seed(42)\n",
"np.random.seed(42)\n",
"\n",
"conv_encoder = keras.models.Sequential([\n",
" keras.layers.Reshape([28, 28, 1], input_shape=[28, 28]),\n",
" keras.layers.Conv2D(16, kernel_size=3, padding=\"SAME\", activation=\"selu\"),\n",
" keras.layers.MaxPool2D(pool_size=2),\n",
" keras.layers.Conv2D(32, kernel_size=3, padding=\"SAME\", activation=\"selu\"),\n",
" keras.layers.MaxPool2D(pool_size=2),\n",
" keras.layers.Conv2D(64, kernel_size=3, padding=\"SAME\", activation=\"selu\"),\n",
" keras.layers.MaxPool2D(pool_size=2)\n",
"conv_encoder = tf.keras.Sequential([\n",
" tf.keras.layers.Reshape([28, 28, 1], input_shape=[28, 28]),\n",
" tf.keras.layers.Conv2D(16, kernel_size=3, padding=\"SAME\", activation=\"selu\"),\n",
" tf.keras.layers.MaxPool2D(pool_size=2),\n",
" tf.keras.layers.Conv2D(32, kernel_size=3, padding=\"SAME\", activation=\"selu\"),\n",
" tf.keras.layers.MaxPool2D(pool_size=2),\n",
" tf.keras.layers.Conv2D(64, kernel_size=3, padding=\"SAME\", activation=\"selu\"),\n",
" tf.keras.layers.MaxPool2D(pool_size=2)\n",
"])\n",
"conv_decoder = keras.models.Sequential([\n",
" keras.layers.Conv2DTranspose(32, kernel_size=3, strides=2, padding=\"VALID\", activation=\"selu\",\n",
"conv_decoder = tf.keras.Sequential([\n",
" tf.keras.layers.Conv2DTranspose(32, kernel_size=3, strides=2, padding=\"VALID\", activation=\"selu\",\n",
" input_shape=[3, 3, 64]),\n",
" keras.layers.Conv2DTranspose(16, kernel_size=3, strides=2, padding=\"SAME\", activation=\"selu\"),\n",
" keras.layers.Conv2DTranspose(1, kernel_size=3, strides=2, padding=\"SAME\", activation=\"sigmoid\"),\n",
" keras.layers.Reshape([28, 28])\n",
" tf.keras.layers.Conv2DTranspose(16, kernel_size=3, strides=2, padding=\"SAME\", activation=\"selu\"),\n",
" tf.keras.layers.Conv2DTranspose(1, kernel_size=3, strides=2, padding=\"SAME\", activation=\"sigmoid\"),\n",
" tf.keras.layers.Reshape([28, 28])\n",
"])\n",
"conv_ae = keras.models.Sequential([conv_encoder, conv_decoder])\n",
"conv_ae = tf.keras.Sequential([conv_encoder, conv_decoder])\n",
"\n",
"conv_ae.compile(loss=\"binary_crossentropy\", optimizer=keras.optimizers.SGD(learning_rate=1.0),\n",
"conv_ae.compile(loss=\"binary_crossentropy\", optimizer=tf.keras.optimizers.SGD(learning_rate=1.0),\n",
" metrics=[rounded_accuracy])\n",
"history = conv_ae.fit(X_train, X_train, epochs=5,\n",
" validation_data=(X_valid, X_valid))"
@ -636,17 +636,17 @@
"metadata": {},
"outputs": [],
"source": [
"recurrent_encoder = keras.models.Sequential([\n",
" keras.layers.LSTM(100, return_sequences=True, input_shape=[28, 28]),\n",
" keras.layers.LSTM(30)\n",
"recurrent_encoder = tf.keras.Sequential([\n",
" tf.keras.layers.LSTM(100, return_sequences=True, input_shape=[28, 28]),\n",
" tf.keras.layers.LSTM(30)\n",
"])\n",
"recurrent_decoder = keras.models.Sequential([\n",
" keras.layers.RepeatVector(28, input_shape=[30]),\n",
" keras.layers.LSTM(100, return_sequences=True),\n",
" keras.layers.TimeDistributed(keras.layers.Dense(28, activation=\"sigmoid\"))\n",
"recurrent_decoder = tf.keras.Sequential([\n",
" tf.keras.layers.RepeatVector(28, input_shape=[30]),\n",
" tf.keras.layers.LSTM(100, return_sequences=True),\n",
" tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(28, activation=\"sigmoid\"))\n",
"])\n",
"recurrent_ae = keras.models.Sequential([recurrent_encoder, recurrent_decoder])\n",
"recurrent_ae.compile(loss=\"binary_crossentropy\", optimizer=keras.optimizers.SGD(0.1),\n",
"recurrent_ae = tf.keras.Sequential([recurrent_encoder, recurrent_decoder])\n",
"recurrent_ae.compile(loss=\"binary_crossentropy\", optimizer=tf.keras.optimizers.SGD(0.1),\n",
" metrics=[rounded_accuracy])"
]
},
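Note: this recurrent autoencoder treats each image as a sequence of 28 rows of 28 pixels; `RepeatVector` feeds the 30-dimensional coding to the decoder LSTM once per output time step. A quick shape check, assuming the models above are defined:

```python
codings = recurrent_encoder.predict(X_valid[:3])
print(codings.shape)                             # (3, 30)
print(recurrent_decoder.predict(codings).shape)  # (3, 28, 28)
```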
@ -692,19 +692,19 @@
"tf.random.set_seed(42)\n",
"np.random.seed(42)\n",
"\n",
"denoising_encoder = keras.models.Sequential([\n",
" keras.layers.Flatten(input_shape=[28, 28]),\n",
" keras.layers.GaussianNoise(0.2),\n",
" keras.layers.Dense(100, activation=\"selu\"),\n",
" keras.layers.Dense(30, activation=\"selu\")\n",
"denoising_encoder = tf.keras.Sequential([\n",
" tf.keras.layers.Flatten(input_shape=[28, 28]),\n",
" tf.keras.layers.GaussianNoise(0.2),\n",
" tf.keras.layers.Dense(100, activation=\"selu\"),\n",
" tf.keras.layers.Dense(30, activation=\"selu\")\n",
"])\n",
"denoising_decoder = keras.models.Sequential([\n",
" keras.layers.Dense(100, activation=\"selu\", input_shape=[30]),\n",
" keras.layers.Dense(28 * 28, activation=\"sigmoid\"),\n",
" keras.layers.Reshape([28, 28])\n",
"denoising_decoder = tf.keras.Sequential([\n",
" tf.keras.layers.Dense(100, activation=\"selu\", input_shape=[30]),\n",
" tf.keras.layers.Dense(28 * 28, activation=\"sigmoid\"),\n",
" tf.keras.layers.Reshape([28, 28])\n",
"])\n",
"denoising_ae = keras.models.Sequential([denoising_encoder, denoising_decoder])\n",
"denoising_ae.compile(loss=\"binary_crossentropy\", optimizer=keras.optimizers.SGD(learning_rate=1.0),\n",
"denoising_ae = tf.keras.Sequential([denoising_encoder, denoising_decoder])\n",
"denoising_ae.compile(loss=\"binary_crossentropy\", optimizer=tf.keras.optimizers.SGD(learning_rate=1.0),\n",
" metrics=[rounded_accuracy])\n",
"history = denoising_ae.fit(X_train, X_train, epochs=10,\n",
" validation_data=(X_valid, X_valid))"
@ -719,7 +719,7 @@
"tf.random.set_seed(42)\n",
"np.random.seed(42)\n",
"\n",
"noise = keras.layers.GaussianNoise(0.2)\n",
"noise = tf.keras.layers.GaussianNoise(0.2)\n",
"show_reconstructions(denoising_ae, noise(X_valid, training=True))\n",
"plt.show()"
]
@ -740,19 +740,19 @@
"tf.random.set_seed(42)\n",
"np.random.seed(42)\n",
"\n",
"dropout_encoder = keras.models.Sequential([\n",
" keras.layers.Flatten(input_shape=[28, 28]),\n",
" keras.layers.Dropout(0.5),\n",
" keras.layers.Dense(100, activation=\"selu\"),\n",
" keras.layers.Dense(30, activation=\"selu\")\n",
"dropout_encoder = tf.keras.Sequential([\n",
" tf.keras.layers.Flatten(input_shape=[28, 28]),\n",
" tf.keras.layers.Dropout(0.5),\n",
" tf.keras.layers.Dense(100, activation=\"selu\"),\n",
" tf.keras.layers.Dense(30, activation=\"selu\")\n",
"])\n",
"dropout_decoder = keras.models.Sequential([\n",
" keras.layers.Dense(100, activation=\"selu\", input_shape=[30]),\n",
" keras.layers.Dense(28 * 28, activation=\"sigmoid\"),\n",
" keras.layers.Reshape([28, 28])\n",
"dropout_decoder = tf.keras.Sequential([\n",
" tf.keras.layers.Dense(100, activation=\"selu\", input_shape=[30]),\n",
" tf.keras.layers.Dense(28 * 28, activation=\"sigmoid\"),\n",
" tf.keras.layers.Reshape([28, 28])\n",
"])\n",
"dropout_ae = keras.models.Sequential([dropout_encoder, dropout_decoder])\n",
"dropout_ae.compile(loss=\"binary_crossentropy\", optimizer=keras.optimizers.SGD(learning_rate=1.0),\n",
"dropout_ae = tf.keras.Sequential([dropout_encoder, dropout_decoder])\n",
"dropout_ae.compile(loss=\"binary_crossentropy\", optimizer=tf.keras.optimizers.SGD(learning_rate=1.0),\n",
" metrics=[rounded_accuracy])\n",
"history = dropout_ae.fit(X_train, X_train, epochs=10,\n",
" validation_data=(X_valid, X_valid))"
@ -767,7 +767,7 @@
"tf.random.set_seed(42)\n",
"np.random.seed(42)\n",
"\n",
"dropout = keras.layers.Dropout(0.5)\n",
"dropout = tf.keras.layers.Dropout(0.5)\n",
"show_reconstructions(dropout_ae, dropout(X_valid, training=True))\n",
"save_fig(\"dropout_denoising_plot\", tight_layout=False)"
]
@ -795,18 +795,18 @@
"tf.random.set_seed(42)\n",
"np.random.seed(42)\n",
"\n",
"simple_encoder = keras.models.Sequential([\n",
" keras.layers.Flatten(input_shape=[28, 28]),\n",
" keras.layers.Dense(100, activation=\"selu\"),\n",
" keras.layers.Dense(30, activation=\"sigmoid\"),\n",
"simple_encoder = tf.keras.Sequential([\n",
" tf.keras.layers.Flatten(input_shape=[28, 28]),\n",
" tf.keras.layers.Dense(100, activation=\"selu\"),\n",
" tf.keras.layers.Dense(30, activation=\"sigmoid\"),\n",
"])\n",
"simple_decoder = keras.models.Sequential([\n",
" keras.layers.Dense(100, activation=\"selu\", input_shape=[30]),\n",
" keras.layers.Dense(28 * 28, activation=\"sigmoid\"),\n",
" keras.layers.Reshape([28, 28])\n",
"simple_decoder = tf.keras.Sequential([\n",
" tf.keras.layers.Dense(100, activation=\"selu\", input_shape=[30]),\n",
" tf.keras.layers.Dense(28 * 28, activation=\"sigmoid\"),\n",
" tf.keras.layers.Reshape([28, 28])\n",
"])\n",
"simple_ae = keras.models.Sequential([simple_encoder, simple_decoder])\n",
"simple_ae.compile(loss=\"binary_crossentropy\", optimizer=keras.optimizers.SGD(learning_rate=1.),\n",
"simple_ae = tf.keras.Sequential([simple_encoder, simple_decoder])\n",
"simple_ae.compile(loss=\"binary_crossentropy\", optimizer=tf.keras.optimizers.SGD(learning_rate=1.),\n",
" metrics=[rounded_accuracy])\n",
"history = simple_ae.fit(X_train, X_train, epochs=10,\n",
" validation_data=(X_valid, X_valid))"
@ -905,21 +905,21 @@
"tf.random.set_seed(42)\n",
"np.random.seed(42)\n",
"\n",
"sparse_l1_encoder = keras.models.Sequential([\n",
" keras.layers.Flatten(input_shape=[28, 28]),\n",
" keras.layers.Dense(100, activation=\"selu\"),\n",
" keras.layers.Dense(300, activation=\"sigmoid\"),\n",
" keras.layers.ActivityRegularization(l1=1e-3) # Alternatively, you could add\n",
" # activity_regularizer=keras.regularizers.l1(1e-3)\n",
"sparse_l1_encoder = tf.keras.Sequential([\n",
" tf.keras.layers.Flatten(input_shape=[28, 28]),\n",
" tf.keras.layers.Dense(100, activation=\"selu\"),\n",
" tf.keras.layers.Dense(300, activation=\"sigmoid\"),\n",
" tf.keras.layers.ActivityRegularization(l1=1e-3) # Alternatively, you could add\n",
" # activity_regularizer=tf.keras.regularizers.l1(1e-3)\n",
" # to the previous layer.\n",
"])\n",
"sparse_l1_decoder = keras.models.Sequential([\n",
" keras.layers.Dense(100, activation=\"selu\", input_shape=[300]),\n",
" keras.layers.Dense(28 * 28, activation=\"sigmoid\"),\n",
" keras.layers.Reshape([28, 28])\n",
"sparse_l1_decoder = tf.keras.Sequential([\n",
" tf.keras.layers.Dense(100, activation=\"selu\", input_shape=[300]),\n",
" tf.keras.layers.Dense(28 * 28, activation=\"sigmoid\"),\n",
" tf.keras.layers.Reshape([28, 28])\n",
"])\n",
"sparse_l1_ae = keras.models.Sequential([sparse_l1_encoder, sparse_l1_decoder])\n",
"sparse_l1_ae.compile(loss=\"binary_crossentropy\", optimizer=keras.optimizers.SGD(learning_rate=1.0),\n",
"sparse_l1_ae = tf.keras.Sequential([sparse_l1_encoder, sparse_l1_decoder])\n",
"sparse_l1_ae.compile(loss=\"binary_crossentropy\", optimizer=tf.keras.optimizers.SGD(learning_rate=1.0),\n",
" metrics=[rounded_accuracy])\n",
"history = sparse_l1_ae.fit(X_train, X_train, epochs=10,\n",
" validation_data=(X_valid, X_valid))"
@ -980,10 +980,10 @@
"metadata": {},
"outputs": [],
"source": [
"K = keras.backend\n",
"kl_divergence = keras.losses.kullback_leibler_divergence\n",
"K = tf.keras.backend\n",
"kl_divergence = tf.keras.losses.kullback_leibler_divergence\n",
"\n",
"class KLDivergenceRegularizer(keras.regularizers.Regularizer):\n",
"class KLDivergenceRegularizer(tf.keras.regularizers.Regularizer):\n",
" def __init__(self, weight, target=0.1):\n",
" self.weight = weight\n",
" self.target = target\n",
@ -1004,18 +1004,18 @@
"np.random.seed(42)\n",
"\n",
"kld_reg = KLDivergenceRegularizer(weight=0.05, target=0.1)\n",
"sparse_kl_encoder = keras.models.Sequential([\n",
" keras.layers.Flatten(input_shape=[28, 28]),\n",
" keras.layers.Dense(100, activation=\"selu\"),\n",
" keras.layers.Dense(300, activation=\"sigmoid\", activity_regularizer=kld_reg)\n",
"sparse_kl_encoder = tf.keras.Sequential([\n",
" tf.keras.layers.Flatten(input_shape=[28, 28]),\n",
" tf.keras.layers.Dense(100, activation=\"selu\"),\n",
" tf.keras.layers.Dense(300, activation=\"sigmoid\", activity_regularizer=kld_reg)\n",
"])\n",
"sparse_kl_decoder = keras.models.Sequential([\n",
" keras.layers.Dense(100, activation=\"selu\", input_shape=[300]),\n",
" keras.layers.Dense(28 * 28, activation=\"sigmoid\"),\n",
" keras.layers.Reshape([28, 28])\n",
"sparse_kl_decoder = tf.keras.Sequential([\n",
" tf.keras.layers.Dense(100, activation=\"selu\", input_shape=[300]),\n",
" tf.keras.layers.Dense(28 * 28, activation=\"sigmoid\"),\n",
" tf.keras.layers.Reshape([28, 28])\n",
"])\n",
"sparse_kl_ae = keras.models.Sequential([sparse_kl_encoder, sparse_kl_decoder])\n",
"sparse_kl_ae.compile(loss=\"binary_crossentropy\", optimizer=keras.optimizers.SGD(learning_rate=1.0),\n",
"sparse_kl_ae = tf.keras.Sequential([sparse_kl_encoder, sparse_kl_decoder])\n",
"sparse_kl_ae.compile(loss=\"binary_crossentropy\", optimizer=tf.keras.optimizers.SGD(learning_rate=1.0),\n",
" metrics=[rounded_accuracy])\n",
"history = sparse_kl_ae.fit(X_train, X_train, epochs=10,\n",
" validation_data=(X_valid, X_valid))"
@ -1054,7 +1054,7 @@
"metadata": {},
"outputs": [],
"source": [
"class Sampling(keras.layers.Layer):\n",
"class Sampling(tf.keras.layers.Layer):\n",
" def call(self, inputs):\n",
" mean, log_var = inputs\n",
" return K.random_normal(tf.shape(log_var)) * K.exp(log_var / 2) + mean "
@ -1071,26 +1071,26 @@
"\n",
"codings_size = 10\n",
"\n",
"inputs = keras.layers.Input(shape=[28, 28])\n",
"z = keras.layers.Flatten()(inputs)\n",
"z = keras.layers.Dense(150, activation=\"selu\")(z)\n",
"z = keras.layers.Dense(100, activation=\"selu\")(z)\n",
"codings_mean = keras.layers.Dense(codings_size)(z)\n",
"codings_log_var = keras.layers.Dense(codings_size)(z)\n",
"inputs = tf.keras.layers.Input(shape=[28, 28])\n",
"z = tf.keras.layers.Flatten()(inputs)\n",
"z = tf.keras.layers.Dense(150, activation=\"selu\")(z)\n",
"z = tf.keras.layers.Dense(100, activation=\"selu\")(z)\n",
"codings_mean = tf.keras.layers.Dense(codings_size)(z)\n",
"codings_log_var = tf.keras.layers.Dense(codings_size)(z)\n",
"codings = Sampling()([codings_mean, codings_log_var])\n",
"variational_encoder = keras.models.Model(\n",
"variational_encoder = tf.keras.Model(\n",
" inputs=[inputs], outputs=[codings_mean, codings_log_var, codings])\n",
"\n",
"decoder_inputs = keras.layers.Input(shape=[codings_size])\n",
"x = keras.layers.Dense(100, activation=\"selu\")(decoder_inputs)\n",
"x = keras.layers.Dense(150, activation=\"selu\")(x)\n",
"x = keras.layers.Dense(28 * 28, activation=\"sigmoid\")(x)\n",
"outputs = keras.layers.Reshape([28, 28])(x)\n",
"variational_decoder = keras.models.Model(inputs=[decoder_inputs], outputs=[outputs])\n",
"decoder_inputs = tf.keras.layers.Input(shape=[codings_size])\n",
"x = tf.keras.layers.Dense(100, activation=\"selu\")(decoder_inputs)\n",
"x = tf.keras.layers.Dense(150, activation=\"selu\")(x)\n",
"x = tf.keras.layers.Dense(28 * 28, activation=\"sigmoid\")(x)\n",
"outputs = tf.keras.layers.Reshape([28, 28])(x)\n",
"variational_decoder = tf.keras.Model(inputs=[decoder_inputs], outputs=[outputs])\n",
"\n",
"_, _, codings = variational_encoder(inputs)\n",
"reconstructions = variational_decoder(codings)\n",
"variational_ae = keras.models.Model(inputs=[inputs], outputs=[reconstructions])\n",
"variational_ae = tf.keras.Model(inputs=[inputs], outputs=[reconstructions])\n",
"\n",
"latent_loss = -0.5 * K.sum(\n",
" 1 + codings_log_var - K.exp(codings_log_var) - K.square(codings_mean),\n",
@ -1210,19 +1210,19 @@
"\n",
"codings_size = 30\n",
"\n",
"generator = keras.models.Sequential([\n",
" keras.layers.Dense(100, activation=\"selu\", input_shape=[codings_size]),\n",
" keras.layers.Dense(150, activation=\"selu\"),\n",
" keras.layers.Dense(28 * 28, activation=\"sigmoid\"),\n",
" keras.layers.Reshape([28, 28])\n",
"generator = tf.keras.Sequential([\n",
" tf.keras.layers.Dense(100, activation=\"selu\", input_shape=[codings_size]),\n",
" tf.keras.layers.Dense(150, activation=\"selu\"),\n",
" tf.keras.layers.Dense(28 * 28, activation=\"sigmoid\"),\n",
" tf.keras.layers.Reshape([28, 28])\n",
"])\n",
"discriminator = keras.models.Sequential([\n",
" keras.layers.Flatten(input_shape=[28, 28]),\n",
" keras.layers.Dense(150, activation=\"selu\"),\n",
" keras.layers.Dense(100, activation=\"selu\"),\n",
" keras.layers.Dense(1, activation=\"sigmoid\")\n",
"discriminator = tf.keras.Sequential([\n",
" tf.keras.layers.Flatten(input_shape=[28, 28]),\n",
" tf.keras.layers.Dense(150, activation=\"selu\"),\n",
" tf.keras.layers.Dense(100, activation=\"selu\"),\n",
" tf.keras.layers.Dense(1, activation=\"sigmoid\")\n",
"])\n",
"gan = keras.models.Sequential([generator, discriminator])"
"gan = tf.keras.Sequential([generator, discriminator])"
]
},
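Note: the loop that actually trains this GAN falls outside the hunks shown here. As a reminder of how the two phases alternate, here is a sketch along the lines of the chapter's `train_gan()`; it assumes the discriminator and the full `gan` have each been compiled with binary cross-entropy, and names and details are assumed rather than copied from the elided cells:

```python
import tensorflow as tf

def train_gan(gan, dataset, batch_size, codings_size, n_epochs=50):
    generator, discriminator = gan.layers
    for epoch in range(n_epochs):
        for X_batch in dataset:
            # Phase 1 - train the discriminator on half fake, half real images
            noise = tf.random.normal(shape=[batch_size, codings_size])
            generated_images = generator(noise)
            X_fake_and_real = tf.concat([generated_images, X_batch], axis=0)
            y1 = tf.constant([[0.]] * batch_size + [[1.]] * batch_size)
            discriminator.trainable = True
            discriminator.train_on_batch(X_fake_and_real, y1)
            # Phase 2 - train the generator to fool the frozen discriminator
            noise = tf.random.normal(shape=[batch_size, codings_size])
            y2 = tf.constant([[1.]] * batch_size)
            discriminator.trainable = False
            gan.train_on_batch(noise, y2)
```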
{
@ -1325,28 +1325,28 @@
"\n",
"codings_size = 100\n",
"\n",
"generator = keras.models.Sequential([\n",
" keras.layers.Dense(7 * 7 * 128, input_shape=[codings_size]),\n",
" keras.layers.Reshape([7, 7, 128]),\n",
" keras.layers.BatchNormalization(),\n",
" keras.layers.Conv2DTranspose(64, kernel_size=5, strides=2, padding=\"SAME\",\n",
"generator = tf.keras.Sequential([\n",
" tf.keras.layers.Dense(7 * 7 * 128, input_shape=[codings_size]),\n",
" tf.keras.layers.Reshape([7, 7, 128]),\n",
" tf.keras.layers.BatchNormalization(),\n",
" tf.keras.layers.Conv2DTranspose(64, kernel_size=5, strides=2, padding=\"SAME\",\n",
" activation=\"selu\"),\n",
" keras.layers.BatchNormalization(),\n",
" keras.layers.Conv2DTranspose(1, kernel_size=5, strides=2, padding=\"SAME\",\n",
" tf.keras.layers.BatchNormalization(),\n",
" tf.keras.layers.Conv2DTranspose(1, kernel_size=5, strides=2, padding=\"SAME\",\n",
" activation=\"tanh\"),\n",
"])\n",
"discriminator = keras.models.Sequential([\n",
" keras.layers.Conv2D(64, kernel_size=5, strides=2, padding=\"SAME\",\n",
" activation=keras.layers.LeakyReLU(0.2),\n",
"discriminator = tf.keras.Sequential([\n",
" tf.keras.layers.Conv2D(64, kernel_size=5, strides=2, padding=\"SAME\",\n",
" activation=tf.keras.layers.LeakyReLU(0.2),\n",
" input_shape=[28, 28, 1]),\n",
" keras.layers.Dropout(0.4),\n",
" keras.layers.Conv2D(128, kernel_size=5, strides=2, padding=\"SAME\",\n",
" activation=keras.layers.LeakyReLU(0.2)),\n",
" keras.layers.Dropout(0.4),\n",
" keras.layers.Flatten(),\n",
" keras.layers.Dense(1, activation=\"sigmoid\")\n",
" tf.keras.layers.Dropout(0.4),\n",
" tf.keras.layers.Conv2D(128, kernel_size=5, strides=2, padding=\"SAME\",\n",
" activation=tf.keras.layers.LeakyReLU(0.2)),\n",
" tf.keras.layers.Dropout(0.4),\n",
" tf.keras.layers.Flatten(),\n",
" tf.keras.layers.Dense(1, activation=\"sigmoid\")\n",
"])\n",
"gan = keras.models.Sequential([generator, discriminator])"
"gan = tf.keras.Sequential([generator, discriminator])"
]
},
{
@ -1432,7 +1432,7 @@
"metadata": {},
"outputs": [],
"source": [
"(X_train_full, y_train_full), (X_test, y_test) = keras.datasets.fashion_mnist.load_data()\n",
"(X_train_full, y_train_full), (X_test, y_test) = tf.keras.datasets.fashion_mnist.load_data()\n",
"X_train_full = X_train_full.astype(np.float32) / 255\n",
"X_test = X_test.astype(np.float32) / 255\n",
"X_train, X_valid = X_train_full[:-5000], X_train_full[-5000:]\n",
@ -1455,19 +1455,19 @@
"tf.random.set_seed(42)\n",
"np.random.seed(42)\n",
"\n",
"hashing_encoder = keras.models.Sequential([\n",
" keras.layers.Flatten(input_shape=[28, 28]),\n",
" keras.layers.Dense(100, activation=\"selu\"),\n",
" keras.layers.GaussianNoise(15.),\n",
" keras.layers.Dense(16, activation=\"sigmoid\"),\n",
"hashing_encoder = tf.keras.Sequential([\n",
" tf.keras.layers.Flatten(input_shape=[28, 28]),\n",
" tf.keras.layers.Dense(100, activation=\"selu\"),\n",
" tf.keras.layers.GaussianNoise(15.),\n",
" tf.keras.layers.Dense(16, activation=\"sigmoid\"),\n",
"])\n",
"hashing_decoder = keras.models.Sequential([\n",
" keras.layers.Dense(100, activation=\"selu\", input_shape=[16]),\n",
" keras.layers.Dense(28 * 28, activation=\"sigmoid\"),\n",
" keras.layers.Reshape([28, 28])\n",
"hashing_decoder = tf.keras.Sequential([\n",
" tf.keras.layers.Dense(100, activation=\"selu\", input_shape=[16]),\n",
" tf.keras.layers.Dense(28 * 28, activation=\"sigmoid\"),\n",
" tf.keras.layers.Reshape([28, 28])\n",
"])\n",
"hashing_ae = keras.models.Sequential([hashing_encoder, hashing_decoder])\n",
"hashing_ae.compile(loss=\"binary_crossentropy\", optimizer=keras.optimizers.Nadam(),\n",
"hashing_ae = tf.keras.Sequential([hashing_encoder, hashing_decoder])\n",
"hashing_ae.compile(loss=\"binary_crossentropy\", optimizer=tf.keras.optimizers.Nadam(),\n",
" metrics=[rounded_accuracy])\n",
"history = hashing_ae.fit(X_train, X_train, epochs=10,\n",
" validation_data=(X_valid, X_valid))"
@ -1590,7 +1590,7 @@
"metadata": {},
"outputs": [],
"source": [
"[X_train, y_train], [X_test, y_test] = keras.datasets.cifar10.load_data()\n",
"[X_train, y_train], [X_test, y_test] = tf.keras.datasets.cifar10.load_data()\n",
"X_train = X_train / 255\n",
"X_test = X_test / 255"
]
@ -1604,12 +1604,12 @@
"tf.random.set_seed(42)\n",
"np.random.seed(42)\n",
"\n",
"denoising_encoder = keras.models.Sequential([\n",
" keras.layers.GaussianNoise(0.1, input_shape=[32, 32, 3]),\n",
" keras.layers.Conv2D(32, kernel_size=3, padding=\"same\", activation=\"relu\"),\n",
" keras.layers.MaxPool2D(),\n",
" keras.layers.Flatten(),\n",
" keras.layers.Dense(512, activation=\"relu\"),\n",
"denoising_encoder = tf.keras.Sequential([\n",
" tf.keras.layers.GaussianNoise(0.1, input_shape=[32, 32, 3]),\n",
" tf.keras.layers.Conv2D(32, kernel_size=3, padding=\"same\", activation=\"relu\"),\n",
" tf.keras.layers.MaxPool2D(),\n",
" tf.keras.layers.Flatten(),\n",
" tf.keras.layers.Dense(512, activation=\"relu\"),\n",
"])"
]
},
@ -1628,10 +1628,10 @@
"metadata": {},
"outputs": [],
"source": [
"denoising_decoder = keras.models.Sequential([\n",
" keras.layers.Dense(16 * 16 * 32, activation=\"relu\", input_shape=[512]),\n",
" keras.layers.Reshape([16, 16, 32]),\n",
" keras.layers.Conv2DTranspose(filters=3, kernel_size=3, strides=2,\n",
"denoising_decoder = tf.keras.Sequential([\n",
" tf.keras.layers.Dense(16 * 16 * 32, activation=\"relu\", input_shape=[512]),\n",
" tf.keras.layers.Reshape([16, 16, 32]),\n",
" tf.keras.layers.Conv2DTranspose(filters=3, kernel_size=3, strides=2,\n",
" padding=\"same\", activation=\"sigmoid\")\n",
"])"
]
@ -1651,8 +1651,8 @@
"metadata": {},
"outputs": [],
"source": [
"denoising_ae = keras.models.Sequential([denoising_encoder, denoising_decoder])\n",
"denoising_ae.compile(loss=\"binary_crossentropy\", optimizer=keras.optimizers.Nadam(),\n",
"denoising_ae = tf.keras.Sequential([denoising_encoder, denoising_decoder])\n",
"denoising_ae.compile(loss=\"binary_crossentropy\", optimizer=tf.keras.optimizers.Nadam(),\n",
" metrics=[\"mse\"])\n",
"history = denoising_ae.fit(X_train, X_train, epochs=10,\n",
" validation_data=(X_test, X_test))"

View File

@ -575,15 +575,15 @@
"metadata": {},
"outputs": [],
"source": [
"keras.backend.clear_session()\n",
"tf.keras.backend.clear_session()\n",
"tf.random.set_seed(42)\n",
"np.random.seed(42)\n",
"\n",
"n_inputs = 4 # == env.observation_space.shape[0]\n",
"\n",
"model = keras.models.Sequential([\n",
" keras.layers.Dense(5, activation=\"elu\", input_shape=[n_inputs]),\n",
" keras.layers.Dense(1, activation=\"sigmoid\"),\n",
"model = tf.keras.Sequential([\n",
" tf.keras.layers.Dense(5, activation=\"elu\", input_shape=[n_inputs]),\n",
" tf.keras.layers.Dense(1, activation=\"sigmoid\"),\n",
"])"
]
},
@ -676,8 +676,8 @@
" env.seed(index)\n",
"np.random.seed(42)\n",
"observations = [env.reset() for env in envs]\n",
"optimizer = keras.optimizers.RMSprop()\n",
"loss_fn = keras.losses.binary_crossentropy\n",
"optimizer = tf.keras.optimizers.RMSprop()\n",
"loss_fn = tf.keras.losses.binary_crossentropy\n",
"\n",
"for iteration in range(n_iterations):\n",
" # if angle < 0, we want proba(left) = 1., or else proba(left) = 0.\n",
@ -874,8 +874,8 @@
"metadata": {},
"outputs": [],
"source": [
"optimizer = keras.optimizers.Adam(learning_rate=0.01)\n",
"loss_fn = keras.losses.binary_crossentropy"
"optimizer = tf.keras.optimizers.Adam(learning_rate=0.01)\n",
"loss_fn = tf.keras.losses.binary_crossentropy"
]
},
{
@ -884,13 +884,13 @@
"metadata": {},
"outputs": [],
"source": [
"keras.backend.clear_session()\n",
"tf.keras.backend.clear_session()\n",
"np.random.seed(42)\n",
"tf.random.set_seed(42)\n",
"\n",
"model = keras.models.Sequential([\n",
" keras.layers.Dense(5, activation=\"elu\", input_shape=[4]),\n",
" keras.layers.Dense(1, activation=\"sigmoid\"),\n",
"model = tf.keras.Sequential([\n",
" tf.keras.layers.Dense(5, activation=\"elu\", input_shape=[4]),\n",
" tf.keras.layers.Dense(1, activation=\"sigmoid\"),\n",
"])"
]
},
@ -1277,7 +1277,7 @@
"metadata": {},
"outputs": [],
"source": [
"keras.backend.clear_session()\n",
"tf.keras.backend.clear_session()\n",
"tf.random.set_seed(42)\n",
"np.random.seed(42)\n",
"\n",
@ -1285,10 +1285,10 @@
"input_shape = [4] # == env.observation_space.shape\n",
"n_outputs = 2 # == env.action_space.n\n",
"\n",
"model = keras.models.Sequential([\n",
" keras.layers.Dense(32, activation=\"elu\", input_shape=input_shape),\n",
" keras.layers.Dense(32, activation=\"elu\"),\n",
" keras.layers.Dense(n_outputs)\n",
"model = tf.keras.Sequential([\n",
" tf.keras.layers.Dense(32, activation=\"elu\", input_shape=input_shape),\n",
" tf.keras.layers.Dense(32, activation=\"elu\"),\n",
" tf.keras.layers.Dense(n_outputs)\n",
"])"
]
},
@ -1392,8 +1392,8 @@
"source": [
"batch_size = 32\n",
"discount_rate = 0.95\n",
"optimizer = keras.optimizers.Adam(learning_rate=1e-2)\n",
"loss_fn = keras.losses.mean_squared_error\n",
"optimizer = tf.keras.optimizers.Adam(learning_rate=1e-2)\n",
"loss_fn = tf.keras.losses.mean_squared_error\n",
"\n",
"def training_step(batch_size):\n",
" experiences = sample_experiences(batch_size)\n",
@ -1515,17 +1515,17 @@
"metadata": {},
"outputs": [],
"source": [
"keras.backend.clear_session()\n",
"tf.keras.backend.clear_session()\n",
"tf.random.set_seed(42)\n",
"np.random.seed(42)\n",
"\n",
"model = keras.models.Sequential([\n",
" keras.layers.Dense(32, activation=\"elu\", input_shape=[4]),\n",
" keras.layers.Dense(32, activation=\"elu\"),\n",
" keras.layers.Dense(n_outputs)\n",
"model = tf.keras.Sequential([\n",
" tf.keras.layers.Dense(32, activation=\"elu\", input_shape=[4]),\n",
" tf.keras.layers.Dense(32, activation=\"elu\"),\n",
" tf.keras.layers.Dense(n_outputs)\n",
"])\n",
"\n",
"target = keras.models.clone_model(model)\n",
"target = tf.keras.models.clone_model(model)\n",
"target.set_weights(model.get_weights())"
]
},
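Note: `clone_model()` copies the architecture only, not the weights, which is why the explicit `set_weights()` call is needed to start the target network as an exact copy of the online model. A quick illustration:

```python
import numpy as np
import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(2, input_shape=[4])])
target = tf.keras.models.clone_model(model)  # same architecture, fresh weights
print(np.allclose(model.get_weights()[0], target.get_weights()[0]))  # almost surely False
target.set_weights(model.get_weights())
print(np.allclose(model.get_weights()[0], target.get_weights()[0]))  # True
```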
@ -1537,8 +1537,8 @@
"source": [
"batch_size = 32\n",
"discount_rate = 0.95\n",
"optimizer = keras.optimizers.Adam(learning_rate=6e-3)\n",
"loss_fn = keras.losses.Huber()\n",
"optimizer = tf.keras.optimizers.Adam(learning_rate=6e-3)\n",
"loss_fn = tf.keras.losses.Huber()\n",
"\n",
"def training_step(batch_size):\n",
" experiences = sample_experiences(batch_size)\n",
@ -1659,21 +1659,21 @@
"metadata": {},
"outputs": [],
"source": [
"keras.backend.clear_session()\n",
"tf.keras.backend.clear_session()\n",
"tf.random.set_seed(42)\n",
"np.random.seed(42)\n",
"\n",
"K = keras.backend\n",
"input_states = keras.layers.Input(shape=[4])\n",
"hidden1 = keras.layers.Dense(32, activation=\"elu\")(input_states)\n",
"hidden2 = keras.layers.Dense(32, activation=\"elu\")(hidden1)\n",
"state_values = keras.layers.Dense(1)(hidden2)\n",
"raw_advantages = keras.layers.Dense(n_outputs)(hidden2)\n",
"K = tf.keras.backend\n",
"input_states = tf.keras.layers.Input(shape=[4])\n",
"hidden1 = tf.keras.layers.Dense(32, activation=\"elu\")(input_states)\n",
"hidden2 = tf.keras.layers.Dense(32, activation=\"elu\")(hidden1)\n",
"state_values = tf.keras.layers.Dense(1)(hidden2)\n",
"raw_advantages = tf.keras.layers.Dense(n_outputs)(hidden2)\n",
"advantages = raw_advantages - K.max(raw_advantages, axis=1, keepdims=True)\n",
"Q_values = state_values + advantages\n",
"model = keras.models.Model(inputs=[input_states], outputs=[Q_values])\n",
"model = tf.keras.Model(inputs=[input_states], outputs=[Q_values])\n",
"\n",
"target = keras.models.clone_model(model)\n",
"target = tf.keras.models.clone_model(model)\n",
"target.set_weights(model.get_weights())"
]
},
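Note: in this dueling network the Q-values are recomposed as Q(s, a) = V(s) + A(s, a) - max over a' of A(s, a'), so the best action's advantage is pinned at 0 and the `state_values` stream is forced to learn V(s) itself. A tiny numeric illustration with made-up values:

```python
import numpy as np

state_value = np.array([[2.0]])           # V(s)
raw_advantages = np.array([[1.0, 3.0]])   # unconstrained advantage stream
advantages = raw_advantages - raw_advantages.max(axis=1, keepdims=True)
print(state_value + advantages)           # [[1. 2.]] == Q(s, a)
```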
@ -1685,8 +1685,8 @@
"source": [
"batch_size = 32\n",
"discount_rate = 0.95\n",
"optimizer = keras.optimizers.Adam(learning_rate=7.5e-3)\n",
"loss_fn = keras.losses.Huber()\n",
"optimizer = tf.keras.optimizers.Adam(learning_rate=7.5e-3)\n",
"loss_fn = tf.keras.losses.Huber()\n",
"\n",
"def training_step(batch_size):\n",
" experiences = sample_experiences(batch_size)\n",
@ -2180,7 +2180,7 @@
"source": [
"from tf_agents.networks.q_network import QNetwork\n",
"\n",
"preprocessing_layer = keras.layers.Lambda(\n",
"preprocessing_layer = tf.keras.layers.Lambda(\n",
" lambda obs: tf.cast(obs, np.float32) / 255.)\n",
"conv_layer_params=[(32, (8, 8), 4), (64, (4, 4), 2), (64, (3, 3), 1)]\n",
"fc_layer_params=[512]\n",
@ -2210,9 +2210,9 @@
"\n",
"train_step = tf.Variable(0)\n",
"update_period = 4 # run a training step every 4 collect steps\n",
"optimizer = keras.optimizers.RMSprop(learning_rate=2.5e-4, rho=0.95, momentum=0.0,\n",
"optimizer = tf.keras.optimizers.RMSprop(learning_rate=2.5e-4, rho=0.95, momentum=0.0,\n",
" epsilon=0.00001, centered=True)\n",
"epsilon_fn = keras.optimizers.schedules.PolynomialDecay(\n",
"epsilon_fn = tf.keras.optimizers.schedules.PolynomialDecay(\n",
" initial_learning_rate=1.0, # initial ε\n",
" decay_steps=250000 // update_period, # <=> 1,000,000 ALE frames\n",
" end_learning_rate=0.01) # final ε\n",
@ -2221,7 +2221,7 @@
" q_network=q_net,\n",
" optimizer=optimizer,\n",
" target_update_period=2000, # <=> 32,000 ALE frames\n",
" td_errors_loss_fn=keras.losses.Huber(reduction=\"none\"),\n",
" td_errors_loss_fn=tf.keras.losses.Huber(reduction=\"none\"),\n",
" gamma=0.99, # discount factor\n",
" train_step_counter=train_step,\n",
" epsilon_greedy=lambda: epsilon_fn(train_step))\n",
@ -2911,17 +2911,17 @@
"metadata": {},
"outputs": [],
"source": [
"keras.backend.clear_session()\n",
"tf.keras.backend.clear_session()\n",
"np.random.seed(42)\n",
"tf.random.set_seed(42)\n",
"\n",
"n_inputs = env.observation_space.shape[0]\n",
"n_outputs = env.action_space.n\n",
"\n",
"model = keras.models.Sequential([\n",
" keras.layers.Dense(32, activation=\"relu\", input_shape=[n_inputs]),\n",
" keras.layers.Dense(32, activation=\"relu\"),\n",
" keras.layers.Dense(n_outputs, activation=\"softmax\"),\n",
"model = tf.keras.Sequential([\n",
" tf.keras.layers.Dense(32, activation=\"relu\", input_shape=[n_inputs]),\n",
" tf.keras.layers.Dense(32, activation=\"relu\"),\n",
" tf.keras.layers.Dense(n_outputs, activation=\"softmax\"),\n",
"])"
]
},
@ -2949,7 +2949,7 @@
"def lander_play_one_step(env, obs, model, loss_fn):\n",
" with tf.GradientTape() as tape:\n",
" probas = model(obs[np.newaxis])\n",
" logits = tf.math.log(probas + keras.backend.epsilon())\n",
" logits = tf.math.log(probas + tf.keras.backend.epsilon())\n",
" action = tf.random.categorical(logits, num_samples=1)\n",
" loss = tf.reduce_mean(loss_fn(action, probas))\n",
" grads = tape.gradient(loss, model.trainable_variables)\n",
@ -3035,8 +3035,8 @@
"metadata": {},
"outputs": [],
"source": [
"optimizer = keras.optimizers.Nadam(learning_rate=0.005)\n",
"loss_fn = keras.losses.sparse_categorical_crossentropy"
"optimizer = tf.keras.optimizers.Nadam(learning_rate=0.005)\n",
"loss_fn = tf.keras.losses.sparse_categorical_crossentropy"
]
},
{
@ -3120,7 +3120,7 @@
" for step in range(n_max_steps):\n",
" frames.append(env.render(mode=\"rgb_array\"))\n",
" probas = model(obs[np.newaxis])\n",
" logits = tf.math.log(probas + keras.backend.epsilon())\n",
" logits = tf.math.log(probas + tf.keras.backend.epsilon())\n",
" action = tf.random.categorical(logits, num_samples=1)\n",
" obs, reward, done, info = env.step(action[0, 0].numpy())\n",
" if done:\n",

View File

@ -127,7 +127,7 @@
"metadata": {},
"outputs": [],
"source": [
"(X_train_full, y_train_full), (X_test, y_test) = keras.datasets.mnist.load_data()\n",
"(X_train_full, y_train_full), (X_test, y_test) = tf.keras.datasets.mnist.load_data()\n",
"X_train_full = X_train_full[..., np.newaxis].astype(np.float32) / 255.\n",
"X_test = X_test[..., np.newaxis].astype(np.float32) / 255.\n",
"X_valid, X_train = X_train_full[:5000], X_train_full[5000:]\n",
@ -144,13 +144,13 @@
"np.random.seed(42)\n",
"tf.random.set_seed(42)\n",
"\n",
"model = keras.models.Sequential([\n",
" keras.layers.Flatten(input_shape=[28, 28, 1]),\n",
" keras.layers.Dense(100, activation=\"relu\"),\n",
" keras.layers.Dense(10, activation=\"softmax\")\n",
"model = tf.keras.Sequential([\n",
" tf.keras.layers.Flatten(input_shape=[28, 28, 1]),\n",
" tf.keras.layers.Dense(100, activation=\"relu\"),\n",
" tf.keras.layers.Dense(10, activation=\"softmax\")\n",
"])\n",
"model.compile(loss=\"sparse_categorical_crossentropy\",\n",
" optimizer=keras.optimizers.SGD(learning_rate=1e-2),\n",
" optimizer=tf.keras.optimizers.SGD(learning_rate=1e-2),\n",
" metrics=[\"accuracy\"])\n",
"model.fit(X_train, y_train, epochs=10, validation_data=(X_valid, y_valid))"
]
@ -560,14 +560,14 @@
"np.random.seed(42)\n",
"tf.random.set_seed(42)\n",
"\n",
"model = keras.models.Sequential([\n",
" keras.layers.Flatten(input_shape=[28, 28, 1]),\n",
" keras.layers.Dense(50, activation=\"relu\"),\n",
" keras.layers.Dense(50, activation=\"relu\"),\n",
" keras.layers.Dense(10, activation=\"softmax\")\n",
"model = tf.keras.Sequential([\n",
" tf.keras.layers.Flatten(input_shape=[28, 28, 1]),\n",
" tf.keras.layers.Dense(50, activation=\"relu\"),\n",
" tf.keras.layers.Dense(50, activation=\"relu\"),\n",
" tf.keras.layers.Dense(10, activation=\"softmax\")\n",
"])\n",
"model.compile(loss=\"sparse_categorical_crossentropy\",\n",
" optimizer=keras.optimizers.SGD(learning_rate=1e-2),\n",
" optimizer=tf.keras.optimizers.SGD(learning_rate=1e-2),\n",
" metrics=[\"accuracy\"])\n",
"history = model.fit(X_train, y_train, epochs=10, validation_data=(X_valid, y_valid))"
]
@ -774,7 +774,7 @@
"metadata": {},
"outputs": [],
"source": [
"keras.backend.clear_session()\n",
"tf.keras.backend.clear_session()\n",
"tf.random.set_seed(42)\n",
"np.random.seed(42)"
]
@ -786,19 +786,19 @@
"outputs": [],
"source": [
"def create_model():\n",
" return keras.models.Sequential([\n",
" keras.layers.Conv2D(filters=64, kernel_size=7, activation=\"relu\",\n",
" return tf.keras.Sequential([\n",
" tf.keras.layers.Conv2D(filters=64, kernel_size=7, activation=\"relu\",\n",
" padding=\"same\", input_shape=[28, 28, 1]),\n",
" keras.layers.MaxPooling2D(pool_size=2),\n",
" keras.layers.Conv2D(filters=128, kernel_size=3, activation=\"relu\",\n",
" tf.keras.layers.MaxPooling2D(pool_size=2),\n",
" tf.keras.layers.Conv2D(filters=128, kernel_size=3, activation=\"relu\",\n",
" padding=\"same\"), \n",
" keras.layers.Conv2D(filters=128, kernel_size=3, activation=\"relu\",\n",
" tf.keras.layers.Conv2D(filters=128, kernel_size=3, activation=\"relu\",\n",
" padding=\"same\"),\n",
" keras.layers.MaxPooling2D(pool_size=2),\n",
" keras.layers.Flatten(),\n",
" keras.layers.Dense(units=64, activation='relu'),\n",
" keras.layers.Dropout(0.5),\n",
" keras.layers.Dense(units=10, activation='softmax'),\n",
" tf.keras.layers.MaxPooling2D(pool_size=2),\n",
" tf.keras.layers.Flatten(),\n",
" tf.keras.layers.Dense(units=64, activation='relu'),\n",
" tf.keras.layers.Dropout(0.5),\n",
" tf.keras.layers.Dense(units=10, activation='softmax'),\n",
" ])"
]
},
@ -811,7 +811,7 @@
"batch_size = 100\n",
"model = create_model()\n",
"model.compile(loss=\"sparse_categorical_crossentropy\",\n",
" optimizer=keras.optimizers.SGD(learning_rate=1e-2),\n",
" optimizer=tf.keras.optimizers.SGD(learning_rate=1e-2),\n",
" metrics=[\"accuracy\"])\n",
"model.fit(X_train, y_train, epochs=10,\n",
" validation_data=(X_valid, y_valid), batch_size=batch_size)"
@ -823,7 +823,7 @@
"metadata": {},
"outputs": [],
"source": [
"keras.backend.clear_session()\n",
"tf.keras.backend.clear_session()\n",
"tf.random.set_seed(42)\n",
"np.random.seed(42)\n",
"\n",
@ -851,7 +851,7 @@
"with distribution.scope():\n",
" model = create_model()\n",
" model.compile(loss=\"sparse_categorical_crossentropy\",\n",
" optimizer=keras.optimizers.SGD(learning_rate=1e-2),\n",
" optimizer=tf.keras.optimizers.SGD(learning_rate=1e-2),\n",
" metrics=[\"accuracy\"])"
]
},
@ -888,17 +888,17 @@
"metadata": {},
"outputs": [],
"source": [
"keras.backend.clear_session()\n",
"tf.keras.backend.clear_session()\n",
"tf.random.set_seed(42)\n",
"np.random.seed(42)\n",
"\n",
"K = keras.backend\n",
"K = tf.keras.backend\n",
"\n",
"distribution = tf.distribute.MirroredStrategy()\n",
"\n",
"with distribution.scope():\n",
" model = create_model()\n",
" optimizer = keras.optimizers.SGD()\n",
" optimizer = tf.keras.optimizers.SGD()\n",
"\n",
"with distribution.scope():\n",
" dataset = tf.data.Dataset.from_tensor_slices((X_train, y_train)).repeat().batch(batch_size)\n",
@ -910,7 +910,7 @@
" X, y = inputs\n",
" with tf.GradientTape() as tape:\n",
" Y_proba = model(X)\n",
" loss = K.sum(keras.losses.sparse_categorical_crossentropy(y, Y_proba)) / batch_size\n",
" loss = K.sum(tf.keras.losses.sparse_categorical_crossentropy(y, Y_proba)) / batch_size\n",
"\n",
" grads = tape.gradient(loss, model.trainable_variables)\n",
" optimizer.apply_gradients(zip(grads, model.trainable_variables))\n",
@ -1072,36 +1072,36 @@
" run_id = time.strftime(\"run_%Y_%m_%d-%H_%M_%S\")\n",
" run_dir = root_logdir / run_id\n",
" callbacks = [\n",
" keras.callbacks.TensorBoard(run_dir),\n",
" keras.callbacks.ModelCheckpoint(\"my_mnist_multiworker_model.h5\",\n",
" tf.keras.callbacks.TensorBoard(run_dir),\n",
" tf.keras.callbacks.ModelCheckpoint(\"my_mnist_multiworker_model.h5\",\n",
" save_best_only=True),\n",
" ]\n",
"else:\n",
" callbacks = []\n",
"\n",
"# Load and prepare the MNIST dataset\n",
"(X_train_full, y_train_full), (X_test, y_test) = keras.datasets.mnist.load_data()\n",
"(X_train_full, y_train_full), (X_test, y_test) = tf.keras.datasets.mnist.load_data()\n",
"X_train_full = X_train_full[..., np.newaxis] / 255.\n",
"X_valid, X_train = X_train_full[:5000], X_train_full[5000:]\n",
"y_valid, y_train = y_train_full[:5000], y_train_full[5000:]\n",
"\n",
"with distribution.scope():\n",
" model = keras.models.Sequential([\n",
" keras.layers.Conv2D(filters=64, kernel_size=7, activation=\"relu\",\n",
" model = tf.keras.Sequential([\n",
" tf.keras.layers.Conv2D(filters=64, kernel_size=7, activation=\"relu\",\n",
" padding=\"same\", input_shape=[28, 28, 1]),\n",
" keras.layers.MaxPooling2D(pool_size=2),\n",
" keras.layers.Conv2D(filters=128, kernel_size=3, activation=\"relu\",\n",
" tf.keras.layers.MaxPooling2D(pool_size=2),\n",
" tf.keras.layers.Conv2D(filters=128, kernel_size=3, activation=\"relu\",\n",
" padding=\"same\"), \n",
" keras.layers.Conv2D(filters=128, kernel_size=3, activation=\"relu\",\n",
" tf.keras.layers.Conv2D(filters=128, kernel_size=3, activation=\"relu\",\n",
" padding=\"same\"),\n",
" keras.layers.MaxPooling2D(pool_size=2),\n",
" keras.layers.Flatten(),\n",
" keras.layers.Dense(units=64, activation='relu'),\n",
" keras.layers.Dropout(0.5),\n",
" keras.layers.Dense(units=10, activation='softmax'),\n",
" tf.keras.layers.MaxPooling2D(pool_size=2),\n",
" tf.keras.layers.Flatten(),\n",
" tf.keras.layers.Dense(units=64, activation='relu'),\n",
" tf.keras.layers.Dropout(0.5),\n",
" tf.keras.layers.Dense(units=10, activation='softmax'),\n",
" ])\n",
" model.compile(loss=\"sparse_categorical_crossentropy\",\n",
" optimizer=keras.optimizers.SGD(learning_rate=1e-2),\n",
" optimizer=tf.keras.optimizers.SGD(learning_rate=1e-2),\n",
" metrics=[\"accuracy\"])\n",
"\n",
"model.fit(X_train, y_train, validation_data=(X_valid, y_valid),\n",
@ -1172,7 +1172,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"That's it! Once training is over, the best checkpoint of the model will be available in the `my_mnist_multiworker_model.h5` file. You can load it using `keras.models.load_model()` and use it for predictions, as usual:"
"That's it! Once training is over, the best checkpoint of the model will be available in the `my_mnist_multiworker_model.h5` file. You can load it using `tf.keras.models.load_model()` and use it for predictions, as usual:"
]
},
{
@ -1183,7 +1183,7 @@
"source": [
"from tensorflow import keras\n",
"\n",
"model = keras.models.load_model(\"my_mnist_multiworker_model.h5\")\n",
"model = tf.keras.models.load_model(\"my_mnist_multiworker_model.h5\")\n",
"Y_pred = model.predict(X_new)\n",
"np.argmax(Y_pred, axis=-1)"
]