diff --git a/12_custom_models_and_training_with_tensorflow.ipynb b/12_custom_models_and_training_with_tensorflow.ipynb
index 3dec28f..be59c78 100644
--- a/12_custom_models_and_training_with_tensorflow.ipynb
+++ b/12_custom_models_and_training_with_tensorflow.ipynb
@@ -14,6 +14,17 @@
"_This notebook contains all the sample code in chapter 12._"
]
},
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+    "<table align=\"left\">\n",
+    "  <td>\n",
+    "    <a target=\"_blank\" href=\"https://colab.research.google.com/github/ageron/handson-ml2/blob/master/12_custom_models_and_training_with_tensorflow.ipynb\">Run in Google Colab</a>\n",
+    "  </td>\n",
+    "</table>"
+ ]
+ },
{
"cell_type": "markdown",
"metadata": {},
@@ -25,7 +36,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "First, let's import a few common modules, ensure MatplotLib plots figures inline and prepare a function to save the figures. We also check that Python 3.5 or later is installed (although Python 2.x may work, it is deprecated so we strongly recommend you use Python 3 instead), as well as Scikit-Learn ≥0.20 and TensorFlow ≥2.0-preview."
+    "First, let's import a few common modules, ensure Matplotlib plots figures inline, and prepare a function to save the figures. We also check that Python 3.5 or later is installed (although Python 2.x may work, it is deprecated, so we strongly recommend you use Python 3 instead), as well as Scikit-Learn ≥0.20 and TensorFlow ≥2.0."
]
},
{
@@ -42,7 +53,13 @@
"import sklearn\n",
"assert sklearn.__version__ >= \"0.20\"\n",
"\n",
- "# TensorFlow ≥2.0-preview is required\n",
+ "try:\n",
+ " # %tensorflow_version only exists in Colab.\n",
+ " %tensorflow_version 2.x\n",
+ "except Exception:\n",
+ " pass\n",
+ "\n",
+ "# TensorFlow ≥2.0 is required\n",
"import tensorflow as tf\n",
"from tensorflow import keras\n",
"assert tf.__version__ >= \"2.0\"\n",
@@ -1141,17 +1158,14 @@
"metadata": {},
"outputs": [],
"source": [
- "# TODO: \n",
- "\"\"\"\n",
"model = keras.models.load_model(\n",
" \"my_model_with_many_custom_parts.h5\",\n",
" custom_objects={\n",
- " \"my_l1_regularizer\": my_l1_regularizer(0.01),\n",
- " \"my_positive_weights\": my_positive_weights,\n",
+ " \"my_l1_regularizer\": my_l1_regularizer,\n",
+ " \"my_positive_weights\": lambda: my_positive_weights,\n",
" \"my_glorot_initializer\": my_glorot_initializer,\n",
" \"my_softplus\": my_softplus,\n",
- " })\n",
- "\"\"\""
+ " })"
]
},
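+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Equivalently (a minimal sketch, not shown in the book), the same mapping can be registered with a `keras.utils.CustomObjectScope`, so that `load_model()` does not need the `custom_objects` argument:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "with keras.utils.CustomObjectScope({\n",
+    "        \"my_l1_regularizer\": my_l1_regularizer,\n",
+    "        \"my_positive_weights\": lambda: my_positive_weights,\n",
+    "        \"my_glorot_initializer\": my_glorot_initializer,\n",
+    "        \"my_softplus\": my_softplus,\n",
+    "    }):\n",
+    "    model = keras.models.load_model(\"my_model_with_many_custom_parts.h5\")"
+   ]
+  },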
{
@@ -1219,17 +1233,14 @@
"metadata": {},
"outputs": [],
"source": [
- "# TODO: check https://github.com/tensorflow/tensorflow/issues/26061\n",
- "\"\"\"\n",
"model = keras.models.load_model(\n",
" \"my_model_with_many_custom_parts.h5\",\n",
" custom_objects={\n",
" \"MyL1Regularizer\": MyL1Regularizer,\n",
- " \"my_positive_weights\": my_positive_weights,\n",
+ " \"my_positive_weights\": lambda: my_positive_weights,\n",
" \"my_glorot_initializer\": my_glorot_initializer,\n",
" \"my_softplus\": my_softplus,\n",
- " })\n",
- "\"\"\""
+ " })"
]
},
{
@@ -1379,9 +1390,15 @@
" def __init__(self, threshold=1.0, **kwargs):\n",
" super().__init__(**kwargs) # handles base args (e.g., dtype)\n",
" self.threshold = threshold\n",
- " self.huber_fn = create_huber(threshold)\n",
+ " #self.huber_fn = create_huber(threshold) # TODO: investigate why this fails\n",
" self.total = self.add_weight(\"total\", initializer=\"zeros\")\n",
" self.count = self.add_weight(\"count\", initializer=\"zeros\")\n",
+ " def huber_fn(self, y_true, y_pred): # workaround\n",
+ " error = y_true - y_pred\n",
+ " is_small_error = tf.abs(error) < self.threshold\n",
+ " squared_loss = tf.square(error) / 2\n",
+ " linear_loss = self.threshold * tf.abs(error) - self.threshold**2 / 2\n",
+ " return tf.where(is_small_error, squared_loss, linear_loss)\n",
" def update_state(self, y_true, y_pred, sample_weight=None):\n",
" metric = self.huber_fn(y_true, y_pred)\n",
" self.total.assign_add(tf.reduce_sum(metric))\n",
@@ -1475,7 +1492,7 @@
"metadata": {},
"outputs": [],
"source": [
- "model.fit(X_train_scaled, y_train, epochs=2)"
+ "model.fit(X_train_scaled.astype(np.float32), y_train.astype(np.float32), epochs=2)"
]
},
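+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Note: the inputs are cast to float32 to match Keras's default float type (`keras.backend.floatx()`): NumPy arrays default to float64, and mixing the two dtypes in TensorFlow ops raises an error. A quick check (not from the book):"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "X_train_scaled.dtype, keras.backend.floatx()"
+   ]
+  },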
{
@@ -1504,7 +1521,7 @@
"metadata": {},
"outputs": [],
"source": [
- "model.fit(X_train_scaled, y_train, epochs=2)"
+ "model.fit(X_train_scaled.astype(np.float32), y_train.astype(np.float32), epochs=2)"
]
},
{
@@ -1862,10 +1879,10 @@
" Z = layer(Z)\n",
" return inputs + Z\n",
" \n",
- " def get_config(self): # not shown\n",
- " base_config = super().get_config() # not shown\n",
- " return {**base_config, # not shown\n",
- " \"n_layers\": self.n_layers, \"n_neurons\": n_neurons} # not shown"
+ " def get_config(self): # not shown\n",
+ " base_config = super().get_config() # not shown\n",
+ " return {**base_config, # not shown\n",
+ " \"n_layers\": self.n_layers, \"n_neurons\": self.n_neurons} # not shown"
]
},
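+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "With `get_config()` returning the stored hyperparameters, the layer can be rebuilt from its config. A quick sanity check (a minimal sketch, not from the book; it assumes this is the `ResidualBlock` layer defined above, which stores `n_layers` and `n_neurons` in its constructor):"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "block = ResidualBlock(2, 30)\n",
+    "config = block.get_config()                # includes n_layers and n_neurons\n",
+    "clone = ResidualBlock.from_config(config)  # rebuilds an equivalent (unbuilt) layer\n",
+    "config"
+   ]
+  },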
{
@@ -1916,13 +1933,25 @@
"metadata": {},
"outputs": [],
"source": [
- "#TODO: check that persistence ends up working in TF2\n",
- "#model.save(\"my_custom_model.h5\")\n",
- "#model = keras.models.load_model(\"my_custom_model.h5\",\n",
- "# custom_objects={\n",
- "# \"ResidualBlock\": ResidualBlock,\n",
- "# \"ResidualRegressor\": ResidualRegressor\n",
- "# })"
+ "model.save(\"my_custom_model.ckpt\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 146,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "model = keras.models.load_model(\"my_custom_model.ckpt\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 147,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "history = model.fit(X_train_scaled, y_train, epochs=5)"
]
},
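+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Alternatively (a sketch, not from the book), we can persist only the weights with `save_weights()`/`load_weights()` and recreate the architecture from the class; this assumes the model is the `ResidualRegressor(1)` built above:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "model.save_weights(\"my_custom_model_weights.ckpt\")       # TF checkpoint format\n",
+    "new_model = ResidualRegressor(1)                         # same architecture, fresh weights\n",
+    "new_model.load_weights(\"my_custom_model_weights.ckpt\")   # restored as the variables get created"
+   ]
+  },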
{
@@ -1934,7 +1963,7 @@
},
{
"cell_type": "code",
- "execution_count": 146,
+ "execution_count": 148,
"metadata": {},
"outputs": [],
"source": [
@@ -1949,7 +1978,7 @@
},
{
"cell_type": "code",
- "execution_count": 147,
+ "execution_count": 149,
"metadata": {},
"outputs": [],
"source": [
@@ -1967,11 +1996,11 @@
]
},
{
- "cell_type": "markdown",
+ "cell_type": "code",
+ "execution_count": 150,
"metadata": {},
+ "outputs": [],
"source": [
- "TODO: check https://github.com/tensorflow/tensorflow/issues/26260\n",
- "```python\n",
"class ReconstructingRegressor(keras.models.Model):\n",
" def __init__(self, output_dim, **kwargs):\n",
" super().__init__(**kwargs)\n",
@@ -1979,70 +2008,34 @@
" kernel_initializer=\"lecun_normal\")\n",
" for _ in range(5)]\n",
" self.out = keras.layers.Dense(output_dim)\n",
- " self.reconstruction_mean = keras.metrics.Mean(name=\"reconstruction_error\")\n",
+ " # TODO: check https://github.com/tensorflow/tensorflow/issues/26260\n",
+ " #self.reconstruction_mean = keras.metrics.Mean(name=\"reconstruction_error\")\n",
"\n",
" def build(self, batch_input_shape):\n",
" n_inputs = batch_input_shape[-1]\n",
" self.reconstruct = keras.layers.Dense(n_inputs)\n",
" super().build(batch_input_shape)\n",
"\n",
- " @tf.function\n",
" def call(self, inputs, training=None):\n",
" Z = inputs\n",
" for layer in self.hidden:\n",
" Z = layer(Z)\n",
" reconstruction = self.reconstruct(Z)\n",
" recon_loss = tf.reduce_mean(tf.square(reconstruction - inputs))\n",
- " self.add_loss(0.05 * reconstruction_loss)\n",
- " if training:\n",
- " result = self.reconstruction_mean(recon_loss)\n",
- " self.add_metric(result)\n",
- " return self.out(Z)\n",
- "\n",
- "model = ReconstructingRegressor(1)\n",
- "model.build(tf.TensorShape([None, 8])) # <= Fails if this line is removed\n",
- "model.compile(loss=\"mse\", optimizer=\"nadam\")\n",
- "history = model.fit(X, y, epochs=2)\n",
- "```"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 148,
- "metadata": {},
- "outputs": [],
- "source": [
- "class ReconstructingRegressor(keras.models.Model):\n",
- " def __init__(self, output_dim, **kwargs):\n",
- " super().__init__(**kwargs)\n",
- " self.hidden = [keras.layers.Dense(30, activation=\"selu\",\n",
- " kernel_initializer=\"lecun_normal\")\n",
- " for _ in range(5)]\n",
- " self.out = keras.layers.Dense(output_dim)\n",
- "\n",
- " def build(self, batch_input_shape):\n",
- " n_inputs = batch_input_shape[-1]\n",
- " self.reconstruct = keras.layers.Dense(n_inputs)\n",
- " super().build(batch_input_shape)\n",
- "\n",
- " def call(self, inputs):\n",
- " Z = inputs\n",
- " for layer in self.hidden:\n",
- " Z = layer(Z)\n",
- " reconstruction = self.reconstruct(Z)\n",
- " recon_loss = tf.reduce_mean(tf.square(reconstruction - inputs))\n",
" self.add_loss(0.05 * recon_loss)\n",
+ " #if training:\n",
+ " # result = self.reconstruction_mean(recon_loss)\n",
+ " # self.add_metric(result)\n",
" return self.out(Z)"
]
},
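+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "As a quick check (a minimal sketch, not from the book), the reconstruction penalty registered with `add_loss()` shows up in `model.losses` after a forward pass:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "recon_model = ReconstructingRegressor(1)\n",
+    "_ = recon_model(X_train_scaled[:8].astype(np.float32))  # build + one forward pass\n",
+    "recon_model.losses  # holds 0.05 * the reconstruction MSE for this batch"
+   ]
+  },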
{
"cell_type": "code",
- "execution_count": 149,
+ "execution_count": 151,
"metadata": {},
"outputs": [],
"source": [
"model = ReconstructingRegressor(1)\n",
- "model.build(tf.TensorShape([None, 8])) # TODO: check https://github.com/tensorflow/tensorflow/issues/26274\n",
"model.compile(loss=\"mse\", optimizer=\"nadam\")\n",
"history = model.fit(X_train_scaled, y_train, epochs=2)\n",
"y_pred = model.predict(X_test_scaled)"
@@ -2057,7 +2050,7 @@
},
{
"cell_type": "code",
- "execution_count": 150,
+ "execution_count": 152,
"metadata": {},
"outputs": [],
"source": [
@@ -2067,7 +2060,7 @@
},
{
"cell_type": "code",
- "execution_count": 151,
+ "execution_count": 153,
"metadata": {},
"outputs": [],
"source": [
@@ -2078,7 +2071,7 @@
},
{
"cell_type": "code",
- "execution_count": 152,
+ "execution_count": 154,
"metadata": {},
"outputs": [],
"source": [
@@ -2087,7 +2080,7 @@
},
{
"cell_type": "code",
- "execution_count": 153,
+ "execution_count": 155,
"metadata": {},
"outputs": [],
"source": [
@@ -2100,7 +2093,7 @@
},
{
"cell_type": "code",
- "execution_count": 154,
+ "execution_count": 156,
"metadata": {},
"outputs": [],
"source": [
@@ -2109,7 +2102,7 @@
},
{
"cell_type": "code",
- "execution_count": 155,
+ "execution_count": 157,
"metadata": {},
"outputs": [],
"source": [
@@ -2125,7 +2118,7 @@
},
{
"cell_type": "code",
- "execution_count": 156,
+ "execution_count": 158,
"metadata": {},
"outputs": [],
"source": [
@@ -2139,7 +2132,7 @@
},
{
"cell_type": "code",
- "execution_count": 157,
+ "execution_count": 159,
"metadata": {},
"outputs": [],
"source": [
@@ -2148,7 +2141,7 @@
},
{
"cell_type": "code",
- "execution_count": 158,
+ "execution_count": 160,
"metadata": {},
"outputs": [],
"source": [
@@ -2159,29 +2152,6 @@
"gradients = tape.gradient(z, [c1, c2])"
]
},
- {
- "cell_type": "code",
- "execution_count": 159,
- "metadata": {},
- "outputs": [],
- "source": [
- "gradients"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 160,
- "metadata": {},
- "outputs": [],
- "source": [
- "with tf.GradientTape() as tape:\n",
- " tape.watch(c1)\n",
- " tape.watch(c2)\n",
- " z = f(c1, c2)\n",
- "\n",
- "gradients = tape.gradient(z, [c1, c2])"
- ]
- },
{
"cell_type": "code",
"execution_count": 161,
@@ -2196,6 +2166,29 @@
"execution_count": 162,
"metadata": {},
"outputs": [],
+ "source": [
+ "with tf.GradientTape() as tape:\n",
+ " tape.watch(c1)\n",
+ " tape.watch(c2)\n",
+ " z = f(c1, c2)\n",
+ "\n",
+ "gradients = tape.gradient(z, [c1, c2])"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 163,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "gradients"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 164,
+ "metadata": {},
+ "outputs": [],
"source": [
"with tf.GradientTape() as tape:\n",
" z1 = f(w1, w2 + 2.)\n",
@@ -2207,7 +2200,7 @@
},
{
"cell_type": "code",
- "execution_count": 163,
+ "execution_count": 165,
"metadata": {},
"outputs": [],
"source": [
@@ -2222,7 +2215,7 @@
},
{
"cell_type": "code",
- "execution_count": 164,
+ "execution_count": 166,
"metadata": {},
"outputs": [],
"source": [
@@ -2237,7 +2230,7 @@
},
{
"cell_type": "code",
- "execution_count": 165,
+ "execution_count": 167,
"metadata": {},
"outputs": [],
"source": [
@@ -2246,7 +2239,7 @@
},
{
"cell_type": "code",
- "execution_count": 166,
+ "execution_count": 168,
"metadata": {},
"outputs": [],
"source": [
@@ -2255,7 +2248,7 @@
},
{
"cell_type": "code",
- "execution_count": 167,
+ "execution_count": 169,
"metadata": {},
"outputs": [],
"source": [
@@ -2270,7 +2263,7 @@
},
{
"cell_type": "code",
- "execution_count": 168,
+ "execution_count": 170,
"metadata": {},
"outputs": [],
"source": [
@@ -2283,7 +2276,7 @@
},
{
"cell_type": "code",
- "execution_count": 169,
+ "execution_count": 171,
"metadata": {},
"outputs": [],
"source": [
@@ -2292,7 +2285,7 @@
},
{
"cell_type": "code",
- "execution_count": 170,
+ "execution_count": 172,
"metadata": {},
"outputs": [],
"source": [
@@ -2305,7 +2298,7 @@
},
{
"cell_type": "code",
- "execution_count": 171,
+ "execution_count": 173,
"metadata": {},
"outputs": [],
"source": [
@@ -2319,7 +2312,7 @@
},
{
"cell_type": "code",
- "execution_count": 172,
+ "execution_count": 174,
"metadata": {},
"outputs": [],
"source": [
@@ -2329,7 +2322,7 @@
},
{
"cell_type": "code",
- "execution_count": 173,
+ "execution_count": 175,
"metadata": {},
"outputs": [],
"source": [
@@ -2349,7 +2342,7 @@
},
{
"cell_type": "code",
- "execution_count": 174,
+ "execution_count": 176,
"metadata": {},
"outputs": [],
"source": [
@@ -2363,7 +2356,7 @@
},
{
"cell_type": "code",
- "execution_count": 175,
+ "execution_count": 177,
"metadata": {},
"outputs": [],
"source": [
@@ -2374,7 +2367,7 @@
},
{
"cell_type": "code",
- "execution_count": 176,
+ "execution_count": 178,
"metadata": {},
"outputs": [],
"source": [
@@ -2388,7 +2381,7 @@
},
{
"cell_type": "code",
- "execution_count": 177,
+ "execution_count": 179,
"metadata": {},
"outputs": [],
"source": [
@@ -2413,7 +2406,7 @@
},
{
"cell_type": "code",
- "execution_count": 178,
+ "execution_count": 180,
"metadata": {},
"outputs": [],
"source": [
@@ -2428,7 +2421,7 @@
},
{
"cell_type": "code",
- "execution_count": 179,
+ "execution_count": 181,
"metadata": {},
"outputs": [],
"source": [
@@ -2437,7 +2430,7 @@
},
{
"cell_type": "code",
- "execution_count": 180,
+ "execution_count": 182,
"metadata": {},
"outputs": [],
"source": [
@@ -2450,7 +2443,7 @@
},
{
"cell_type": "code",
- "execution_count": 181,
+ "execution_count": 183,
"metadata": {},
"outputs": [],
"source": [
@@ -2466,7 +2459,7 @@
},
{
"cell_type": "code",
- "execution_count": 182,
+ "execution_count": 184,
"metadata": {},
"outputs": [],
"source": [
@@ -2481,7 +2474,7 @@
},
{
"cell_type": "code",
- "execution_count": 183,
+ "execution_count": 185,
"metadata": {},
"outputs": [],
"source": [
@@ -2509,7 +2502,7 @@
},
{
"cell_type": "code",
- "execution_count": 184,
+ "execution_count": 186,
"metadata": {},
"outputs": [],
"source": [
@@ -2552,7 +2545,7 @@
},
{
"cell_type": "code",
- "execution_count": 185,
+ "execution_count": 187,
"metadata": {},
"outputs": [],
"source": [
@@ -2562,7 +2555,7 @@
},
{
"cell_type": "code",
- "execution_count": 186,
+ "execution_count": 188,
"metadata": {},
"outputs": [],
"source": [
@@ -2571,7 +2564,7 @@
},
{
"cell_type": "code",
- "execution_count": 187,
+ "execution_count": 189,
"metadata": {},
"outputs": [],
"source": [
@@ -2580,7 +2573,7 @@
},
{
"cell_type": "code",
- "execution_count": 188,
+ "execution_count": 190,
"metadata": {},
"outputs": [],
"source": [
@@ -2590,7 +2583,7 @@
},
{
"cell_type": "code",
- "execution_count": 189,
+ "execution_count": 191,
"metadata": {},
"outputs": [],
"source": [
@@ -2599,7 +2592,7 @@
},
{
"cell_type": "code",
- "execution_count": 190,
+ "execution_count": 192,
"metadata": {},
"outputs": [],
"source": [
@@ -2615,7 +2608,7 @@
},
{
"cell_type": "code",
- "execution_count": 191,
+ "execution_count": 193,
"metadata": {},
"outputs": [],
"source": [
@@ -2625,7 +2618,7 @@
},
{
"cell_type": "code",
- "execution_count": 192,
+ "execution_count": 194,
"metadata": {},
"outputs": [],
"source": [
@@ -2634,7 +2627,7 @@
},
{
"cell_type": "code",
- "execution_count": 193,
+ "execution_count": 195,
"metadata": {},
"outputs": [],
"source": [
@@ -2650,7 +2643,7 @@
},
{
"cell_type": "code",
- "execution_count": 194,
+ "execution_count": 196,
"metadata": {},
"outputs": [],
"source": [
@@ -2659,7 +2652,7 @@
},
{
"cell_type": "code",
- "execution_count": 195,
+ "execution_count": 197,
"metadata": {},
"outputs": [],
"source": [
@@ -2669,7 +2662,7 @@
},
{
"cell_type": "code",
- "execution_count": 196,
+ "execution_count": 198,
"metadata": {},
"outputs": [],
"source": [
@@ -2679,7 +2672,7 @@
},
{
"cell_type": "code",
- "execution_count": 197,
+ "execution_count": 199,
"metadata": {},
"outputs": [],
"source": [
@@ -2688,7 +2681,7 @@
},
{
"cell_type": "code",
- "execution_count": 198,
+ "execution_count": 200,
"metadata": {},
"outputs": [],
"source": [
@@ -2697,7 +2690,7 @@
},
{
"cell_type": "code",
- "execution_count": 199,
+ "execution_count": 201,
"metadata": {},
"outputs": [],
"source": [
@@ -2706,7 +2699,7 @@
},
{
"cell_type": "code",
- "execution_count": 200,
+ "execution_count": 202,
"metadata": {},
"outputs": [],
"source": [
@@ -2722,7 +2715,7 @@
},
{
"cell_type": "code",
- "execution_count": 201,
+ "execution_count": 203,
"metadata": {},
"outputs": [],
"source": [
@@ -2734,7 +2727,7 @@
},
{
"cell_type": "code",
- "execution_count": 202,
+ "execution_count": 204,
"metadata": {},
"outputs": [],
"source": [
@@ -2743,7 +2736,7 @@
},
{
"cell_type": "code",
- "execution_count": 203,
+ "execution_count": 205,
"metadata": {},
"outputs": [],
"source": [
@@ -2752,7 +2745,7 @@
},
{
"cell_type": "code",
- "execution_count": 204,
+ "execution_count": 206,
"metadata": {},
"outputs": [],
"source": [
@@ -2772,7 +2765,7 @@
},
{
"cell_type": "code",
- "execution_count": 205,
+ "execution_count": 207,
"metadata": {},
"outputs": [],
"source": [
@@ -2784,7 +2777,7 @@
},
{
"cell_type": "code",
- "execution_count": 206,
+ "execution_count": 208,
"metadata": {},
"outputs": [],
"source": [
@@ -2796,7 +2789,7 @@
},
{
"cell_type": "code",
- "execution_count": 207,
+ "execution_count": 209,
"metadata": {},
"outputs": [],
"source": [
@@ -2823,7 +2816,7 @@
},
{
"cell_type": "code",
- "execution_count": 208,
+ "execution_count": 210,
"metadata": {},
"outputs": [],
"source": [
@@ -2836,7 +2829,7 @@
},
{
"cell_type": "code",
- "execution_count": 209,
+ "execution_count": 211,
"metadata": {},
"outputs": [],
"source": [
@@ -2845,7 +2838,7 @@
},
{
"cell_type": "code",
- "execution_count": 210,
+ "execution_count": 212,
"metadata": {},
"outputs": [],
"source": [
@@ -2861,7 +2854,7 @@
},
{
"cell_type": "code",
- "execution_count": 211,
+ "execution_count": 213,
"metadata": {},
"outputs": [],
"source": [
@@ -2875,7 +2868,7 @@
},
{
"cell_type": "code",
- "execution_count": 212,
+ "execution_count": 214,
"metadata": {},
"outputs": [],
"source": [
@@ -2884,7 +2877,7 @@
},
{
"cell_type": "code",
- "execution_count": 213,
+ "execution_count": 215,
"metadata": {},
"outputs": [],
"source": [
@@ -2900,7 +2893,7 @@
},
{
"cell_type": "code",
- "execution_count": 214,
+ "execution_count": 216,
"metadata": {},
"outputs": [],
"source": [
@@ -2913,7 +2906,7 @@
},
{
"cell_type": "code",
- "execution_count": 215,
+ "execution_count": 217,
"metadata": {},
"outputs": [],
"source": [
@@ -2929,7 +2922,7 @@
},
{
"cell_type": "code",
- "execution_count": 216,
+ "execution_count": 218,
"metadata": {},
"outputs": [],
"source": [
@@ -2942,7 +2935,7 @@
},
{
"cell_type": "code",
- "execution_count": 217,
+ "execution_count": 219,
"metadata": {},
"outputs": [],
"source": [
@@ -2952,7 +2945,7 @@
},
{
"cell_type": "code",
- "execution_count": 218,
+ "execution_count": 220,
"metadata": {},
"outputs": [],
"source": [
@@ -2962,7 +2955,7 @@
},
{
"cell_type": "code",
- "execution_count": 219,
+ "execution_count": 221,
"metadata": {},
"outputs": [],
"source": [
@@ -2975,7 +2968,7 @@
},
{
"cell_type": "code",
- "execution_count": 220,
+ "execution_count": 222,
"metadata": {},
"outputs": [],
"source": [
@@ -2985,7 +2978,7 @@
},
{
"cell_type": "code",
- "execution_count": 221,
+ "execution_count": 223,
"metadata": {},
"outputs": [],
"source": [
@@ -2995,7 +2988,7 @@
},
{
"cell_type": "code",
- "execution_count": 222,
+ "execution_count": 224,
"metadata": {},
"outputs": [],
"source": [
@@ -3010,7 +3003,7 @@
},
{
"cell_type": "code",
- "execution_count": 223,
+ "execution_count": 225,
"metadata": {},
"outputs": [],
"source": [
@@ -3021,7 +3014,7 @@
},
{
"cell_type": "code",
- "execution_count": 224,
+ "execution_count": 226,
"metadata": {
"scrolled": true
},
@@ -3033,28 +3026,26 @@
" x += 1\n",
" return x\n",
"\n",
- "tf.autograph.to_code(add_10.python_function, experimental_optional_features=None)\n",
- "# TODO: experimental_optional_features is needed to have the same behavior as @tf.function,\n",
- "# check that this is not needed when TF2 is released"
+ "tf.autograph.to_code(add_10.python_function)"
]
},
{
"cell_type": "code",
- "execution_count": 225,
+ "execution_count": 227,
"metadata": {},
"outputs": [],
"source": [
- "def display_tf_code(func, experimental_optional_features=None):\n",
+ "def display_tf_code(func):\n",
" from IPython.display import display, Markdown\n",
" if hasattr(func, \"python_function\"):\n",
" func = func.python_function\n",
- " code = tf.autograph.to_code(func, experimental_optional_features=experimental_optional_features)\n",
+ " code = tf.autograph.to_code(func)\n",
" display(Markdown('```python\\n{}\\n```'.format(code)))"
]
},
{
"cell_type": "code",
- "execution_count": 226,
+ "execution_count": 228,
"metadata": {},
"outputs": [],
"source": [
@@ -3078,7 +3069,7 @@
},
{
"cell_type": "code",
- "execution_count": 227,
+ "execution_count": 229,
"metadata": {},
"outputs": [],
"source": [
@@ -3090,7 +3081,7 @@
},
{
"cell_type": "code",
- "execution_count": 228,
+ "execution_count": 230,
"metadata": {},
"outputs": [],
"source": [
@@ -3102,7 +3093,7 @@
},
{
"cell_type": "code",
- "execution_count": 229,
+ "execution_count": 231,
"metadata": {},
"outputs": [],
"source": [
@@ -3131,7 +3122,7 @@
},
{
"cell_type": "code",
- "execution_count": 230,
+ "execution_count": 232,
"metadata": {},
"outputs": [],
"source": [
@@ -3156,7 +3147,7 @@
},
{
"cell_type": "code",
- "execution_count": 231,
+ "execution_count": 233,
"metadata": {},
"outputs": [],
"source": [
@@ -3165,7 +3156,7 @@
},
{
"cell_type": "code",
- "execution_count": 232,
+ "execution_count": 234,
"metadata": {},
"outputs": [],
"source": [
@@ -3183,7 +3174,7 @@
},
{
"cell_type": "code",
- "execution_count": 233,
+ "execution_count": 235,
"metadata": {},
"outputs": [],
"source": [
@@ -3192,7 +3183,7 @@
},
{
"cell_type": "code",
- "execution_count": 234,
+ "execution_count": 236,
"metadata": {},
"outputs": [],
"source": [
@@ -3208,7 +3199,7 @@
},
{
"cell_type": "code",
- "execution_count": 235,
+ "execution_count": 237,
"metadata": {},
"outputs": [],
"source": [
@@ -3226,7 +3217,7 @@
},
{
"cell_type": "code",
- "execution_count": 236,
+ "execution_count": 238,
"metadata": {},
"outputs": [],
"source": [
@@ -3235,7 +3226,7 @@
},
{
"cell_type": "code",
- "execution_count": 237,
+ "execution_count": 239,
"metadata": {},
"outputs": [],
"source": [
@@ -3244,7 +3235,7 @@
},
{
"cell_type": "code",
- "execution_count": 238,
+ "execution_count": 240,
"metadata": {},
"outputs": [],
"source": [
@@ -3269,7 +3260,7 @@
},
{
"cell_type": "code",
- "execution_count": 239,
+ "execution_count": 241,
"metadata": {},
"outputs": [],
"source": [
@@ -3315,7 +3306,7 @@
},
{
"cell_type": "code",
- "execution_count": 240,
+ "execution_count": 242,
"metadata": {
"scrolled": false
},
@@ -3343,7 +3334,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.6.8"
+ "version": "3.7.3"
}
},
"nbformat": 4,