6589 lines
214 KiB
Plaintext
6589 lines
214 KiB
Plaintext
{
|
||
"cells": [
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"**Chapter 12 – Custom Models and Training with TensorFlow**"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"_This notebook contains all the sample code and solutions to the exercises in chapter 12, as well as code examples from Appendix C_"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"<table align=\"left\">\n",
|
||
" <td>\n",
|
||
" <a href=\"https://colab.research.google.com/github/ageron/handson-ml3/blob/main/12_custom_models_and_training_with_tensorflow.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>\n",
|
||
" </td>\n",
|
||
" <td>\n",
|
||
" <a target=\"_blank\" href=\"https://kaggle.com/kernels/welcome?src=https://github.com/ageron/handson-ml3/blob/main/12_custom_models_and_training_with_tensorflow.ipynb\"><img src=\"https://kaggle.com/static/images/open-in-kaggle.svg\" /></a>\n",
|
||
" </td>\n",
|
||
"</table>"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {
|
||
"tags": []
|
||
},
|
||
"source": [
|
||
"# Setup"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"This project requires Python 3.7 or above:"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 1,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"import sys\n",
|
||
"\n",
|
||
"assert sys.version_info >= (3, 7)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"And TensorFlow ≥ 2.8:"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 2,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"from packaging import version\n",
|
||
"import tensorflow as tf\n",
|
||
"\n",
|
||
"assert version.parse(tf.__version__) >= version.parse(\"2.8.0\")"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"## Using TensorFlow like NumPy"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"### Tensors and Operations"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"#### Tensors"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 3,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(2, 3), dtype=float32, numpy=\n",
|
||
"array([[1., 2., 3.],\n",
|
||
" [4., 5., 6.]], dtype=float32)>"
|
||
]
|
||
},
|
||
"execution_count": 3,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"t = tf.constant([[1., 2., 3.], [4., 5., 6.]]) # matrix\n",
|
||
"t"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 4,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"TensorShape([2, 3])"
|
||
]
|
||
},
|
||
"execution_count": 4,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"t.shape"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 5,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"tf.float32"
|
||
]
|
||
},
|
||
"execution_count": 5,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"t.dtype"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"#### Indexing"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 6,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(2, 2), dtype=float32, numpy=\n",
|
||
"array([[2., 3.],\n",
|
||
" [5., 6.]], dtype=float32)>"
|
||
]
|
||
},
|
||
"execution_count": 6,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"t[:, 1:]"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 7,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(2, 1), dtype=float32, numpy=\n",
|
||
"array([[2.],\n",
|
||
" [5.]], dtype=float32)>"
|
||
]
|
||
},
|
||
"execution_count": 7,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"t[..., 1, tf.newaxis]"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"#### Ops"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 8,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(2, 3), dtype=float32, numpy=\n",
|
||
"array([[11., 12., 13.],\n",
|
||
" [14., 15., 16.]], dtype=float32)>"
|
||
]
|
||
},
|
||
"execution_count": 8,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"t + 10"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 9,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(2, 3), dtype=float32, numpy=\n",
|
||
"array([[ 1., 4., 9.],\n",
|
||
" [16., 25., 36.]], dtype=float32)>"
|
||
]
|
||
},
|
||
"execution_count": 9,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"tf.square(t)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 10,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(2, 2), dtype=float32, numpy=\n",
|
||
"array([[14., 32.],\n",
|
||
" [32., 77.]], dtype=float32)>"
|
||
]
|
||
},
|
||
"execution_count": 10,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"t @ tf.transpose(t)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"#### Scalars"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 11,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(), dtype=int32, numpy=42>"
|
||
]
|
||
},
|
||
"execution_count": 11,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"tf.constant(42)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"#### Keras's low-level API"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"You may still run across code that uses Keras's low-level API:"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 12,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(3, 2), dtype=float32, numpy=\n",
|
||
"array([[11., 26.],\n",
|
||
" [14., 35.],\n",
|
||
" [19., 46.]], dtype=float32)>"
|
||
]
|
||
},
|
||
"execution_count": 12,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"K = tf.keras.backend\n",
|
||
"K.square(K.transpose(t)) + 10"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"But since Keras does not support multiple backends anymore, you should instead use TF's low-level API directly:"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 13,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(3, 2), dtype=float32, numpy=\n",
|
||
"array([[11., 26.],\n",
|
||
" [14., 35.],\n",
|
||
" [19., 46.]], dtype=float32)>"
|
||
]
|
||
},
|
||
"execution_count": 13,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"tf.square(tf.transpose(t)) + 10"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"### Tensors and NumPy"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 14,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(3,), dtype=float64, numpy=array([2., 4., 5.])>"
|
||
]
|
||
},
|
||
"execution_count": 14,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"import numpy as np\n",
|
||
"\n",
|
||
"a = np.array([2., 4., 5.])\n",
|
||
"tf.constant(a)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 15,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"array([[1., 2., 3.],\n",
|
||
" [4., 5., 6.]], dtype=float32)"
|
||
]
|
||
},
|
||
"execution_count": 15,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"t.numpy()"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 16,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"array([[1., 2., 3.],\n",
|
||
" [4., 5., 6.]], dtype=float32)"
|
||
]
|
||
},
|
||
"execution_count": 16,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"np.array(t)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 17,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(3,), dtype=float64, numpy=array([ 4., 16., 25.])>"
|
||
]
|
||
},
|
||
"execution_count": 17,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"tf.square(a)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 18,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"array([[ 1., 4., 9.],\n",
|
||
" [16., 25., 36.]], dtype=float32)"
|
||
]
|
||
},
|
||
"execution_count": 18,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"np.square(t)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"### Type Conversions"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 19,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"cannot compute AddV2 as input #1(zero-based) was expected to be a float tensor but is a int32 tensor [Op:AddV2] name: \n"
|
||
]
|
||
}
|
||
],
|
||
"source": [
|
||
"try:\n",
|
||
" tf.constant(2.0) + tf.constant(40)\n",
|
||
"except tf.errors.InvalidArgumentError as ex:\n",
|
||
" print(ex)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 20,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"cannot compute AddV2 as input #1(zero-based) was expected to be a float tensor but is a double tensor [Op:AddV2] name: \n"
|
||
]
|
||
}
|
||
],
|
||
"source": [
|
||
"try:\n",
|
||
" tf.constant(2.0) + tf.constant(40., dtype=tf.float64)\n",
|
||
"except tf.errors.InvalidArgumentError as ex:\n",
|
||
" print(ex)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 21,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(), dtype=float32, numpy=42.0>"
|
||
]
|
||
},
|
||
"execution_count": 21,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"t2 = tf.constant(40., dtype=tf.float64)\n",
|
||
"tf.constant(2.0) + tf.cast(t2, tf.float32)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"### Variables"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 22,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Variable 'Variable:0' shape=(2, 3) dtype=float32, numpy=\n",
|
||
"array([[1., 2., 3.],\n",
|
||
" [4., 5., 6.]], dtype=float32)>"
|
||
]
|
||
},
|
||
"execution_count": 22,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"v = tf.Variable([[1., 2., 3.], [4., 5., 6.]])\n",
|
||
"v"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 23,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Variable 'UnreadVariable' shape=(2, 3) dtype=float32, numpy=\n",
|
||
"array([[ 2., 4., 6.],\n",
|
||
" [ 8., 10., 12.]], dtype=float32)>"
|
||
]
|
||
},
|
||
"execution_count": 23,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"v.assign(2 * v)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 24,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Variable 'UnreadVariable' shape=(2, 3) dtype=float32, numpy=\n",
|
||
"array([[ 2., 42., 6.],\n",
|
||
" [ 8., 10., 12.]], dtype=float32)>"
|
||
]
|
||
},
|
||
"execution_count": 24,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"v[0, 1].assign(42)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 25,
|
||
"metadata": {
|
||
"tags": []
|
||
},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Variable 'UnreadVariable' shape=(2, 3) dtype=float32, numpy=\n",
|
||
"array([[ 2., 42., 0.],\n",
|
||
" [ 8., 10., 1.]], dtype=float32)>"
|
||
]
|
||
},
|
||
"execution_count": 25,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"v[:, 2].assign([0., 1.])"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 26,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Variable 'UnreadVariable' shape=(2, 3) dtype=float32, numpy=\n",
|
||
"array([[100., 42., 0.],\n",
|
||
" [ 8., 10., 200.]], dtype=float32)>"
|
||
]
|
||
},
|
||
"execution_count": 26,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"v.scatter_nd_update(\n",
|
||
" indices=[[0, 0], [1, 2]], updates=[100., 200.])"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 27,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Variable 'UnreadVariable' shape=(2, 3) dtype=float32, numpy=\n",
|
||
"array([[4., 5., 6.],\n",
|
||
" [1., 2., 3.]], dtype=float32)>"
|
||
]
|
||
},
|
||
"execution_count": 27,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"# extra code – shows how to use scatter_update()\n",
|
||
"sparse_delta = tf.IndexedSlices(values=[[1., 2., 3.], [4., 5., 6.]],\n",
|
||
" indices=[1, 0])\n",
|
||
"v.scatter_update(sparse_delta)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 28,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"'ResourceVariable' object does not support item assignment\n"
|
||
]
|
||
}
|
||
],
|
||
"source": [
|
||
"try:\n",
|
||
" v[1] = [7., 8., 9.]\n",
|
||
"except TypeError as ex:\n",
|
||
" print(ex)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"### Strings"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"The code in this section and all the following sections is in Appendix C."
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 29,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(), dtype=string, numpy=b'hello world'>"
|
||
]
|
||
},
|
||
"execution_count": 29,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"tf.constant(b\"hello world\")"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 30,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(), dtype=string, numpy=b'caf\\xc3\\xa9'>"
|
||
]
|
||
},
|
||
"execution_count": 30,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"tf.constant(\"café\")"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 31,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(4,), dtype=int32, numpy=array([ 99, 97, 102, 233])>"
|
||
]
|
||
},
|
||
"execution_count": 31,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"u = tf.constant([ord(c) for c in \"café\"])\n",
|
||
"u"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 32,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(), dtype=int32, numpy=4>"
|
||
]
|
||
},
|
||
"execution_count": 32,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"b = tf.strings.unicode_encode(u, \"UTF-8\")\n",
|
||
"tf.strings.length(b, unit=\"UTF8_CHAR\")"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 33,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(4,), dtype=int32, numpy=array([ 99, 97, 102, 233])>"
|
||
]
|
||
},
|
||
"execution_count": 33,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"tf.strings.unicode_decode(b, \"UTF-8\")"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"### Other Data Structures"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"The code in this section is in Appendix C."
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"#### String arrays"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 34,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(), dtype=string, numpy=b'hello world'>"
|
||
]
|
||
},
|
||
"execution_count": 34,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"tf.constant(b\"hello world\")"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 35,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(), dtype=string, numpy=b'caf\\xc3\\xa9'>"
|
||
]
|
||
},
|
||
"execution_count": 35,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"tf.constant(\"café\")"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 36,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(4,), dtype=int32, numpy=array([ 99, 97, 102, 233])>"
|
||
]
|
||
},
|
||
"execution_count": 36,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"u = tf.constant([ord(c) for c in \"café\"])\n",
|
||
"u"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 37,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(), dtype=string, numpy=b'caf\\xc3\\xa9'>"
|
||
]
|
||
},
|
||
"execution_count": 37,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"b = tf.strings.unicode_encode(u, \"UTF-8\")\n",
|
||
"b"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 38,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(), dtype=int32, numpy=4>"
|
||
]
|
||
},
|
||
"execution_count": 38,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"tf.strings.length(b, unit=\"UTF8_CHAR\")"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 39,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(4,), dtype=int32, numpy=array([ 99, 97, 102, 233])>"
|
||
]
|
||
},
|
||
"execution_count": 39,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"tf.strings.unicode_decode(b, \"UTF-8\")"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 40,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"p = tf.constant([\"Café\", \"Coffee\", \"caffè\", \"咖啡\"])"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 41,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(4,), dtype=int32, numpy=array([4, 6, 5, 2])>"
|
||
]
|
||
},
|
||
"execution_count": 41,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"tf.strings.length(p, unit=\"UTF8_CHAR\")"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 42,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.RaggedTensor [[67, 97, 102, 233], [67, 111, 102, 102, 101, 101],\n",
|
||
" [99, 97, 102, 102, 232], [21654, 21857]]>"
|
||
]
|
||
},
|
||
"execution_count": 42,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"r = tf.strings.unicode_decode(p, \"UTF8\")\n",
|
||
"r"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"#### Ragged tensors"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 43,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(6,), dtype=int32, numpy=array([ 67, 111, 102, 102, 101, 101])>"
|
||
]
|
||
},
|
||
"execution_count": 43,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"r[1]"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 44,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.RaggedTensor [[67, 111, 102, 102, 101, 101], [99, 97, 102, 102, 232]]>"
|
||
]
|
||
},
|
||
"execution_count": 44,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"r[1:3] # extra code – a slice of a ragged tensor is a ragged tensor"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 45,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.RaggedTensor [[67, 97, 102, 233], [67, 111, 102, 102, 101, 101],\n",
|
||
" [99, 97, 102, 102, 232], [21654, 21857], [65, 66], [], [67]]>"
|
||
]
|
||
},
|
||
"execution_count": 45,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"r2 = tf.ragged.constant([[65, 66], [], [67]])\n",
|
||
"tf.concat([r, r2], axis=0)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 46,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"<tf.RaggedTensor [[67, 97, 102, 233, 68, 69, 70], [67, 111, 102, 102, 101, 101, 71],\n",
|
||
" [99, 97, 102, 102, 232], [21654, 21857, 72, 73]]>\n"
|
||
]
|
||
}
|
||
],
|
||
"source": [
|
||
"r3 = tf.ragged.constant([[68, 69, 70], [71], [], [72, 73]])\n",
|
||
"print(tf.concat([r, r3], axis=1))"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 47,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(4, 6), dtype=int32, numpy=\n",
|
||
"array([[ 67, 97, 102, 233, 0, 0],\n",
|
||
" [ 67, 111, 102, 102, 101, 101],\n",
|
||
" [ 99, 97, 102, 102, 232, 0],\n",
|
||
" [21654, 21857, 0, 0, 0, 0]])>"
|
||
]
|
||
},
|
||
"execution_count": 47,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"r.to_tensor()"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"#### Sparse tensors"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 48,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"s = tf.SparseTensor(indices=[[0, 1], [1, 0], [2, 3]],\n",
|
||
" values=[1., 2., 3.],\n",
|
||
" dense_shape=[3, 4])"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 49,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(3, 4), dtype=float32, numpy=\n",
|
||
"array([[0., 1., 0., 0.],\n",
|
||
" [2., 0., 0., 0.],\n",
|
||
" [0., 0., 0., 3.]], dtype=float32)>"
|
||
]
|
||
},
|
||
"execution_count": 49,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"tf.sparse.to_dense(s)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 50,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"SparseTensor(indices=tf.Tensor(\n",
|
||
"[[0 1]\n",
|
||
" [1 0]\n",
|
||
" [2 3]], shape=(3, 2), dtype=int64), values=tf.Tensor([ 42. 84. 126.], shape=(3,), dtype=float32), dense_shape=tf.Tensor([3 4], shape=(2,), dtype=int64))"
|
||
]
|
||
},
|
||
"execution_count": 50,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"s * 42.0"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 51,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"unsupported operand type(s) for +: 'SparseTensor' and 'float'\n"
|
||
]
|
||
}
|
||
],
|
||
"source": [
|
||
"try:\n",
|
||
" s + 42.0\n",
|
||
"except TypeError as ex:\n",
|
||
" print(ex)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 52,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(3, 2), dtype=float32, numpy=\n",
|
||
"array([[ 30., 40.],\n",
|
||
" [ 20., 40.],\n",
|
||
" [210., 240.]], dtype=float32)>"
|
||
]
|
||
},
|
||
"execution_count": 52,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"# extra code – shows how to multiply a sparse tensor and a dense tensor\n",
|
||
"s4 = tf.constant([[10., 20.], [30., 40.], [50., 60.], [70., 80.]])\n",
|
||
"tf.sparse.sparse_dense_matmul(s, s4)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 53,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"{{function_node __wrapped__SparseToDense_device_/job:localhost/replica:0/task:0/device:CPU:0}} indices[1] = [0,1] is out of order. Many sparse ops require sorted indices.\n",
|
||
" Use `tf.sparse.reorder` to create a correctly ordered copy.\n",
|
||
"\n",
|
||
" [Op:SparseToDense] name: \n"
|
||
]
|
||
}
|
||
],
|
||
"source": [
|
||
"# extra code – when creating a sparse tensor, values must be given in \"reading\n",
|
||
"# order\", or else `to_dense()` will fail.\n",
|
||
"s5 = tf.SparseTensor(indices=[[0, 2], [0, 1]], # WRONG ORDER!\n",
|
||
" values=[1., 2.],\n",
|
||
" dense_shape=[3, 4])\n",
|
||
"try:\n",
|
||
" tf.sparse.to_dense(s5)\n",
|
||
"except tf.errors.InvalidArgumentError as ex:\n",
|
||
" print(ex)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 54,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(3, 4), dtype=float32, numpy=\n",
|
||
"array([[0., 2., 1., 0.],\n",
|
||
" [0., 0., 0., 0.],\n",
|
||
" [0., 0., 0., 0.]], dtype=float32)>"
|
||
]
|
||
},
|
||
"execution_count": 54,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"# extra code – shows how to fix the sparse tensor s5 by reordering its values\n",
|
||
"s6 = tf.sparse.reorder(s5)\n",
|
||
"tf.sparse.to_dense(s6)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"#### Tensor Arrays"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 55,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"array = tf.TensorArray(dtype=tf.float32, size=3)\n",
|
||
"array = array.write(0, tf.constant([1., 2.]))\n",
|
||
"array = array.write(1, tf.constant([3., 10.]))\n",
|
||
"array = array.write(2, tf.constant([5., 7.]))\n",
|
||
"tensor1 = array.read(1) # returns (and zeros out!) tf.constant([3., 10.])"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 56,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(3, 2), dtype=float32, numpy=\n",
|
||
"array([[1., 2.],\n",
|
||
" [0., 0.],\n",
|
||
" [5., 7.]], dtype=float32)>"
|
||
]
|
||
},
|
||
"execution_count": 56,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"array.stack()"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 57,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(3, 2), dtype=float32, numpy=\n",
|
||
"array([[ 1., 2.],\n",
|
||
" [ 3., 10.],\n",
|
||
" [ 5., 7.]], dtype=float32)>"
|
||
]
|
||
},
|
||
"execution_count": 57,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"# extra code – shows how to disable clear_after_read\n",
|
||
"array2 = tf.TensorArray(dtype=tf.float32, size=3, clear_after_read=False)\n",
|
||
"array2 = array2.write(0, tf.constant([1., 2.]))\n",
|
||
"array2 = array2.write(1, tf.constant([3., 10.]))\n",
|
||
"array2 = array2.write(2, tf.constant([5., 7.]))\n",
|
||
"tensor2 = array2.read(1) # returns tf.constant([3., 10.])\n",
|
||
"array2.stack()"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 58,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(3, 2), dtype=float32, numpy=\n",
|
||
"array([[1., 2.],\n",
|
||
" [0., 0.],\n",
|
||
" [5., 7.]], dtype=float32)>"
|
||
]
|
||
},
|
||
"execution_count": 58,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"# extra code – shows how to create and use a tensor array with a dynamic size\n",
|
||
"array3 = tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True)\n",
|
||
"array3 = array3.write(0, tf.constant([1., 2.]))\n",
|
||
"array3 = array3.write(1, tf.constant([3., 10.]))\n",
|
||
"array3 = array3.write(2, tf.constant([5., 7.]))\n",
|
||
"tensor3 = array3.read(1)\n",
|
||
"array3.stack()"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"#### Sets"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 59,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"SparseTensor(indices=tf.Tensor(\n",
|
||
"[[0 0]\n",
|
||
" [0 1]\n",
|
||
" [0 2]\n",
|
||
" [0 3]\n",
|
||
" [0 4]], shape=(5, 2), dtype=int64), values=tf.Tensor([ 1 5 6 9 11], shape=(5,), dtype=int32), dense_shape=tf.Tensor([1 5], shape=(2,), dtype=int64))"
|
||
]
|
||
},
|
||
"execution_count": 59,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"a = tf.constant([[1, 5, 9]])\n",
|
||
"b = tf.constant([[5, 6, 9, 11]])\n",
|
||
"u = tf.sets.union(a, b)\n",
|
||
"u"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 60,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(1, 5), dtype=int32, numpy=array([[ 1, 5, 6, 9, 11]])>"
|
||
]
|
||
},
|
||
"execution_count": 60,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"tf.sparse.to_dense(u)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 61,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(2, 5), dtype=int32, numpy=\n",
|
||
"array([[ 1, 5, 6, 9, 11],\n",
|
||
" [ 0, 10, 13, 0, 0]])>"
|
||
]
|
||
},
|
||
"execution_count": 61,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"a = tf.constant([[1, 5, 9], [10, 0, 0]])\n",
|
||
"b = tf.constant([[5, 6, 9, 11], [13, 0, 0, 0]])\n",
|
||
"u = tf.sets.union(a, b)\n",
|
||
"tf.sparse.to_dense(u)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 62,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(2, 5), dtype=int32, numpy=\n",
|
||
"array([[ 1, 5, 6, 9, 11],\n",
|
||
" [-1, 10, 13, -1, -1]])>"
|
||
]
|
||
},
|
||
"execution_count": 62,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"# extra code – shows how to use a different default value: -1 in this case\n",
|
||
"a = tf.constant([[1, 5, 9], [10, -1, -1]])\n",
|
||
"b = tf.constant([[5, 6, 9, 11], [13, -1, -1, -1]])\n",
|
||
"u = tf.sets.union(a, b)\n",
|
||
"tf.sparse.to_dense(u, default_value=-1)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 63,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(2, 3), dtype=int32, numpy=\n",
|
||
"array([[2, 3, 7],\n",
|
||
" [7, 0, 0]])>"
|
||
]
|
||
},
|
||
"execution_count": 63,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"# extra code – shows how to use `tf.sets.difference()`\n",
|
||
"set1 = tf.constant([[2, 3, 5, 7], [7, 9, 0, 0]])\n",
|
||
"set2 = tf.constant([[4, 5, 6], [9, 10, 0]])\n",
|
||
"tf.sparse.to_dense(tf.sets.difference(set1, set2))"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 64,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(2, 2), dtype=int32, numpy=\n",
|
||
"array([[5, 0],\n",
|
||
" [0, 9]])>"
|
||
]
|
||
},
|
||
"execution_count": 64,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"# extra code – shows how to use `tf.sets.difference()`\n",
|
||
"tf.sparse.to_dense(tf.sets.intersection(set1, set2))"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 65,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(1,), dtype=bool, numpy=array([ True])>"
|
||
]
|
||
},
|
||
"execution_count": 65,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"# extra code – check whether set1[0] contains 5\n",
|
||
"tf.sets.size(tf.sets.intersection(set1[:1], tf.constant([[5, 0, 0, 0]]))) > 0"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"#### Queues"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 66,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(), dtype=int32, numpy=2>"
|
||
]
|
||
},
|
||
"execution_count": 66,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"q = tf.queue.FIFOQueue(3, [tf.int32, tf.string], shapes=[(), ()])\n",
|
||
"q.enqueue([10, b\"windy\"])\n",
|
||
"q.enqueue([15, b\"sunny\"])\n",
|
||
"q.size()"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 67,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"[<tf.Tensor: shape=(), dtype=int32, numpy=10>,\n",
|
||
" <tf.Tensor: shape=(), dtype=string, numpy=b'windy'>]"
|
||
]
|
||
},
|
||
"execution_count": 67,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"q.dequeue()"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 68,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"q.enqueue_many([[13, 16], [b'cloudy', b'rainy']])"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 69,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"[<tf.Tensor: shape=(3,), dtype=int32, numpy=array([15, 13, 16])>,\n",
|
||
" <tf.Tensor: shape=(3,), dtype=string, numpy=array([b'sunny', b'cloudy', b'rainy'], dtype=object)>]"
|
||
]
|
||
},
|
||
"execution_count": 69,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"q.dequeue_many(3)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"## Custom loss function"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 70,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"def huber_fn(y_true, y_pred):\n",
|
||
" error = y_true - y_pred\n",
|
||
" is_small_error = tf.abs(error) < 1\n",
|
||
" squared_loss = tf.square(error) / 2\n",
|
||
" linear_loss = tf.abs(error) - 0.5\n",
|
||
" return tf.where(is_small_error, squared_loss, linear_loss)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 71,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"image/png": "iVBORw0KGgoAAAANSUhEUgAAAqQAAAFkCAYAAAD2RimAAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjguNCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8fJSN1AAAACXBIWXMAAA9hAAAPYQGoP6dpAAB56ElEQVR4nO3dd3gUVRfA4d+mkNASQui9CkgvAqF3kBoUkCZFQJEiRVFjoYgS9KNJkSJIEZGmBBAEQgmhhF6kS++hk0BI2WTn++OaBglkN2V2k/M+zz5kJlNOLpPN2Zl7zzVomqYhhBBCCCGETuz0DkAIIYQQQmRskpAKIYQQQghdSUIqhBBCCCF0JQmpEEIIIYTQlSSkQgghhBBCV5KQCiGEEEIIXUlCKoQQQgghdCUJqRBCCCGE0JUkpEIIIYQQQleSkAohRCLGjh2LwWDAz89P71Be0KdPHwwGA1euXNE7FCGESDZJSIUQNuXKlSsYDAZatWqV6DZ+fn4YDAYGDhyYhpEJIYSwlCSkQgghhBBCV5KQCiGEEEIIXUlCKoTIMIoVK0axYsUS/F6jRo0wGAyJ7rtgwQIqVqyIs7MzBQsWZMSIETx58iTBbf/55x+6du1K/vz5yZQpE0WLFmXo0KE8ePAg3nbR3Q/69OnDmTNn6NixI+7u7snuG7pw4UJq1apFtmzZyJYtG7Vq1WLRokUJbvvHH3/QsGFD8uTJg7OzMwUKFKBZs2b88ccf8bbbsWMHb775JgUKFMDJyYm8efNSv3595s2bZ3GcQggRzUHvAIQQwtpNmTKFbdu28c4779CmTRu2bt3KtGnT2LdvH/7+/jg6OsZsu27dOrp06YKdnR0dOnSgcOHCnD59mpkzZ7J582b279+Pm5tbvONfuHCB2rVrU7FiRfr06cODBw/IlCmTRbF+9NFHzJgxg4IFC9KvXz9AJZ19+/bl6NGj/PjjjzHbzp49m0GDBpE/f/6YZDgwMJADBw6wZs0a3n77bQA2bNhAu3btyJEjBx06dCB//vzcu3eP48eP8+uvv/L+++9bFKsQQkSThFQIYZMuXLjA2LFjE/xeSo8837x5MwcPHqRSpUoAaJpGz549WbZsGdOnT+fjjz8G4MGDB7z77rvkypWLPXv2ULRo0ZhjLF++nG7dujF69GhmzJgR7/h79uxh9OjRjBs3Lllx+vv7M2PGDMqVK0dAQACurq6AqhZQu3Ztpk+fTqdOnahfvz4A8+fPJ1OmTBw7dow8efLEO1bcu7m//PILmqaxY8cOKleunOh2QghhKUlIhRA26eLFi8lO4JKqV69eMckogMFgYMKECaxYsYJFixbFJKRLliwhODiYmTNnxktGAbp27cr//vc/li9f/kJCmi9fPr788stkx7l48WJAJaDRySiAm5sbY8aMoUePHixatCgmIQVwdHSMd4c3mru7+wvrMmfOnKTthBDCXJKQCiFsUsuWLdm0aVOC3/Pz86Nx48Ypdq64CVy0okWLUrhwYU6dOkVERASZMmVi3759AOzfv5+LFy++sE9YWBj379/n/v375MqVK2Z95cqVLX5EH9fRo0cB1R/2edHtcezYsZh1Xbt25dNPP6VChQp0796dxo0bU69ePVxcXOLt27VrV/78809q165N9+7dadq0KfXr14/3MwghRHJIQiqEEK+QN2/eRNdfuXKFJ0+e4O7uzsOHDwGYNWvWS48XEhISL5lL7PjmCg4Oxs7Ojty5cycYq8FgIDg4OGbdJ598gru7O7Nnz2by5MlMmjQJBwcH2rRpw9SpUylevDgAnTt3xsfHhylTpjBnzhxmzZqFwWCgcePGTJ48mSpVqqRI/EKIjEtG2QshMgw7OzsiIyMT/F5QUFCi+925cyfR9QaDgezZswPE3Fk8ceIEmqYl+nr+cf7LRvebw8XFBZPJxL1791743t27d9E0Ld7dT4PBwHvvvcfBgwe5d+8ea9as4a233mLt2rW0bduWqK
iomG07dOjAzp07efToEX///Tf9+/fHz8+PVq1a8fjx4xSJXwiRcUlCKoTIMNzc3Lh79+4LSWlISAjnz59PdL9du3a9sO7q1atcv36d8uXLxzxur1WrFgABAQEpGHXSVa1aFSDBqU6j1yV2N9Pd3R1PT09WrFhBkyZNOH36NBcuXHhhu+zZs9OqVSvmzZtHnz59uHPnDvv370+pH0EIkUFJQiqEyDDeeOMNjEYjv/32W8w6TdPw8vIiJCQk0f2WLFnCP//8E2+fL774gqioKPr06ROzvm/fvmTPnp0vv/ySU6dOvXCcZ8+exfQzTQ29e/cGYNy4cfEezQcFBcUMAIveBlSSqmlavGMYjcaYrgfOzs6AGr0f925ptLt378bbTgghLCV9SIUQGcaQIUNYuHAh/fv3x9fXl9y5c7Nr1y4eP35M5cqVOX78eIL7tWzZEg8PD7p27Uru3LnZtm0bhw4donbt2gwdOjRmu9y5c/P777/TuXNnKleuTKtWrShbtizh4eFcuXKFnTt3UqdOnUQHYyVXgwYNGDp0KDNmzKBChQq8/fbbaJrGH3/8wY0bN/joo49o0KBBzPaenp64uLhQu3ZtihYtitFoxNfXl9OnT9OpU6eYrgUfffQRt27dol69ehQrVgyDwcDu3bs5cOAAtWvXpl69eqny8wghMg5JSIUQGUaFChXYtGkTXl5erF69mmzZstG6dWsmTZpEly5dEt1v5MiRtG/fnmnTpnHhwgVy5szJsGHDGD9+/Auj49u0acPRo0f53//+x9atW/H19SVr1qwUKlSIvn370rNnz1T9GadPn07VqlWZPXt2zCxK5cuX55tvvqFv377xtvX29mbTpk0cOHCA9evXkzVrVkqWLMns2bNjiuoDeHl58eeff3L48GE2b96Mo6MjxYoV4/vvv2fQoEHY29un6s8khEj/DNrzz2uEEEIIIYRIQ9KHVAghhBBC6EoSUiGEEEIIoStJSIUQQgghhK6SlZBOnDgRg8HA8OHDX7rdqlWrKFu2LM7OzlSsWJGNGzcm57RCCCGEECIdsTghPXjwIHPnzqVSpUov3W7v3r1069aNfv36cfToUTw9PfH09OTkyZOWnloIIYQQQqQjFo2yf/r0KdWqVeOnn37i22+/pUqVKkybNi3Bbd955x1CQkL466+/YtbVrl2bKlWqMGfOHIsDF0IIIYQQ6YNFdUgHDx5MmzZtaNasGd9+++1Ltw0ICGDkyJHx1rVs2RIfH59E9wkPDyc8PDxm2WQy8fDhQ9zd3VNszmchhBBCCJFyNE3jyZMnFChQADs78x7Cm52QLl++nCNHjnDw4MEkbR8YGEjevHnjrcubNy+BgYGJ7uPt7R0zzZ0QQgghhLAd169fp1ChQmbtY1ZCev36dYYNG4avr2+qzl3s5eUV765qUFAQRYoU4d9//yVnzpypdt70xGg0smPHDho3boyjo2OS97Pz9karXRutceNUjM56WdpuGZm0mflCQkJipuW8ePEirq6uOkdkG+Ras0xGbjfDypXYDx9O5PHjkDt3kvfLyG2WHA8fPuS1114je/bsZu9rVkJ6+PBh7t69S7Vq1WLWRUVF4e/vz8yZMwkPD39hCrl8+fJx586deOvu3LlDvnz5Ej2Pk5MTTk5OL6zPmTMn7u7u5oScYRmNRrJkyYK7u7t5v0yTJqVeUDbA4nbLwKTNzBf3A33OnDnJkSOHfsHYELnWLJOh261LF8iZE8qWNWu3DN1mKcCS7pVmPeBv2rQpJ06c4NixYzGvGjVq0KNHD44dO5bgfMYeHh5s27Yt3jpfX188PDzMDlakkRMnYMkSvaMQQgghksfdHd55R+8oRBKYdYc0e/bsVKhQId66rFmz4u7uHrO+V69eFCxYEG9vbwCGDRtGw4YNmTx5Mm3atGH58uUcOnSIefPmpdCPIFLcX3/BL79A9+7gYNG4NyGEEEJfY8dC3rzw4Yd6RyKSIMVnarp27Rq3b9+OWa5Tpw
7Lli1j3rx5VK5cmdWrV+Pj4/NCYiusyLBhcOaMJKNCCCFs1+PH8OSJ3lGIJEp2xuHn5/fSZYDOnTvTuXPn5J5KpJUsWdS/jx5BjhwgpbaEEELYmkTqowvrJHPZi4T9+y8UKAA7dugdiRBCCJF0mgYbNoDRqHckwgySkIqElS4NkydDxYp6RyKEEEIk3dGj0LYt7NqldyTCDNJJUCTMYIBBg/SOQgghhDBPtWqqWkz58npHIswgd0jFy/30E8yapXcUQgghxKuZTOrfChVk/IONkYRUvNzFi3Dpkt5RCCGEEK82cSK0bKn6kQqbIgmpeLnJk9VLCCFE+nboEPTqBaVKqbuLX32ld0Tmq1oVWrSQu6M2SBJS8WpRUbBzp95RCCGESE179sC+fVCvHri6puyxV66ENm0gf3517AYNYPfulD0HwJtvwscfp/xxRaqThFS82vr10KgRnDundyRCCCFSy9ChquTfokWqBnVKmjYNcuVSYxJWrYKCBaFpUzh+POXOMXUqnD6dcscTaUpG2YtXa9NGPcopU0bvSIQQQqQWu1S8R7V+vZpXPlqzZqqs4KxZkBJTiT95ohJSd3d4/fXkH0+kOUlIxas5OkL16uprTZO+OUIIIcwTNxkFlfxWqACXL6fM8bNnVwNwZTCTzZJH9iJpTCbVN2fGDL0jEUIIYeuiouDgQTWAKrmePYOHD8HBQd1AETZJElKRNHZ2ULs2lCypdyRCCCFs3cyZcO1aykzAsmgRFCsGwcHJP5bQjTyyF0k3ZozeEQghhLB1+/fD55+rslIpMT11586QJw+4uCT/WEI3codUmOfcOZg7V+8ohBBC2KIrV6BDB2jXLuVucuTODZ06pcyxhG4kIRXm2bEDxo9XfXaEEEJkXJ07q0GuL3vFrTX6+LGq2lKsGCxenDIDZIcOhV9/Tf5xhO7kkb0wT9++8N57kCmT3pEIIYRISffuxU6C8uwZnD0Lq1dD1qxqUOvzypWD3r1fXH/tmrp54egIlSqpdRER8NZb6rjbt0PmzMmP12RSx4uMTP6xhO4kIRXmcXJS/z5+DM7O6iWEEML2nTql7npG++MP9SpaVD1qf94337y47soVNZGKo6OanSm6X+egQSrZ/flnVeoputyTk5Oa7tMSdnawYIFl+wqrIwmpMF9QkHqDmjQJBgzQOxohhBApoVGj5NXxjE5Gb91SyainZ+z3tm5VdzT79Yu/T2LJ7quEhsKWLdC2LdjbWx6zsBqSkArzubrC7NnQuLHekQghhLAGcZPRVavUwKXnv5+SNm9WXQDOn5dyhOmEJKTCMt276x2BEEIIa/CqZDQ1eHqqqi+SjKYbMspeWG75chg2TO8ohBBC6EWPZNRoVP+WLp365xJpRhJSYbnQUHjwQPULEkIIYbteVb4p7ita3GR09eq0SUYBevWS8QvpkCSkwnJ9+8LSpWqkoxBCCNulabBiBbRuDfnyqdHx9evDrl3qe3FfEJuM3r6tktH27dMu1o4doWXLtDufSBNmZRKzZ8+mUqVKuLi44OLigoeHB3///Xei2y9atAiDwRDv5SxlgtIXkwk2boSnT/WORAghRHJMmwa5csGsWerxe8GC0LQpHD8ef7u4yeiqVWmbjAJ06SIzM6VDZg1qKlSoEBMnTqR06dJomsbixYvp0KEDR48epXz58gnu4+Liwrlz52KWDSkxM4OwHjduqCngfvsNunbVOxohhBCWWr8e3N1jl5s1U3PNz5oF8+bFru/dG65ehddegz//VK/nde8OLVqkbHzPnsEXX8DHH0Phwil7bJEiklM1zKyEtF27dvGWv/vuO2bPns2+ffsSTUgNBgP58uWzPEJh3YoUgdOn1RuTEEII2xU3GQXVHatChdgi9qCeih0+rL7+91/1SkjcAvsp5exZdUdWBtNapeBgeOcdy2vCWlz2KSoqilWrVhESEoKHh0ei2z19+pSiRYtiMpmoVq0aEyZMSDR5jRYeHk54eHjMcnBwMA
CLF5sYOtRoacgZivG/UYjR/6aqEiXU1G1Go5qdw4alabulE9Jm5ovbVkajUdouieRas4zF7RYVhcPBg5iaN8cUd99Hj5J6YvPO9yoVK8LFi6oQfipfA3KtmefGDWjf3oGTJy0fU2LQNPNusJ44cQIPDw/CwsLIli0by5Yto3Xr1gluGxAQwPnz56lUqRJBQUFMmjQJf39/Tp06RaFChRI9x9ixYxk3blwC3wmic+fbdO9+Fnnyb10qLFhAlsBADnz5pd6hCGH1wsLC6PpfF5fly5dL33phlUqsX0/5hQvxmzKFJ8WK6RpLljt3iMialchs2XSNQ7zo0iUXvv22Ng8fZgaCAVeCgoJwiZ42NonMTkgjIiK4du0aQUFBrF69mvnz57Nz505ef/31V+5rNBopV64c3bp1Y/z48Ylul9Ad0sKFCwNBgAtdu5r4+eeomGnVxYuMRiO+vr40b94cxzS4a2n46y94/BitZ89UP1dqSut2Sw+kzcwXEhKCm5sbAHfv3iVHjhz6BmQj5FqzjCXtZjhwAPtmzTB98gmm0aNTOcJXs+/YER4/JmrHjjQ5n1xrSbN5s4Fu3ex5+lTdJSxa9DFXr7pZlJCa/cg+U6ZMlCpVCoDq1atz8OBBfvzxR+bOnfvKfR0dHalatSoXLlx46XZOTk44JZhtqtx5+XI7bt2yY80ayJnT3J8gY3F0dEybX6aOHVP/HGkozdotHZE2S7q47STtZj5pM8skud2uXIG334Z27bD/5hvsreGR5M8/Q2Agdmn8/y7XWuLmzYNBgyAqSi3Xrg2LFkVRtqxlx0t2AUmTyRTvbubLREVFceLECfLnz2/RuZYsiSJzZvW1vz/UqQOXLll0KJEaAgNhxAh4/FjvSIQQQlji8WNo0waKFYPFi7Ga/nH580PVqnpHIVDj2ry84IMPYpPRt9+G7dtV1TBLmZWQenl54e/vz5UrVzhx4gReXl74+fnRo0cPAHr16oWXl1fM9t988w1btmzh0qVLHDlyhJ49e3L16lX69+9vUbCtW2vs3Al58qjlc+fAwwMOHLDocCI1rFoFp07pHYUQQghzRUTAW2+p8kpr1xJzB0hPjx5BlSqwf7/ekQggLAx69ICJE2PXffwxrFyZ/MvFrEf2d+/epVevXty+fRtXV1cqVarE5s2bad68OQDXrl3DLs6sPY8ePWLAgAEEBgbi5uZG9erV2bt3b5L6mybmjTdg3z41mcTZs3D3rqrPu2wZeHpafFiREvLlU7Xp7C0v+yCEEEIngwbBzp3q8fjly7Hlnpyc9Ls7+fSpKj2l86AqoWYK9/SE3bvVsp0dTJ8OgwenzPHNSkgXLFjw0u/7+fnFW546dSpTp041O6hXKV4c9u5V3RZ37lRTqr/1FkydKuXJdGdvD0+eqBk8pDapEELYjq1b1fPYfv3iry9aVPUr1UPhwmqKaqGrixfVjcDosrNZssDy5WpenJRis5OQu7nB5s0QPahb02D4cJWQRvdpEDrp2VPNcy+EEMJ2XLny4rz1mqZfMurvr6amFrrat091j4xORvPmVTcDUzIZBRtOSEE9RViyBL76Knbd9Omqc+2zZ/rFleFNmKA+OgkhhBCW+u03+OEHvaPI0P78Exo3hnv31HK5cipBrVEj5c9l0wkpqAGA48fDggXg8F8HhLVrVb/SO3d0DS3jKl9ePWZJzqS2QgghMrY5c9QfdJHmNE11g+zUSQ1kApWY7t2bet15bT4hjfbee+rOfvbsavngQVUT68wZfePKsE6fVtO8xZ0DWQghhEiK27fVHSdXV70jyXCiolT3x5EjY+8rvfsubNoEqTmHR7pJSAGaN4c9eyB6VtIrV1St0p07dQ0rYypWDKpVU3PcCyGEEEl1+TIUKQLr1+sdSYYTEqIGic+YEbtu9GhVkjZTptQ9d7pKSEHdlNu/X5UtA1Xjt3lz1RVFpKEsWVQH39Kl9Y5ECCGELSlQAObPhyZN9I4kQwkMVN0d161Tyw
4OsHAhjBuXNvMjpLuEFNS17O8Pb76plo1GNfD722+lW2Oa274dfHz0jkIIIYStcHKC3r0ha1a9I8kwzpxR3RwPHVLLLi7w99/Qp0/axZAuE1JQfUnXrVNTW0X7+mvo318lqCKNLF4Mv/yidxRCCCFswfTp8OmnekeRofj5qe6NV6+q5cKFVfH7Zs3SNo50m5CCut08ezZ8/33sul9+UdP0BgXpF1eG8tNPMkpSCCFE0phM8igzDS1dCi1aqO6NoCbk2rdPdX9Ma+k6IQXV7+HTT1VZTCcntc7XF+rXh+vX9Y0tQ8iaVf0nXL0qbzJCCCFebvhw+N//9I4i3dM0VTLz3Xdjnxq3bq26OxYooE9M6T4hjfbOO2pWtJw51fKJE6q/xLFjuoaVMRw5ouZ79ffXOxIhhBDWSNNUF6+nT/WOJN0zGlX3xdGjY9cNHKgeZmbLpl9cGSYhBahXT92KLllSLd+6pe6U/v23vnGle1Wrwq+/Qs2aekcihBDCGp06Bf36qTI5ItUEBak7oXGHdnz/vepdFz25kF4yVEIKqgpRQICalxXUh7F27WDePH3jStcMBujRAzJn1jsSIYQQ1qhCBdW1S0o9pZrr19VNuK1b1bKTE6xYobo1pkVZp1fJcAkpQO7csG2bmhIL1KwEH3wAn3+u+lOLVPK//6n+QUIIIUS0J0/UH9+CBa0jM0qHjh6FWrVUd0UAd3eVB3Xpom9ccWXIhBTUzboVK+CTT2LXff89dO8eO2+rSGHZsqniZkIIIUS0jz6CVq30jiLd+vtvaNBAzcYKqttiQADUratvXM/TuceAvuzs1E274sVh6FD1AW3FCrhxQ3XudXfXO8J05sMP9Y5ACCGEtenfH+7f1zuKdGnuXBg8WD0JBtVdce1a9aTY2mTYO6RxDRqk/oOyZFHLe/ao/7QLF/SNK10KDVW/IcHBekcihBDCGtStCx066B1FumIyqW6IAwfGJqOdOqnH9NaYjIIkpDHatlVVifLlU8vnz6ukNCBA37jSnYcPVT9SPz+9IxFCCKGnoCCVJf37r96RpCthYar7YdxJgT75RD0BtuaxxZKQxlG9uioLVb68Wr5/Xw34++MPfeNKVwoWVH0i2rfXOxIhhBB6unFDjazXs/hlOvPggZryc8UKtWxnB7Nmqe6Jdlae8Vl5eGmvaFE1h2t05YmwMOjcGSZPlomGUoy7u2rMO3f0jkQIIYReypeHgwf1mxoonbl4UT3Z3bNHLWfJorojDhqkb1xJJQlpAnLkUKPSevdWy5qmbncPHQqRkbqGln589BE0by5ZvhBCZER79sClS3pHkW4EBKjZJ8+fV8v58qluiG3b6huXOSQhTUSmTLBwIYwbF7tu1izo2FFmNksR770H06bpHYUQQgg9fPYZjBqldxTpwh9/qKe60YUKXn9ddT+sXl3fuMxlVkI6e/ZsKlWqhIuLCy4uLnh4ePD3K+bdXLVqFWXLlsXZ2ZmKFSuycePGZAWclgwGNdfr4sWxU2r99Rc0bBhbz0tYqGpV9RskRZCFECLj2bIFZszQOwqbpmkwZYrqVhhdP71xY3XzuWhRfWOzhFkJaaFChZg4cSKHDx/m0KFDNGnShA4dOnDq1KkEt9+7dy/dunWjX79+HD16FE9PTzw9PTl58mSKBJ9WevWCzZvB1VUtHzmibo0n8mOLpHr4EDw9Ze5iIYTIKDRNPWbMkkX6jiZDVJTq+fbxx7E933r1gk2bVLdDW2RWQtquXTtat25N6dKlee211/juu+/Ili0b+/btS3D7H3/8kVatWjFq1CjKlSvH+PHjqVatGjNnzkyR4NNSkybqU0eRImr52jVVOm37dn3jsmmuruq3SvpACCFExuDnB4UKSaHvZAgJUd0H46ZSY8bAokWqu6GtsrgPaVRUFMuXLyckJAQPD48EtwkICKBZs2bx1rVs2ZIAGy3uWb686pdRrZpaDgpSs50tWaJvXDbL3h7Wr4emTfWORAghRFooUw
a++ELNXynMFhioug2uX6+WHRxUIjp2rO33gDN76tATJ07g4eFBWFgY2bJlY82aNbz++usJbhsYGEjevHnjrcubNy+BgYEvPUd4eDjh4eExy8H/zepjNBoxGo3mhpyicuWCrVuhZ097Nm60w2hUo/EvXIjiq69MVnNBRLeT3u2VJNevY9i1C617d70jsa12sxLSZuaL21bW8L5mK+Ras4xVtVvu3DBihNWXrLGqNvvP6dPQoYMDV6+qRMPFRWPlyiiaNNGwljCT015mJ6RlypTh2LFjBAUFsXr1anr37s3OnTsTTUot4e3tzbi4w9v/s2PHDrJEz++ps379ACqycWMJAMaPt2f37psMGnQMR0frKWXk6+urdwivVGLdOl774w98nZ2JcnbWOxzANtrN2kibJV1Y9AgEYPv27ThbyXVvK+Ras4ze7fb6kiU8LlmSW3Xr6hqHOfRus2gnTuTC27smz56pZDR37md89dU+wsKeYE1jxZ89e2bxvgZNS14hyGbNmlGyZEnmzp37wveKFCnCyJEjGT58eMy6MWPG4OPjw/HjxxM9ZkJ3SAsXLszt27dxd3dPTrgpStPgxx/t+OwzOzRNXSSNG5tYsSJK907FRqMRX19fmjdvjqOjo77BvEpoqPq0nD273pHYVrtZCWkz84WEhODm5gbA3bt3yaH3G4aNkGvNMlbRbiYT9j17ojVogGngQH1iMINVtNl/li418MEH9hiNKs+oWlXDxyeS/Pl1DStBDx48IH/+/AQFBeHi4mLWvmbfIX2eyWSKlzzG5eHhwbZt2+IlpL6+von2OY3m5OSEk5PTC+sdHR11vzCeN2oUlCgBPXuqsgs7dtjRqJEdGzdaR9kFa2yzF0THFxKiBjmZeRGnBptoNysjbZZ0cdtJ2s180maW0b3dVq0CwF6/CMymZ5tpGowfrwYsRWvTBpYvN5Atm3Ve/8lpK7MGNXl5eeHv78+VK1c4ceIEXl5e+Pn50aNHDwB69eqFl5dXzPbDhg1j06ZNTJ48mbNnzzJ27FgOHTrEkCFDLA7YGr39thptnyuXWj59WpWFOnxY37hsSmQkVKgA33+vdyRCCCFSUmCgGnwhM/MlWUSEmj8mbjL64Yfg4wPZsukWVqoyKyG9e/cuvXr1okyZMjRt2pSDBw+yefNmmjdvDsC1a9e4HadifJ06dVi2bBnz5s2jcuXKrF69Gh8fHypUqJCyP4UV8PBQI/BLl1bLgYHQoIEqpC+SwMEBpk6F99/XOxIhhBApadkydefmyRO9I7EJQUHqTuiiRbHr/vc/NVukQ7Kfa1svs360BQsWvPT7fn5+L6zr3LkznTt3NisoW1WypJpPtkMHVbP02TP19YwZMGiQ3tHZAE9PvSMQQgiR0kaMUO/vVtAdy9pduwatW8dOvOPkBL/+qmZjSu9kLvsU5u6unky8845aNplg8GDV19Rk0jc2mxAQoGYhCA3VOxIhhBDJFRSkCmSWKKF3JFbv+Vkg3d1Vd8CMkIyCJKSpwtlZPaH4/PPYdZMmQZcukme9Up48kDWrmlZUCCGE7QoKUono4sV6R2L1Nm5U3fyiez2WKqXuz9Spo29caUkS0lRiZwfe3jB3rpqQCOCPP9SkRPfu6RubVStZUk1BUbCg3pEIIYRIDmdn9YfwuRkbRXxz5kC7dqrQDKgxKQEBsWNSMgpJSFPZ+++r/Cp6VFxAgLrY/v1X37is3u7dYCUFiYUQQljAyUn9EZQbDAkymeDTT9Xo+egufZ07w7ZtsVV7MhJJSNPAm2/Crl1QoIBavnhRJaW7d+sbl1WbNAlmz9Y7CiGEEJb4+Wc1Z72UekpQWBh07apGz0cbNQqWL4fMmfWLS0+SkKaRKlVUWaiKFdXyw4fqKcaKFbqGZb0WLYLVq/WOQgghhCWePlVlngwGvSOxOvfvq+57/80TgJ2dKun0ww/q64wqA//oaa9wYXVX9L+yrYSHq09I338vHyJfkCOH+s28ckXKEwghhK
0ZMULVPBTxXLigBirt3auWs2aFdeukNCRIQprmXFxgwwY1A0O0zz9XfUgiI/WLyypduqSGGq5Zo3ckQgghkiIyEhYskJIyCYgeQ3L+vFrOlw927lRF8IUkpLpwdIT58+Hbb2PXzZ0L7dvLRBbxlCihOtS8+abekQghhEiKffvggw/g7Fm9I7Eqq1dD48bqcT1A+fKwfz9Ur65vXNZEElKdGAzw5ZewdClkyqTW/f23qkN286a+sVmVTp0gSxbp0yCEELagXj013VDVqnpHYhU0DSZPVnXIw8PVuiZNVPe9IkX0jc3aSEKqsx49YMsW1WUS4NgxNVPDiRN6RmVlFi+GFi0kKRVCCGt2+7Z6n44uKZPBRUbCkCHwySexf75691Y3n6L/5otYkpBagYYNVQfnYsXU8o0bULeulOGMUaQIVKoU+/FSCCGEdTGZVOmYjz7SOxKr8PQpdOwIP/0Uu27cOFi4MPapqIjPQe8AhFKunOp6064dHDyo+pK2bq36lsYdAJUhNW6sXkIIIayTnZ3KvuTWH7dvQ9u2am56AAcHNc6rVy9947J2cofUiuTNC35+0KGDWo6MhH794Ouv5Wk1UVHqN1pmExBCCOvUsCFUrqx3FLo6dUp1u4tORl1dYfNmSUaTQhJSK5Mli5rzftiw2HXffgvvvpvBn1jb2anbxdu36x2JEEKIuFavVmViwsL0jkRX27er7nbXrqnlIkVgzx41iEm8mjyyt0L29jBtGhQvrmoLaxr89pvqW7pmDbi56R2hDgwG8PcHZ2e9IxFCCBFX5sxqIFMGfn9esgT69wejUS1XqwZ//QX58+sbly2RO6RWbNgw+PPP2Hltd+5UMzxcvqxvXLpxdlbZ+b59ekcihBAiWps2MGeO3lHoQtPUYKXevWOT0bZt1d9rSUbNIwmplfP0VP1K8+RRy2fPqv4pBw7oGZWOdu5UU13s3693JEIIkbGZTDB+PNy6pXckuoiIgL59YezY2HWDBqknmdmy6RaWzZKE1AbUrKluCpYpo5bv3oVGjWDtWl3D0kfDhioprVlT70iEECJju3hR9S+7dEnvSNLc48dqEsHFi2PXTZoEM2eqUfXCfJKQ2ojixVWt0oYN1XJoqKpxNn26vnGlOYNBTWdlMKiPp0IIIfRRujRcv65mZ8pArl5VP3L0GFsnJ1i1Cj7+WP1pEpaRhNSG5Mypykd0766WNU31Mx0+XFVFylC++UYNXczw9bCEEEIH586p6u9ZsugdSZo6ckR1mzt1Si3nyqUS006d9I0rPZCE1MY4OcHSpfDVV7HrfvxR/TI8e6ZfXGmuYUPVi1wSUiGESHu9emW44pp//aUe0AUGquVSpSAgQA02FsknPR1skMGg+pEXKwYffKDujvr4qMmM1q1TBfbTvYYNY/svCCGESFurVkFIiN5RpJnZs9W89CaTWq5bV/3dzZVL17DSFbPukHp7e/PGG2+QPXt28uTJg6enJ+fOnXvpPosWLcJgMMR7OWfgWmUpqV8/2LgRsmdXywcOqAHoZ8/qG1eaMRph1CjYtEnvSIQQImOIilKvIkXUnNfpnMmk/swMGhSbjHbuDFu3SjKa0sxKSHfu3MngwYPZt28fvr6+GI1GWrRoQcgrPiW5uLhw+/btmNfVq1eTFbSI1aKFmk2zUCG1fPmyenywa1cG6Fnt4ABnzqgZA4QQQqS+33+HChXgyRO9I0l14eF29Ohhz6RJses+/RSWL8/QcwCkGrMe2W967k7UokWLyJMnD4cPH6ZBgwaJ7mcwGMiXL59lEYpXqlRJlYVq2xaOHYNHj+DNN+0ZPLggrVvrHV0qMhhg/XoZ1iiEEGmlQgXo2TP20Vw6df8+jBlTh7Nn1X07OzuYNQsGDtQ5sHQsWX1Ig4KCAMiZM+dLt3v69ClFixbFZDJRrVo1JkyYQPny5RPdPjw8nPA4E7cHBwcDYDQaMUZPhSDiyZMHtm2D7t3t2bzZjogIA1On1sDVNQIvL2P6zt
nCwjCsXInWs6d610iG6OtLrrOkkzYzX9y2kve1pJNrzTIp2m7ly6tXOv4/OH8e2re35+JFdwCyZtVYtiyKN9/UbPrHPnzYwMyZduzbZ+DiRQOffx7FN9+YUvQcybnGDJpm2TBlk8lE+/btefz4Mbt37050u4CAAM6fP0+lSpUICgpi0qRJ+Pv7c+rUKQpFP2d+ztixYxk3btwL65ctW0aWDFZiwlxRUQbmzq3Eli3FYtY1a3aVgQOP4+CQPkek5zx9mrpffYX/Dz8QVKqU3uEI8UphYWF07doVgOXLl0u/emH17IxGqk6fzr+dOvGkaFG9w0k1Z8+68d13tXjyxAkAN7cwvvpqHyVLBukcWfKtX1+Cv/8uTpkyD9m/Pz9t2lyiR4+UHXTy7NkzunfvTlBQEC4uLmbta3FC+uGHH/L333+ze/fuRBPLhBiNRsqVK0e3bt0YP358gtskdIe0cOHC3L59G3d3d0vCzVA0Db7/XmP06Ewx65o3N/H771GYeX3Yjhs3YjvSJoPRaMTX15fmzZvj6OiYAoGlf9Jm5gsJCcHNzQ2Au3fvkiNHDn0DshFyrVkmRdrt0iUcunYlcvHidDuYafVqA3372hMerh4pFikSzJYtjpQokT4KEplMsQ8RS5d2oFs3U4rfIX3w4AH58+e3KCG1qJWHDBnCX3/9hb+/v1nJKICjoyNVq1blwoULiW7j5OSEk5NTgvvKm1DSfP65keDgg0yfXoOICAO+vnY0bmzHxo0pkrdZn+LF1W/bzZtQuHCyDyfXmvmkzZIubjtJu5lP2swyyWq3MmXgyBEc02H/L01T035++mnsuqZNTbz33i5KlGiRbq81e3t7HB3tU/SYyWkrszrcaZrGkCFDWLNmDdu3b6d48eJmnzAqKooTJ06QP39+s/cV5qlX7xabN0cR3cX3xAmoVUsNfEqXPvtMVS2OjNQ7EiGESD/+/lvNW58Ok9HISBg8OH4y2qcPrF0bRdas8rckLZmVkA4ePJilS5eybNkysmfPTmBgIIGBgYSGhsZs06tXL7y8vGKWv/nmG7Zs2cKlS5c4cuQIPXv25OrVq/Tv3z/lfgqRqLp1NQICoEQJtXzrFtSvn05Ldw4YAIsXq3JQQgghkk/T4PPP4Ycf9I4kxT19Cp6equh9tG++gV9+gUyZ4m+7aJHKxxctsvx8KXGM9Mysv9yz//tfa9SoUbz1CxcupE+fPgBcu3YNuzgjnR89esSAAQMIDAzEzc2N6tWrs3fvXl5//fXkRS6S7LXXVFmo9u3Vv0+fqhJRP/0E77+vd3Qp6LXX1AvUm2g6/DQvhBBpymBQfzji3HhKD27dUn8Hjx5Vy46OsGABvPuuvnFlZGYlpEkZ/+Tn5xdveerUqUydOtWsoETKy50btm9Xv2x//KEm2vjgA1VI/7vvkl0tyXpoGnTqBDVrqkf4QgghLBMcrJ5p58wJmTPrHU2KOXkSWreG69fVsqsr/PknNGmib1wZXXpJQ0QSZM4MK1fCxx/Hrps4EXr0gLAw/eJKUQYD1KgRe6dUCCGEZb7/HipWhDhVb2zdtm1qHvroZLRoUdi7V5JRayAJaQZjZ6dGE86cGXtXdPlyaN4cHjzQN7YU4+UFHTvqHYUQQti2YcNg3jxIoOqNLVq8GFq1Ujd+AapXV70RktuDsHNndS/kZa+XlGsX/5HRHxnU4MFQpAh07QrPnqlfljp1YONGKFlS7+hSwP37MHYsfP015M2rdzRCCGFbTCY1BWCbNnpHkmyaBuPGqVe0du3g998ha9bkH79cOejd+8X1167Bjh2qf2qlSvCSapdp4t492LlTff3sGZw9C6tXqzZ48019YwNJSDO0du3A31917A4MhH//hdq11fTwtWvrHV0y2durUgJvvSUJqRBCmOPcOZWIrl2rpgm1YRERqgDLkiWx6wYPhh9/VH8mUsI337y47soVaNRIJaMrV2IVk9KcOqXu5kb74w/1KlpUxas3eWSfwT3/yO
L+fWjcWF2kNs3NTb2pSscgIYQwT6ZM0LSpzT8ue/xYPaKPTkYNBpg8GWbMSLlkNCHRyeitWyoZ9fRMvXOZo1Ejdbf4+Zc1JKMgd0gF6tPRnj3qZuKOHWqAU+fOqq/piBE2XD3J3l7VuNq+XdW8EkII8WrFi8PcuXpHkSxXr6qR9KdPq2VnZ1i6FN5+O3XPGzcZXbUKOnSw/FgdO8KZM+bts2SJKjJjiyQhFQDkyKGecEc/2tA0NRr/8mWYNi11P02mqqVLYeRI9e6UO7fe0QghhPXSNBg+HN55Rw0qsFGHD8d2RQPIlQvWrQMPj9Q9b0omo6D+/p47Z94+z54l75x6kkf2IkamTGoGiTFjYtfNnKk+pYWE6BZW8rz3nvqIKcmoEEK8XHAwHDhg0yVX/vpLzSAdnYyWLq26pdlaMgpqmu+EHrG/7PXcvEU2RRJSEY/BoAanL1oUOwPn+vXQsGHsL7hNyZRJ9UmIjISbN/WORgghrJerqyrK2bat3pFY5KefVCIYfZewbl0ICEj9rrBxk9HVq1MmGU1prypLFfelF0lIRYJ691aP8F1d1fLhw2rk/alT+sZlsT591K3eJMw2JoQQGY6/v5rCSO+sxAImE4wapUbPm0xq3TvvwNat4O6euueOTkZv31bJqLUOV9A0WLFC9avNl0+N+q9fH3btevEuq14kIRWJatpUDXYqUkQtX72qPnHu2KFvXBYZORJmz7a5N1ohhEgT330HX3yhdxRmCw1VyeekSbHrPvsMli1TA5lSU9xkdNUq601Go02bpvrTzpql4i1YUP2dP35c78gUGdQkXqp8edX/pm1bOHIEgoKgZUuYPx969dI7OjNUq6b+jf4IaCefxYQQIsb69apOkg25d089Hg8IUMt2duqx/QcfpM35e/dWN2peew3+/FO9nte9O7RokTbxvMr69fHvGDdrpmaGnTVLTcilN0lIxSvlz69md+jaFTZsAKNR/SJeuaImQrKZm46RkaooXfv28NFHekcjhBD6Cw5W5fEKFFAzM9mI8+fV7EIXL6rlrFlVzc/WrdPm/CaT6soGalKZf/9NeLu4hej19nz3BTs7qFBBjea3BnKbSCRJtmzg4wODBsWuGzNGDWKPiNAtLPM4OKiq/+XK6R2JEEJYh8mToUoV9ezbRuzZo0bNRyej+fOrvpBplYyCSuaePn31qHdrnnk1KgoOHoRSpfSORJE7pCLJHBxUGagSJeCTT9S6RYvg+nXVmTtHDj2jS6Ivv9Q7AiGEsB4jRqjBAZkz6x1JkqxcqbqLhYer5QoVYONGKFxY37hs0cyZcO1a/BtNepI7pMIsBoMqmL9qFTg5qXXbtkG9eurCtgl37qhR91IGSgiRkUVEqDsJ1tLJ8SU0DX74QQ1gik5GmzWD3bslGbXE/v3w+efw1VeqH6k1kIRUWKRTJzUjZ65cavnUKahVSw18snrOznD0KFy4oHckQgihj0OHVI3ms2f1juSVIiPVXbzPPotd17evujMaXZpQJN2VK2owWLt28SfC0ZskpMJideqo0Y2lS6vlwEA1Q8Zff+kb1yu5uqopMBo21DsSIYTQR758anSqtXQgTMTTpyp5mjMndt348bBgATg66heXrXr8WPVrLVYMFi+2rkHJkpCKZClVSk3sUbeuWg4JUW8es2frG9crGQzw6JGqESLF8oUQGU2hQjBxYuyUfFbo1i11k2PjRrXs6Ai//qoeM6d1IlWlirqbWKWKvsdIjogIeOstNZPV2rXW123Yeq9EYTNy5VIzYvTurTqcm0zq8cqlS/D991Zc8nPPHvj0U1VYNbXnlhNCCGtgNKo+V59+GnsnwQqdOKHu5F2/rpZz5IA1a/Sbq71KleQnkilxjOQYNEiVcPz5Z1XqKbrck5MTVK2qX1zRJCEVKcLZGX7/HYoXV0koqJkzrlyBJUus75MYoN7trlyJ7QgrhBDp3cOHqsRTtmx6R5KorVvh7bdViVRQXV03boTXX9c3Ll
u3dau6YdSvX/z1RYuqP4V6s9Z7V8IG2dmpJ0Bz5sTeFV29Wo2EvH9f39gSZDCoZDQ8PLbCsRBCpGd588KWLVC5st6RJGjhQlXwPjoZrVFDzRYoyWjyXbmScK1Ua0hGQRJSkQo++EBNUZY1q1reu1cVMT5/Xt+4EjV2rHoHDAvTOxIhhEg1hoULVSV0K6RpMHq0mmwlMlKta98e/PzU+CuR/pmVkHp7e/PGG2+QPXt28uTJg6enJ+fOnXvlfqtWraJs2bI4OztTsWJFNkb3UBbpVuvWauaM/PnV8oULKinds0ffuBI0YoR613N21jsSIYRIHVFR2P38s1WWQYmIUGMQxo+PXTd0qJobPvrGhkj/zEpId+7cyeDBg9m3bx++vr4YjUZatGhBSEhIovvs3buXbt260a9fP44ePYqnpyeenp6cPHky2cEL61a1qiq+W6GCWn7wAJo2VQOfrEqePOp5kMmkamIIIUR6Y29PlL8/fPGF3pHE8+gRtGqlRs+D6kk1dSr8+CPY2+sbm0hbZiWkmzZtok+fPpQvX57KlSuzaNEirl27xuGX9L/78ccfadWqFaNGjaJcuXKMHz+eatWqMXPmzGQHL6xf4cJqJo3mzdVyeLiaaeOHH6yw2lLXrtj36aN3FEIIkbKOHiVLYKAq8RQ9xZ4VuHJFDfTfsUMtOzurcQfDh1tXfUyRNpI1yj4oKAiAnDlzJrpNQEAAI0eOjLeuZcuW+Pj4JLpPeHg44dFzgwHB//VuNhqNGI3GZESccUS3kzW0V5Ys4OMDgwbZs3ix+gz02Wdw8WIU06aZrKYMnuG994g0GCAszCrazVZY07VmK+K2lbyvJZ1ca5ax+/JLqty5g/Hdd/UOJcbhwwY8Pe25c0dlnrlyaaxZE0WtWhrW8N8r15plktNeFqcCJpOJ4cOHU7duXSpEP5NNQGBgIHnz5o23Lm/evAQGBia6j7e3N+PGjXth/Y4dO8iSJYulIWdIvr6+eocQw9MTjMbXWLasHADz5tlz+PA9PvnkEJkzR+kb3HN8t2yRj+hmsqZrzdqFxRlAt337dpyl/7JZ5Fozj33//mR68oRQK2m3AwfyMnlyDcLD1XtsgQJP+frrAB48eIa1DTGRa808z549s3hfixPSwYMHc/LkSXbv3m3xyRPj5eUV765qcHAwhQsXpnHjxri7u6f4+dIjo9GIr68vzZs3x9GK5ldr0waaN4/k/fftMRoNHD6cj++/b4OPTyQFCugdHRhDQwlq3hz37t0xDBqkdzg2wVqvNWsWt999kyZNyJEjh37B2BC51sz0+DFERmJ0dbWadps9246JE+0wmVQyWreuidWrnXB3b6RrXM+Ta80yDx48sHhfixLSIUOG8Ndff+Hv70+hQoVeum2+fPm4c+dOvHV37twh30vqODg5OeGUQD8XR0dHuTDMZI1t1qePmke3Y0f1fnnsmIH69R3ZsAEqVtQ5OCC4aFFyFy6Mg5W1m7WzxmvNWsVtJ2k380mbJdHEifDHH3D6NKBvu5lMMGoUTJkSu65rV1i40A5nZ+utQCnXmnmS01ZmXQWapjFkyBDWrFnD9u3bKV68+Cv38fDwYNu2bfHW+fr64uHhYV6kIl1p1EjVJy1WTC1fvw716qmZJPR2tkcPtHbt9A5DCCGS57PPYN48NQm8jkJDoUuX+Mno55/Db79JtT0Ry6yEdPDgwSxdupRly5aRPXt2AgMDCQwMJDQ0NGabXr164eXlFbM8bNgwNm3axOTJkzl79ixjx47l0KFDDBkyJOV+CmGTypVTM3DUqKGWg4NVffqFC/WNC4AnT1SF/6NH9Y5ECCHMo2lqoo88eaBFC11DuXcPmjRRN2pBlXKaOxe8vWNn9BMCzExIZ8+eTVBQEI0aNSJ//vwxrxUrVsRsc+3aNW7fvh2zXKdOHZYtW8a8efOoXLkyq1evxsfH56UDoUTGkTevqknfoYNajoxUM3WMHq1zWShnZ/WYy1rmVBNCiKRatw
7KlIHnusultX//hdq11Y0HgGzZ1Cx+77+va1jCSpnVh1RLQobg5+f3wrrOnTvTuXNnc04lMpCsWdWn55EjYfp0tW78eLh8GRYsgEyZdAjK0RH8/WWkvRDC9pQvD/36qTukOtm9W91oePhQLRcoABs2QJUquoUkrJzcMBdWwd5ezcwxbVpsDrh0KbRsqWby0IXBAM+ewYQJqj+BEELYglKl1GMmnT5Qr1ihZuWLTkYrVlR3SSUZFS8jCamwKsOGqbulmTOrZT8/NZOHbk/O79+HSZPUCCwhhLBmly6BhwdcvKjL6TUNvv9ejZ6PiFDrmjdXd0sLF9YlJGFDJCEVVqdjRzWVXO7cavnMGahVCw4e1CGYIkXg2jU12bIQQlizsDDVMf8lZRVTS2QkfPihGj0f7b331GN6F5c0D0fYIElIhVWqVUs94ilTRi3fvatKRa1bp0Mw2bJBVJSa/1TXkVZCCPESr7+u3qeyZk3T0z55Au3aqdHz0b79FubP173ilLAhkpAKq1WihHpS3qCBWn72TE0/OmOGDsFs3w5vvQXHj+twciGEeInQUDXjiA6P6m/eVO/RmzapZUdH1f//yy9lTKgwjySkwqrlzAlbtkD37mpZ0+Cjj2DECHXTMs00awYnT0qvfCGE9bl6FQ4cSOM3RThxQpV1OnZMLefIod6ve/RI0zBEOiEJqbB6Tk7w66/qE3e0adOgc2d11zRNGAzqcZimqXdhIYSwFmXLqg/Mr72WZqf09VUDTm/cUMvFiqknWo0apVkIIp2RhFTYBDs71Sfp559ViSiANWvUDCB376ZhIMuWQdWqUjBfCKE/TVNVQG7fTtNpj375BVq3Vn1HQc22t2+fmn1PCEtJQipsSv/+sHEjZM+ulvfvV4+Mzp1LowA6dVIBFCuWRicUQohE3Lyp6izt358mp9M0+PprVXM/MlKt69BBlefLmzdNQhDpmCSkwua0aAG7dkHBgmr58mVVes/fPw1O7uQUOzf0/ftpcEIhhEhEoUJqIFP03MupKDwcevVST6qiffSRqhudxoP6RTolCamwSZUrq5sClSur5UePVAHm339PowDmz1f9th48SKMTCiFEHOvWqWfmLi6pPpz90SNVinnpUrVsMMDUqWp2veguVEIklySkwmYVLKjuirZsqZYjItRofG/vNCgX2r49/PADuLml8omEEOI5jx7Bu++qD8ap7MoVNXjJz08tOzuru6LDh6f6qUUGIwmpsGkuLrB+PQwYELvuiy/g/ffBaEzFE+fJo6YhsbNL5RMJIcRz3Nzgn39g8OBUPc3Bg2qSkjNn1HLu3Cox7dgxVU8rMihJSIXNc3RUM4R4e8eumz9fzRwSHJzKJ583D954I3biZiGESE2HDqn3m6JFIVOmVDvNunWqhFN0FZMyZdRI+lq1Uu2UIoOThFSkCwaDmkP5999j36M3b4b69WPr5KWKWrXUyHshhEhtISFqUGXcT9+pYMYMNStedJ3n+vVVjdESJVL1tCKDc9A7ACFSUteuqm+ppyc8fKieatWuDRs2xA6ASlGVK8ceWNNkrjwhROrJmlVNY5xKZeeiomDUKDVgKVq3brBwoSowIkRqkjukIt15/tP8zZtQr566Y5pqlixRA51SfTSVECJDunZNvb9UqaLm6Exhz56p2e/iJqNffKFG1ksyKtKCJKQiXSpTBgICYvs7PX0KbdqomZ5SRYEC6q6F9CUVQqS08HA11P3rr1Pl8Hfvqlnv1qxRy/b2qnv8d9+l6QRQIoOTR/Yi3cqTB3bsgJ494c8/1eOo999XhfS//TaF32ibNVMvIYRIaU5OsGgRlCqV4oc+d05NA3rpklrOlg1Wr44tpydEWpHPPiJdy5wZVq6EkSNj13l7qyQ1PDwVTrhmDXzwQSocWAiRIQUFqX+bNlUj61PQrl1qlrvoZLRgQdi9W5JRoQ9JSEW6Z28PkyerkaPRd0V//13N7PTwYQqfzGhURatTJdsVQmQoERFQpw
6MHZvih16+XD3UefRILVeqpMo6pcrgTyGSQBJSkWEMGQI+PpAli1retUu910ffHUgRXbqoW7IyCkAIkVyOjvDll/D22yl2SE2DiRPV6PnoLu8tWqj3w0KFUuw0QpjN7ITU39+fdu3aUaBAAQwGAz4+Pi/d3s/PD4PB8MIrMDDQ0piFsFi7drBzJ+TNq5bPnVNlofbtS+ET7dgB48al8EGFEBlGeLgqI9e9O1SsmCKHjIxUPYq8vGLX9esHf/2lZr0TQk9mJ6QhISFUrlyZWbNmmbXfuXPnuH37dswrT5485p5aiBRRo4ZKQMuVU8v37kHjxrEjTFPEuXNqjj0ZdS+EMJfRqB7fTJmSYocMDXWgY0f7eJVGvvtOVR5xdEyx0whhMbNH2b/55pu8+eabZp8oT5485EiF2mlCWKJYMVWr9K231M3MsDD1VOyHH+xSZiDr+++rl9RMEUKYy85Ojbxs0CBFDnfzJnh51ePKFfV+lCmTKnbfvXuKHF6IFJFmfy2rVKlC/vz5ad68OXv27Emr0wqRqBw5YNMmePddtaxpMGqUPfPnVyQqKpkHt7NTr+PH41eaFkKIlzGZ1EjMESOgevVkH+6ff6B+fQeuXHEFwM0NtmyRZFRYn1SvQ5o/f37mzJlDjRo1CA8PZ/78+TRq1Ij9+/dTrVq1BPcJDw8nPM4o5eDgYACMRiNGozG1Q04XottJ2uvlDAaYPx+KFLHju+/sAdiwoQSdOkWydKmRrFmTd3y7bduwW7SIyH79VA2qdEiuNfPFbSt5X0u6dH+tRUZi37w5pt690fr0SfbhfH0NdO1qz5MnakrjokVNrF8fRdmyqleASFy6v9ZSSXLay6Bpls91aDAYWLNmDZ6enmbt17BhQ4oUKcKvv/6a4PfHjh3LuAQGhCxbtows0UOkhUhh27YV5qefqhAVpR4clCr1iC+/3I+bWzJKOJlM2EVFYZJOWiKOsLAwunbtCsDy5ctxdnbWOSJhDeyMRsr+/ju3a9fm0WuvJetYW7cW4aefKmMyqfez0qXV+1mOHFKSTqSeZ8+e0b17d4KCgnAxc6ScLgnpqFGj2L17NwEBAQl+P6E7pIULF+b27du4u7tbGm6GYjQa8fX1pXnz5jhKMpRkW7ZE0aWLPc+eqTYrWlRj3brImAFQFrtyBbuNGzENGpT8IK2MXGvmCwkJwc3NDYC7d+9K//okStfXmqapRzYpcJgxY+yYONE+Zl3btpG8++4m2rZtkv7aLZWk62stFT148ID8+fNblJDqMnXosWPHyJ8/f6Lfd3JywimBOo6Ojo5yYZhJ2sw8LVqAt/cuJk1qzPXrBq5eNdCwoSN//qlG4lts61aYMgX7Pn3A1TWlwrUqcq0lXdx2knYzX7prs8hIVZNuwIBk1RwND4f33oNly2LXDRsGEydqbN4clf7aLQ1Im5knOW1l9qCmp0+fcuzYMY4dOwbA5cuXOXbsGNeuXQPAy8uLXr16xWw/bdo01q5dy4ULFzh58iTDhw9n+/btDB482OKghUhNRYs+YdeuSKK7OD9+rKbSS6SHSdJ88AGcPJluk1EhRDKEh0P+/FCggMWHePRIvU9FJ6MGA0ybpl729i/bUwjrYPYd0kOHDtE4zq2ikf9NEt67d28WLVrE7du3Y5JTgIiICD7++GNu3rxJlixZqFSpElu3bo13DCGsTYECqoD+O+/Axo1qAECvXnDlCnz1lQVP1uzsIHt2ePBATRfVr18qRC2EsElZs6o6TBa6fBlat4azZ9Vy5swqMTWzN50QujI7IW3UqBEv63a6aNGieMuffvopn376qdmBCaG3bNlg7VoYOhTmzFHrRo9Wb/5z51pYTHrdOvj0U/V4TiaHECJjCwtT7wVffQUNG1p0iAMH1CHu3lXLefLA+vVQs2YKxilEGpCq3UK8hIMD/PQT/O9/sesWLlR3I4KCLDhgnz7qNoYko0KIkBB1d9TCwbpr10KjRrHJaJkyEBAgyaiwTZKQCv
EKBgN88gmsXAnRY+22boW6dSFO75SkHyx3bnj2DH75RQ2JFUJkTO7uqgtPhQpm7zp9OnTsCKGharlBAzX7XIkSKRuiEGlFElIhkqhzZ9i+PfZmxqlTULs2HDliwcG2boXBg+H8+RSNUQhhA4KD1Qikf/4xe9eoKBg+XI2ej/482727mn0pZ86UDVOItCQJqRBmqFMH9u0jZr7727fVnYmNG808UPv2cOECJLP4tRDCBj1+rEo9mVmn8dkz6NQJfvwxdt2XX6oKIAlUShTCpkhCKoSZSpVS/bTq1FHLISFqUEH0wKckK1hQ/VH65Rf1rxAiYyhSBLZtg2LFkrzL3buqFrKPj1q2t4eff4Zvv1VFPISwdXIZC2GBXLnU35POndWyyQQffqgG0JtMZhzon39g4EDYvTtV4hRCWJHbt6FNG7M7n587p7oHHTiglrNnhw0boH//VIhRCJ1IQiqEhZydYflyGDUqdt3//gddu6pqLklSrRpcuqSGygoh0rf791X/0cyZk7yLvz94eKhyc6AerOzapbqgCpGeSEIqRDLY2cEPP6jSUNGPzVatgqZN1d+eJClUSI1O+O03ePo01WIVQuisYkWVTebOnaTNf/8dmjdXszABVKqk+rBXrpyKMQqhE0lIhUgBH36oilFnzaqW9+5VdzUuXEjiAW7eVNOLrlmTajEKIXRy4ICq0fT4cZI21zTw9laj5yMi1LqWLVUuW6hQ6oUphJ4kIRUihbRurR6v5c+vli9cUP2+9u5Nws6FCsGZM/Duu6kaoxBCB8HBav7h6E+sL2E0wvvvwxdfxK7r31994DVzUL4QNkUSUiFSULVq6pFadJ3rBw+gSRP1GP+VChdW/65fH9thTAhh+5o1g7/+euV8w8HBqmLH/Pmx6yZMgHnzLJyqWAgbIgmpECmsSBE1aL5ZM7UcHg5duqgBT6+cmCk8XFW8XrAg1eMUQqSyX3+Ffv2SVNbtxg2oXx82b1bLmTLBsmXg5aUmeBMivZOEVIhU4OqqiuX36RO77tNP1eRML/3b5OQEe/bA+PGpHaIQIrUZDCqzdHB46WbHj6vuPdETN7m5ga8vdOuWBjEKYSUkIRUilTg6qpr333wTu272bOjQ4RWD6fPnV3/IAgLUXyUhhG2JfhTSs6f6pX+JzZuhXj01rhGgeHH1q9+gQSrHKISVkYRUiFRkMMDXX6snd9F9wDZuVH9sbt16xc7/+x/MmJHqMQohUtjIkUl6yjF/vqqTH/0BtWZN1Qe9TJlUjk8IKyQJqRBpoGdPdSfE1VUtHz2qHtGdPPmSnRYtgj/+SIvwhBApRdNUnVF390Q3MZnUHPQDBkBUlFrXsSPs2AF58qRRnEJYGUlIhUgjjRurElBFi6rl69ehbl01BWmCXFzUbdVz52DOnDSLUwhhoago9Vjkiy9g0KAENwkPVx9QJ0yIXTd8uKrEkSVL2oQphDWShFSINPT66+qRXI0aajk4GFq1UjdDE7V2LUyfDqGhaRGiEMISz56pxx7Llye6ycOHaual339XywYD/PgjTJ0K9vZpFKcQVkoSUiHSWL584OcH7dur5chI6NsXxoxJpCzUJ5/AwYNmzX8thEhj9vbqkUd0EeLnXLoEdeqo2ZZA/TqvWQMffZSGMQphxSQhFUIHWbPCn3/C0KGx6775Bnr3jp0qMIadndrhzh0YMgTCwtI0ViHEK4SFqZJt06YlmJDu369unp47p5bz5IGdO1XFDSGEIgmpEDqxt1dP4qdOjS18/euv6hH+o0cJ7HDzJmzYILM4CWFN/vkHSpSAw4cT/PaaNar/+L17arlsWdVt54030jBGIWyAJKRC6Gz4cFi9Gpyd1fKOHerJ35Urz21YrRr8+y+UK5fGEQohElWkiBql9PrrL3xr2jR4++3Y7t8NG6qBjcWLp22IQtgCSUiFsAJvvaX6lebOrZbPnFGP+A4dem5DR0cICYFevRL4phAizZhM8OAB5MgBP/wQr493VJSaAX
jEiPg18jdvVrMwCSFeZHZC6u/vT7t27ShQoAAGgwEfH59X7uPn50e1atVwcnKiVKlSLHrpkGIhMqZatdQMLa+9ppbv3FF3VNavf25DBwdVVf/27TSPUQjxn++/h+rV1QfEOEJC1F3R6dNj1331FSxZorqZCiESZnZCGhISQuXKlZk1a1aStr98+TJt2rShcePGHDt2jOHDh9O/f382b95sdrBCpHclS6qktH59tfzsGXh6wsyZcTZyclJTirZrp5YTHJovhEhVvXqp2ZiyZo1ZdeeO6i+6dq1adnCABQvUZtH9xIUQCXMwd4c333yTN998M8nbz5kzh+LFizN58mQAypUrx+7du5k6dSotW7Y09/RCpHs5c6p8s29fVa/QZFKj8S9fVrOJ2tkR+9fts8/U6Ki4VbaFEKnn0iXImxcKFoR3341ZfeYMtG4d2/c7e3Y10Vrz5vqEKYStMTshNVdAQADNmjWLt65ly5YMHz480X3Cw8MJDw+PWQ4ODgbAaDRiNBpTJc70JrqdpL3MYy3tZmcHCxdCkSJ2fP+9qpg9ZQpcumRi0aKomBld7HLlAgcHTDrGay1tZkvitpW8ryWd7teapuHQsSNa+fJELV4cs9rf30CnTvY8fqw+KBYqpLF2bSQVK4I1/Nfq3m42SNrMMslpr1RPSAMDA8mbN2+8dXnz5iU4OJjQ0FAyJ1Ds29vbm3Hjxr2wfseOHWSRudXM4uvrq3cINsla2s3DAwYNKsqcOZUwmezw8bGjZs3HfPHFfnLkiIjtcLpxI/bh4UTp2EnNWtrMFoTFqSW7fft2nKNLLIgk0fNay96vH1HOzjzbuBGAnTsLMWNGVSIjVTJavPhjvv56P9evh3H9um5hJkh+R80nbWaeZ8+eWbxvqieklvDy8mLkyJExy8HBwRQuXJjGjRvj7u6uY2S2w2g04uvrS/PmzXF0dNQ7HJthje3WujW0bm2ia1cDT58a+PffnHzzTSvWro2kTBm1jWH1auw/+4zIPXvUVFBpyBrbzNqFxBkI06RJE3LkyKFfMDZEz2vNsHEjWosWqmMoquu2t7cdU6fGzvnZqpWJ337LSvbsTdI0tleR31HzSZtZ5sGDBxbvm+oJab58+bhz5068dXfu3MHFxSXBu6MATk5OOCVwp8fR0VEuDDNJm1nG2tqtTRvYvVv9e/MmXLpkoGFDR3x8/hsA1bgx9O+PY4ECuk2KbW1tZs3itpO0m/nSvM0uXlRD53//HTp3xmiEDz+EX36J3eSDD2DmTDscHKy3mqJca+aTNjNPctoq1X9zPDw82LZtW7x1vr6+eHh4pPaphUhXKldWM7xUqqSWHz6EZs1g+XLUXdHRo1UyevmyjLwXIiWVLAlHj0KnTgQFqQ+GcZPR77+H2bNjbp4KISxgdkL69OlTjh07xrFjxwBV1unYsWNcu3YNUI/be/XqFbP9wIEDuXTpEp9++ilnz57lp59+YuXKlYwYMSJlfgIhMpBChWDXLoguUBERAd26wcSJ/+Wgd++qzHXuXF3jFCJduHgRZs1Sv1wVK3L9hoH69VUVDIBMmdQHwk8/lbJOQiSX2QnpoUOHqFq1KlWrVgVg5MiRVK1aldGjRwNw+/btmOQUoHjx4mzYsAFfX18qV67M5MmTmT9/vpR8EsJCLi6qWH7//rHrvLzUI8PInHnU8Pw45WiEEBZav15VuH/2jGPH1OxpJ06ob+XMCdu2wTvv6BqhEOmG2Q8YGjVqhPaSx4EJzcLUqFEjjh49au6phBCJcHSEefOgRAn44gu17uef4fp1WLnybbJnBW7cUHdMq1XTNVYhbNbw4dCvH3/7Z6VLF3j6VK0uUQL+/ju2yIUQIvmst/e1EOKlDAZ1Z3TZMvXoEGDTJjXI6cYN1ETaAwdKf1IhzKFpMGQIrFkDwLzfs9OuXWwyWru26sstyagQKUsSUiFsXLduqk+bm5taPn5c/dE8NWS2msNQOrcJkXSRkXD/PqYnIXz+ueoKEx
WlvvX227B9O+TOrW+IQqRHkpAKkQ40aAABAVC8uFq+eRM82uVi8z/5IThYjboIDdU3SCGsXXg4ODoStvB3um/syfffx37r449h5UpIpFqhECKZJCEVIp0oU0Y9SqxVSy0/eaLK0/hMvghLl8LZs/oGKIQ1270bXnuNR/v/pXkLAytWqNV2djBzJkyapL4WQqQO+fUSIh3Jk0c9UuzYUS1HRUHHb6oyttcltCpVVf846VMqxIvKluVxs07U61GU3bvVqixZwMcHBg/WNTIhMgRJSIVIZ7JkgVWr1JimaOO+d6ZnD42oAQNhyhT9ghPC2ly5Ag8fsu9CLkqvm8zpi2qWwLx5YedOaNdO3/CEyCgkIRUiHbK3V3nn9OmxjxmX/W5gqW8enmbKqW9wQlgLTYNu3bj5Zj8aN4b799XqcuVU95caNfQNT4iMRBJSIdKxoUNV9ZrogRh9ro2nxqy+XLqEGvkkRAamYWBhg4XUPzCFsDC1rnFj2LsXihXTNTQhMhxJSIVI59q3V48e8+ZVy+fOwVdVN2AqWQrOnNE3OCH0EB6Oyft7RgyO4L0fynIZVZ7i3XdVLd8cOfQNT4iMSBJSITKAN95QjyDLlVPLfwQ3Y4hpBmvOlNU3MCF0ELrrEKGjJ7Bz9qmYdWPGwOLFsZNMCCHSliSkQmQQxYrBnj3QqBFE4MRsY3/e7mRg5dBdcPKk3uEJkfo0jcBAqP95XQpFXuEYVXFwgIULYexYmUNCCD1JQipEBuLmph5J9uypljVNo+DMzzn89oSY2WiESJc0jUddPmBp2W85fBge44aLi/p96NNH7+CEEJKQCpHBODnBkiXw9dcABjqwlrr//sLbb0NIiN7RCZE6dvgZmLG+GCeCCgNQuLB6YtC0qc6BCSEAcNA7gNRiNBqJysC3fIxGIw4ODoSFhWXodjBXQu1mb2+Po6OjzpGlLIMBvvlGTTX6/vu5iIyEY2uvcKLAB5T0X0juygX0DlGIlKFprPv+DJ1Gv47R+AUA1arB+vVQQC5zIaxGuktIg4ODuX//PuHh4XqHoitN08iXLx/Xr1/HIB2jkiyxdnNyciJXrly4uLjoGF3K69tX3Sl6+22wCzYREhxJh7ZRLNgSOwBKCFulafBnl+W0W92bQpzlMiVo3RpWrIBs2fSOTggRV7pKSIODg7l58ybZsmUjV65cODo6ZthkzGQy8fTpU7Jly4adTMCcZM+3m6ZpGI1GgoKCuPlf3c70lpQ2a6am8W7TpgTNrm+DG9DMI4RV84Oo00luIQnbFBEBH3wAS1d3ohXZuEwJBg6EGTPAIV395RMifUhXv5b3798nW7ZsFCpUKMMmotFMJhMRERE4OztLQmqGhNotc+bMZM+enRs3bnD//v10l5ACVKyoykK1bQtHj8LEoIFk7nyKpYsP0bOXXD/CtgQ91lheczIHz7cikgr8RTt++AE++URG0gthrdLNXxqj0Uh4eDiurq4ZPhkVKc9gMODq6kp4eDhGo1HvcFJFgQLg7w+tW8PXjGcQs3i3tx3ffqsefQphC65dg6Z1Qql1filN2YaTE6xcCaNGSTIqhDVLNwlp9ACU9Db4RFiP6GsrPQ8Sy5YN1q6FNwcWYx8egIbx63F82vUa6TQPF+nIkcMaTd8I5vCZLHgQwG/uw9i2DTp31jsyIcSrpJuENJrcHRWpJaNcWw4O8NNP8MMPkIv79GYx11YG0Lo1BAXpHZ0QCduwAfw8Puf3u02wJ5JCpTITEAB16+odmRAiKdJVH1IhRMowGNQjzqJFc1P93ZM8jsgCW6GtxwOWbXancGG9IxQi1uzZMGQIlDf14DCVqOnhwLp1kCuX3pEJIZIq3d0hFUKknC5d4K/tWXB3hw74sO5MKTpXv8TRo3pHJgSYTOD1cQTnBk3DYIrkBJUwdu7Btm2SjAphayQhFUK8VN26EBAAV4o3YRxj2H+vOPXrw8aNekcmMrKwMOjaFfymHOY7vqQqRxk1CpYvh8yZ9Y
5OCGEuixLSWbNmUaxYMZydnalVqxYHDhxIdNtFixZhMBjivZydnS0OWAiR9kqXBt/9LhzwGA4YqBbiz7K2y5g7V+/IREZ0/z60axjMqlWwDw9KGi7Td9Yb/PADSJU7IWyT2b+6K1asYOTIkYwZM4YjR45QuXJlWrZsyd27dxPdx8XFhdu3b8e8rl69mqygReL8/PwwGAyMGzcuVY8/duzYVDm+OTRNo3r16rRo0cLsfc+dO4eDgwM//fRTKkSWPuXOTcyI5U6spo/2Cx8ONOHlZYfJpHd0IqO4fTsrreuGMO9AZQYym6xZYcH6PAwapHdkQojkMDshnTJlCgMGDKBv3768/vrrzJkzhyxZsvDLL78kuo/BYCBfvnwxr7x58yYraCEAlixZwpEjR/jmm2/M3rdMmTJ069aNcePG8eTJk1SILn3KnFk9Er35yTTasw4NOx5MXkzYkL8JC0m/5bCEdTj5y0Hchi7g5OWsTGUEB3O3YedOaNNG78iEEMll1ij7iIgIDh8+jJeXV8w6Ozs7mjVrRkBAQKL7PX36lKJFi2IymahWrRoTJkygfPnyiW4fHh4eby764OBgQBW/T6woudFoRNM0TCYTpgx8uyb6Z9f+q2Qe3SapcXw929lkMjF27Fjq169PzZo1LYrlk08+YenSpfz444988cUXwMvbzWQyxUwlam9vn/wfwoZ9OwEKFXFi6Uf7WUB/7G5p7C5xg1L7l+BeLLve4Vm9uO9jL3tfE7H2f7KaN6b3oSoRPCQHM8vNYt36KIoUMUqN3FeIvr7kOks6aTPLJKe9zEpI79+/T1RU1At3OPPmzcvZs2cT3KdMmTL88ssvVKpUiaCgICZNmkSdOnU4deoUhQoVSnAfb2/vBB8579ixgyxZsiT8gzg4kC9fPp4+fUpERIQ5P1a68uzZM4CYNkjpu3/Rxw8PD4/5oKCHzZs3c+XKFUaMGGFxHEWLFqV8+fLMmzePQYMGxZtiNaF2i4iIIDQ0FH9/fyIjIy2OPb0oUgQ+ar2fEhtVEn/60QauvN6ArWO9yF4hm87RWbewsLCYr7dv3y796l9CM2kYx2ynzYkZlPlv3dIsR/h6lA8nT9pz8qSu4dkUX19fvUOwOdJm5onOESyR6nVIPTw88PDwiFmuU6cO5cqVY+7cuYwfPz7Bfby8vBg5cmTMcnBwMIULF6Zx48a4u7snuE9YWBjXr18nW7ZsGfrNPTphz5QpE0ePHuW7775j//792NnZ0bhxY6ZMmUKxYsVitl+0aBH9+vVjwYIF9OnTJ96x/Pz8aNq0KaNHj2bMmDHxju/k5MQ///zD6NGjOXz4MPb29jRp0oSJEydSqlSpBGPz9/dn0qRJ7Nu3jydPnlCkSBG6dOmCl5dXvA8acc/bvHlzxo0bx8GDBwkKCoqZJWnlypUYDAZ69OjxwtzylSpV4tSpU4m20ZgxYxg9ejQAXbt25euvv+bw4cM0bdoUTdN48uQJ2bNnf6EQflhYGJkzZ6ZBgwYZ+hqLp3VrKv9Uj0IjepBFe8zrkafI9/VHXJvlQ/n+tfWOzmqFhITEfN2kSRNy5MihXzBW7NnDME68MYB611cAcAXYlK8TFY7/TC23rLrGZkuMRiO+vr40b95cZjNMImkzyzx48MDifc1KSHPlyoW9vT137tyJt/7OnTvky5cvScdwdHSkatWqXLhwIdFtnJyccHJySnDfxC6MqKgoDAYDdnZ28e50ZTTRP/uhQ4eYNGkSjRo14oMPPuDo0aOsXbuWkydPcvLkyZiEKnr7hNotejm6XeOu279/PxMnTqRVq1YMHTqUU6dO4ePjw+7du9m3bx8lSpSId6zZs2czePBgcuTIQbt27ciTJw+HDh1iwoQJ+Pn5sWPHDjJlyhTvHAEBAXh7e9O4cWPef/99rl27hp2dHZqm4efnR5kyZRL8gNKtW7cXHhtEREQwbdo0QkNDadiwYcw56tSpA6i7782bN495TB/3Z47bHgaD4aXXYU
ZUZlBzfJ5MovY3EyhsvERO7SFZBjXn8OkF1J7RQ+/wrFLc60eup4TdPHKH+w06Ui9EdQczYWB78wmEfViWrG5Zpc0sINea+aTNzJOsttLMVLNmTW3IkCExy1FRUVrBggU1b2/vJO0fGRmplSlTRhsxYkSSzxkUFKQB2v379xPdJjQ0VDt9+rQWGhqa5OOmRzt27NAADdAWLFigRUVFxXzv3Xff1QDt999/j1m3cOFCDdAWLlyY6LHGjBmT4PHnzJkTb/s5c+ZogNa2bdt460+dOqU5ODholStXfuH/0NvbWwO0SZMmJXiOX3755YW4Tp06pQFajx49ktQmYWFhWuvWrTWDwaDNnj073veir60GDRpomqau50ePHsVrt2hyjSUsIiJC8/Hx0e7/G6gdydFY0yDmtbWWl2YMi9Q7RKvz9OnTmGv80aNHeodjdY7/vF+7YVco5joKxVnb77Um5lqLiIjQO0SbIu1mPmkzy9y/f18DtKCgILP3NfuR/ciRI+nduzc1atSgZs2aTJs2jZCQEPr27QtAr169KFiwIN7e3gB888031K5dm1KlSvH48WP+97//cfXqVfr37295Fm2BGjUgMDBNT2m2fPng0KGUOVaDBg1466234q177733+PXXXzl48CBdu3ZN1vFfe+01BgwYEG/dgAEDmDx5Mhs2bODevXvkzp0bgLlz5xIZGcmMGTNeuKP56aefMmXKFH7//Xc+/vjjeN+rVq1azHUV140bNwCSVK0hNDQUT09Ptm7dys8//0y/fv3ifd/FxQVnZ+eYYwrzhIaGUr9+fYKCgjh8+DDlb2xmZ7XBNPz3ZwCa7vfmQIEjFN/zG7nLJtzdRoi4dvddQK1FH+KIespx274gz5avp2Kbsnh4eBAUFETjxo3lrpUQ6YzZCek777zDvXv3GD16NIGBgVSpUoVNmzbFJAfRj1WjPXr0iAEDBhAYGIibmxvVq1dn7969vP766yn3UyRBYCDcvJmmp9RVtWrVXlgXPYjs8ePHyT5+3bp1E3ykXbduXc6fP8/x48dp1qwZAPv27QPUQKRt27a9cCxHR8cEB8W98cYbCZ47uo/Kq/rdPXv2jPbt27Njxw4WLlxIr169EtwuZ86c3L9//6XHEgkzmUwcPnw45utMLo40ODOXXZ1fp86fH2OPiZoPN3OrQhVOLvChQu/qOkcsrFVYUDgH63xE/dPzYtaddPGgwN4/yF8+PyEhIfGuNSFE+mLRoKYhQ4YwZMiQBL/n5+cXb3nq1KlMnTrVktOkqCR2cdVVSsb4/EAfUJUIgJiBQcmR2N3J6PVBQUEx6x4+fAjAd999lyLnyPzfvIBxRyo/LyQkhDZt2rB7925+/fVXunfvnui2oaGhiVZvEOYz2Bmo/8dwTs6oTL7h75DLdI8CUTdw71OXndt/osGi93huvJjI4C7vvEbom29RP/RwzLqdFYdQJ2Ayjlkz6RiZECKtpPooe2uRUo/C05vou5wJlTGKm1Q+7/mBbc+vd3V1jVkXnRwHBweTPXvSa1Q+P8o9WnRXgOhE93lPnjyhdevW7Nu3j99//53OnTsneg6TyURQUNBL6+IKy1QY2pg7dY5wutFbvP70IE6E03BJP3bv3UXV3TPImldKQwnYPWot5Sf1pTiPAAjDif3vzaPhgoSfaAgh0qeMOxxdAODm5gbAzQT6Mxw9ejTR/fbs2ZNg4fi9e/diMBioXLlyzPpatWoBsY/uk6t8+fLY2dlx7ty5F74XFBREixYt2L9/P6tWrXppMgpw/vx5TCYTFStWTJHYRHx5qxeidOBu/CsNjllX78Ii7hauxullx/QLTOguLCgcv0pDqTfJE7f/ktGbDkW5viJAklEhMiBJSDO46tWrYzAYWL58ebxH4OfPn+fHH39MdL9///2Xn3/+Od66n3/+mX///Zc2bdrE3MUEGDRoEA4ODgwdOpRr1669cKzHjx+/NPl9Xo4cOahUqRKHDh2KlxQ/evSIZs2acf
ToUf788088PT1feaz9+/cD0LBhwySfX5jHMWsmGhyfyZ6BvxKKKjdW3Hiekj1qsbXDdExRms4RirR2xfc8l/PXodGJmTHrDhR+G9fLxyjdpaqOkQkh9JJhHtmLhBUoUIBu3bqxbNkyqlevTqtWrbh79y5r1qyhVatW/PHHHwnu17JlSz766CM2btxI+fLlOXXqFOvXrydXrlwvJLIVKlTgp59+4sMPP6RMmTK0bt2akiVL8uTJEy5dusTOnTvp06cPc+bMSXLcHTt2ZMyYMezbty+mlmjPnj05dOgQDRs25NChQxx6rp9Gnjx5GDRoULx1vr6+ODg40LZt2ySfW1im7uyeXHmrFqGeXSn37AhORNBs3TD2591Eka0LyV/l1VUThG0zRWns7PkztZYPJwuhAISTiUM9plFnyUAMdtK5WIiMShJSwfz588mVKxcrVqxg1qxZlClThnnz5lGgQIFEE9LatWvz1Vdf8dVXXzF9+nTs7e3x9PTkhx9+eKEoPqiSUFWqVGHKlCn4+/uzfv16XF1dKVKkCCNGjKB3795mxdy/f3/Gjx/P0qVLqVOnDiaTCX9/fwB27tzJzp07X9inQ4cO8RLSZ8+e4ePjQ9u2bSlQoIBZ5xexcuXKleTpeos1L01E4F52Nf2C+genAFDrwd/cr1aBA5/MpuYPnVIzVKGjmwdvcfPN/jR+8HfMukuOZYj8bQV1O1d+yZ6xzLnWhBA2JhXqoqY4KYxvvpcVeE8vevbsqbm5uWnBwcEW7f/zzz9rgLZz586YdVIY33yWFpA+NmGDds8ud7xC+nuLdNHunU389zy9yEiF8U0mTdsxcLn2GNd4/9e7y/XXntx+YtaxpFi5ZaTdzCdtZpnkFMaXPqTCZn377beEhoYyY8YMs/eNjIxkwoQJtG/fngYNGqRCdOJVKnu1xu7USfYViJ3AwePaSqLKVcBv5Do06Vpq8+79c5u9BTvTaE5XXFFVO+7Y5efI+A3UPf0z2fJJpQUhhCIJqbBZRYsWZfHixWaVkop27do1evXqxZQpU1IhMpFUOcvmodb11fh/8BuPDKriQ14tkEZTO7Avnye3Dmag2SzSkSijiZ095uJUuSx1b6+OWb+/aBecz5+g2letdYxOCGGNpA+psGldunSxaL8SJUowduzYlA0mAwoNDaVVq1Y8ePDA4ukcDXYGGszpzr0PG3H4zfepfnsDAB531xJcczu7On9H3d8GYedon9Lhi1RwevVpjH3fp+HTPTHr7htyc3nkDGpNesfi46bEtSaEsF5yh1QIYbHowWSnTp1K9nSOuSsXoPrN9Rwa8Rv37fIA4MIT6q/6iH9da3Ds54MpEbJIJUHXg9labRSlOlehcpxk1L9EbzhzhjeSkYxCyl5rQgjrIwmpEMJ6GAzUmNIdxwtn8C87IGZ12dBjVHm/Jv7Fe3Pz4C0dAxTPM0Wa2PfBQsKLvUazo5PIhBGAq46lOD5lGw0uLiJXGXedoxRCWDtJSIUQVse1eE4anJnH0em7uOhULmZ9gytLcKtZim1NvuPZg1AdIxQAB2cE8K9LDWrPe488JjVtcASO7G3yFfnv/UPlEU10jlAIYSskIRVCWK2qQ+tR7PFx9nSeRpDBFYAshNJ0x1c8zFuW3f0WEhkWqXOUGc/ZP06xJ29H3vioDmVDY2dZ25v/Le7tOkedbePJ5JpZxwiFELYm3SWkmtSKEalEri192Ds7UnflMLhwkd2VBxOJGtxUKOoa9X55j6suFdg1ZAVRRulXmNpu7L7C7hK9eK1TRere9YlZf8mpLAcmbKXOrT8oWK+4fgEKIWxWuklI7e3VHymj0ahzJCK9ir62oq81kbZcS7hT79hMrq07zqFcrWLWlzSeo/6srlzKVhH/kT6SmKaCy9sv41duIHnqv0a9y79ih/pwFmhfgH195lAs+AQ1vZrqHKUQwpalm4TU0dERJycngoKC5E6WSHGaphEUFISTk5OUm3lOlixZcHJySrPzlW
hXnhr3/uafWbs4niN2UoPSEadpMLUjV7JVIOCDRUQ8lSkmk+vM6lP4F32Xwk1L0+js3JgBS48Mbvi3+Z4cd89Te+EH2GVKmwqCaX2tCSHSTrqqQ5orVy5u3rzJjRs3cHV1xdHREYPBoHdYujCZTERERBAWFoadXbr53JHqnm83TdMwGo0EBQXx9OlTChYsqHeIViVr1qw8fvyYjRs3kjVr1jQ9d6VB9eBDP45P8iXzuM95LUT1ZSwZcYaS8/oSOP9LTrQaRbWZ7+Fe3CVNY7Nlmknj+PSdhE2YTO17f1EuzvdCyMKBOsOptmwUDYrmSNO49LzWhBCpL10lpC4u6o/O/fv3uXkzY8/womkaoaGhZM6cOcMm5ZZIrN2cnJwoWLBgzDUmrITBQOVRLdA+bs4x743ww/dUCd4FQD7TLfJtHEFIia/wK/cuhSYMppRnBZ0Dtl5Bt0I4+vFSCvrMpErYyXjfe2jIycmmw6k8fwiNi7rpFKEQIj1LVwkpqKTUxcUFo9FIVFSU3uHoxmg04u/vT4MGDeQRsxkSajd7e3tpQytnsDNQ5cs2aF+04cjMPURO+J6agesByEoIjc7MgY5z+Me1Po+7fUi1bzzJlltGgWsmjVO/HSNw4iJqnF5Mo//mm492zy4v5zp+RvXZA2iQW+adF0KknnSXkEZzdHTM0EmEvb09kZGRODs7Z+h2MJe0m3nCwsJ46623uHv3Lk2aNNG9zQwGqDa0Lgxdx/VNp7j+6XSqnviVzKiapZWCdsGcXQTNcWFnyU5kHfguVYc1wN4xY3VruXUkkAvjfqPg5l+oEH6a5+8b/5OtDsG9hlD7f29TL0smXWJ8nrVda0KIlJVuE1IhROqLiori77//jvnamhRuVZ7CreYSfP0H9o/4lULrf6JUxBkAXAmm4cVfYNQv3P6sAOeq98CtTwfK96uNg1P6rKJw8+Atzn7vg5vvSioF76YA8f+/wnDiaNnuuI8ZQqWu1XSKMnHWfK0JIZJPElIhRLrmUtiVRquHoJkGc/KnnQTPXEzFc6vJzlMA8ptukf/g/+Dg/3g0xI3jJTpi19GTSsMak6OQ7T6mjjKaOLvyH+7+toU8/qspH3KQhIbkncr6Bo/feo8qE97Bo5D0DxVC6MOi51SzZs2iWLFiODs7U6tWLQ4cOPDS7VetWkXZsmVxdnamYsWKbNy40aJghRDCUgY7AxWGNKLO2YU4PrjD/uG/czh3q5hC+wBu2iMaXfyFBpPak61wDk65eODXYDQHf9jB48AwHaN/Nc2kcXHzBbb3WMDuQu/w2Ckv5XtWpfHfn1E+5GC8bW85FmFX/S+4uOEs5Z8eoO6SgWSVZFQIoSOz75CuWLGCkSNHMmfOHGrVqsW0adNo2bIl586dI0+ePC9sv3fvXrp164a3tzdt27Zl2bJleHp6cuTIESpUkBGvQoi055wzC7WmdoWpXXlw7j5nJm8g0wYfKtzaTJb/+ps6EEX5J/tg1z7YNZ7Iz+w55VyZ+8Vr4lC7Bu6t3qBIy3JkcU37voyaSePqvttc3/APEbv243omgNIP9lNSe0zJRPa56FSOmx6dKTzsLYp3qEQBqb4hhLAiZiekU6ZMYcCAAfTt2xeAOXPmsGHDBn755Rc+//zzF7b/8ccfadWqFaNGjQJg/Pjx+Pr6MnPmTObMmZPM8IUQInncy+Si3rzeQG8igkI5PmMrT1dtpPDpLRSJvBSznQNRlA87AmeOwBlgIRhx4JJDCe7lKE1I0XLYl32NrGUKkbV0AXK8XgD319zJ5GzZgKnwoDBuHbrFg+M3eHL2BpEXr+F09Rzud89Q6MkZihFMsZfsH0JWTudvSli9ZhQe0IqSzUsnmqwKIYTezEpIIyIiOHz4MF5eXjHr7OzsaNasGQEBAQnuExAQwMiRI+Ota9myJT4+PmYHGxISgrOzs9n7ZURGo5GwsDBCQkJkNKoZpN3MExISEu9rm28zByg1ogmMaALA+UO3uP7bTgz+O8
l7eT+FI84/188pkryR/5L3/r9wfwMcjn+4Z9gTiCuhDi6EOLoS7pCVCHtnnsaZrGJ/qbdxjjLgFPGE7BEPyBz1FGftGa48IQ/w4nMnJeS55SBcuOBek7AqHuTu3IhSXarzepwZlOL+X9midHetpSF5XzOftJllkvM+Y1ZCev/+faKiosibN2+89Xnz5uXs2bMJ7hMYGJjg9oGBgYmeJzw8nPDw8JjloCBVG69o0aLmhCuESEOFChXSOwQrFAU8hMiHEJnwFq0ebE+hcwXDg62wbStsGw8DU+iwVkiuNSGsmyVTuFtl8T1vb29cXV1jXkWKFNE7JCGEEEIIkQQPHjwwex+z7pDmypULe3t77ty5E2/9nTt3yJcvX4L75MuXz6ztAby8vOI95n/8+DFFixbl2rVruLq6mhNyhhUcHEzhwoW5fv26THdpBmk380mbWUbazXzSZpaRdjOftJllgoKCKFKkCDlz5jR7X7MS0kyZMlG9enW2bduGp6cnACaTiW3btjFkyJAE9/Hw8GDbtm0MHz48Zp2vry8eHh6JnsfJyQknJ6cX1ru6usqFYaboqVSFeaTdzCdtZhlpN/NJm1lG2s180maWsbMz/wG82aPsR44cSe/evalRowY1a9Zk2rRphISExIy679WrFwULFsTb2xuAYcOG0bBhQyZPnkybNm1Yvnw5hw4dYt68eWYHK4QQQggh0h+zE9J33nmHe/fuMXr0aAIDA6lSpQqbNm2KGbh07dq1eJlxnTp1WLZsGV999RVffPEFpUuXxsfHR2qQCiGEEEIIwMKpQ4cMGZLoI3o/P78X1nXu3JnOnTtbcipAPcIfM2ZMgo/xRcKkzSwj7WY+aTPLSLuZT9rMMtJu5pM2s0xy2s2gWTI2XwghhBBCiBRilWWfhBBCCCFExiEJqRBCCCGE0JUkpEIIIYQQQleSkAohhBBCCF3ZbEIaHh5OlSpVMBgMHDt2TO9wrF779u0pUqQIzs7O5M+fn3fffZdbt27pHZbVunLlCv369aN48eJkzpyZkiVLMmbMGCIiIvQOzap999131KlThyxZspAjRw69w7Fas2bNolixYjg7O1OrVi0OHDigd0hWzd/fn3bt2lGgQAEMBgM+Pj56h2T1vL29eeONN8iePTt58uTB09OTc+fO6R2W1Zs9ezaVKlWKKYjv4eHB33//rXdYNmXixIkYDIZ4EyIlhc0mpJ9++ikFChTQOwyb0bhxY1auXMm5c+f4448/uHjxIp06ddI7LKt19uxZTCYTc+fO5dSpU0ydOpU5c+bwxRdf6B2aVYuIiKBz5858+OGHeoditVasWMHIkSMZM2YMR44coXLlyrRs2ZK7d+/qHZrVCgkJoXLlysyaNUvvUGzGzp07GTx4MPv27cPX1xej0UiLFi0ICQnROzSrVqhQISZOnMjhw4c5dOgQTZo0oUOHDpw6dUrv0GzCwYMHmTt3LpUqVTJ/Z80Gbdy4UStbtqx26tQpDdCOHj2qd0g2Z+3atZrBYNAiIiL0DsVm/PDDD1rx4sX1DsMmLFy4UHN1ddU7DKtUs2ZNbfDgwTHLUVFRWoECBTRvb28do7IdgLZmzRq9w7A5d+/e1QBt586deodic9zc3LT58+frHYbVe/LkiVa6dGnN19dXa9iwoTZs2DCz9re5O6R37txhwIAB/Prrr2TJkkXvcGzSw4cP+e2336hTpw6Ojo56h2MzgoKCyJkzp95hCBsWERHB4cOHadasWcw6Ozs7mjVrRkBAgI6RifQuKCgIQN7DzBAVFcXy5csJCQnBw8ND73Cs3uDBg2nTpk289zdz2FRCqmkaffr0YeDAgdSoUUPvcGzOZ599RtasWXF3d+fatWusXbtW75BsxoULF5gxYwYffPCB3qEIG3b//n2ioqJiplqOljdvXgIDA3WKSqR3JpOJ4cOHU7duXZm2OwlOnDhBtmzZcHJyYuDAgaxZs4bXX39d77Cs2vLlyzly5Aje3t4WH8MqEt
LPP/8cg8Hw0tfZs2eZMWMGT548wcvLS++QrUJS2y3aqFGjOHr0KFu2bMHe3p5evXqhZbCJusxtM4CbN2/SqlUrOnfuzIABA3SKXD+WtJkQwnoMHjyYkydPsnz5cr1DsQllypTh2LFj7N+/nw8//JDevXtz+vRpvcOyWtevX2fYsGH89ttvODs7W3wcq5g69N69ezx48OCl25QoUYIuXbqwfv16DAZDzPqoqCjs7e3p0aMHixcvTu1QrUpS2y1TpkwvrL9x4waFCxdm7969GepRhLltduvWLRo1akTt2rVZtGgRdnZW8RkuTVlynS1atIjhw4fz+PHjVI7OtkRERJAlSxZWr16Np6dnzPrevXvz+PFjeWqRBAaDgTVr1sRrP5G4IUOGsHbtWvz9/SlevLje4dikZs2aUbJkSebOnat3KFbJx8eHjh07Ym9vH7MuKioKg8GAnZ0d4eHh8b6XGIfUDDKpcufOTe7cuV+53fTp0/n2229jlm/dukXLli1ZsWIFtWrVSs0QrVJS2y0hJpMJUOWzMhJz2uzmzZs0btyY6tWrs3DhwgyZjELyrjMRX6ZMmahevTrbtm2LSahMJhPbtm1jyJAh+gYn0hVN0xg6dChr1qzBz89PktFkMJlMGe5vpTmaNm3KiRMn4q3r27cvZcuW5bPPPktSMgpWkpAmVZEiReItZ8uWDYCSJUtSqFAhPUKyCfv37+fgwYPUq1cPNzc3Ll68yNdff03JkiUz1N1Rc9y8eZNGjRpRtGhRJk2axL1792K+ly9fPh0js27Xrl3j4cOHXLt2jaioqJgawaVKlYr5fc3oRo4cSe/evalRowY1a9Zk2rRphISE0LdvX71Ds1pPnz7lwoULMcuXL1/m2LFj5MyZ84W/C0IZPHgwy5YtY+3atWTPnj2mj7KrqyuZM2fWOTrr5eXlxZtvvkmRIkV48uQJy5Ytw8/Pj82bN+sdmtXKnj37C32To8ermNVnOcXH/aehy5cvS9mnJPjnn3+0xo0bazlz5tScnJy0YsWKaQMHDtRu3Lihd2hWa+HChRqQ4Eskrnfv3gm22Y4dO/QOzarMmDFDK1KkiJYpUyatZs2a2r59+/QOyart2LEjweuqd+/eeodmtRJ7/1q4cKHeoVm19957TytatKiWKVMmLXfu3FrTpk21LVu26B2WzbGk7JNV9CEVQgghhBAZV8bsFCeEEEIIIayGJKRCCCGEEEJXkpAKIYQQQghdSUIqhBBCCCF0JQmpEEIIIYTQlSSkQgghhBBCV5KQCiGEEEIIXUlCKoQQQgghdCUJqRBCCCGE0JUkpEIIIYQQQleSkAohRBqaMGECBoPhhde0adP0Dk0IIXQjc9kLIUQaevLkCSEhITHLo0ePZsuWLezevZtChQrpGJkQQujHQe8AhBAiI8mePTvZs2cH4Ouvv2bLli34+flJMiqEyNDkkb0QQuhg9OjR/Prrr/j5+VGsWDG9wxFCCF1JQiqEEGlszJgxLFmyRJJRIYT4jySkQgiRhsaMGcPixYslGRVCiDikD6kQQqSRb7/9ltmzZ7Nu3TqcnZ0JDAwEwM3NDScnJ52jE0II/cgoeyGESAOappEjRw6Cg4Nf+N6BAwd44403dIhKCCGsgySkQgghhBBCV9KHVAghhBBC6EoSUiGEEEIIoStJSIUQQgghhK4kIRVCCCGEELqShFQIIYQQQuhKElIhhBBCCKErSUiFEEIIIYSuJCEVQgghhBC6koRUCCGEEELoShJSIYQQQgihK0lIhRBCCCGEriQhFUIIIYQQuvo/dB4EdVVVbk0AAAAASUVORK5CYII=",
|
||
"text/plain": [
|
||
"<Figure size 800x350 with 1 Axes>"
|
||
]
|
||
},
|
||
"metadata": {},
|
||
"output_type": "display_data"
|
||
}
|
||
],
|
||
"source": [
|
||
"# extra code – shows what the Huber loss looks like\n",
|
||
"\n",
|
||
"import matplotlib.pyplot as plt\n",
|
||
"\n",
|
||
"plt.figure(figsize=(8, 3.5))\n",
|
||
"z = np.linspace(-4, 4, 200)\n",
|
||
"z_center = np.linspace(-1, 1, 200)\n",
|
||
"plt.plot(z, huber_fn(0, z), \"b-\", linewidth=2, label=\"huber($z$)\")\n",
|
||
"plt.plot(z, z ** 2 / 2, \"r:\", linewidth=1)\n",
|
||
"plt.plot(z_center, z_center ** 2 / 2, \"r\", linewidth=2)\n",
|
||
"plt.plot([-1, -1], [0, huber_fn(0., -1.)], \"k--\")\n",
|
||
"plt.plot([1, 1], [0, huber_fn(0., 1.)], \"k--\")\n",
|
||
"plt.gca().axhline(y=0, color='k')\n",
|
||
"plt.gca().axvline(x=0, color='k')\n",
|
||
"plt.text(2.1, 3.5, r\"$\\frac{1}{2}z^2$\", color=\"r\", fontsize=15)\n",
|
||
"plt.text(3.0, 2.2, r\"$|z| - \\frac{1}{2}$\", color=\"b\", fontsize=15)\n",
|
||
"plt.axis([-4, 4, 0, 4])\n",
|
||
"plt.grid(True)\n",
|
||
"plt.xlabel(\"$z$\")\n",
|
||
"plt.legend(fontsize=14)\n",
|
||
"plt.title(\"Huber loss\", fontsize=14)\n",
|
||
"plt.show()"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"To test our custom loss function, let's create a basic Keras model and train it on the California housing dataset:"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 72,
|
||
"metadata": {
|
||
"tags": []
|
||
},
|
||
"outputs": [],
|
||
"source": [
|
||
"# extra code – loads, splits and scales the California housing dataset, then\n",
|
||
"# creates a simple Keras model\n",
|
||
"\n",
|
||
"from sklearn.datasets import fetch_california_housing\n",
|
||
"from sklearn.model_selection import train_test_split\n",
|
||
"from sklearn.preprocessing import StandardScaler\n",
|
||
"\n",
|
||
"housing = fetch_california_housing()\n",
|
||
"X_train_full, X_test, y_train_full, y_test = train_test_split(\n",
|
||
" housing.data, housing.target.reshape(-1, 1), random_state=42)\n",
|
||
"X_train, X_valid, y_train, y_valid = train_test_split(\n",
|
||
" X_train_full, y_train_full, random_state=42)\n",
|
||
"\n",
|
||
"scaler = StandardScaler()\n",
|
||
"X_train_scaled = scaler.fit_transform(X_train)\n",
|
||
"X_valid_scaled = scaler.transform(X_valid)\n",
|
||
"X_test_scaled = scaler.transform(X_test)\n",
|
||
"\n",
|
||
"input_shape = X_train.shape[1:]\n",
|
||
"\n",
|
||
"tf.keras.utils.set_random_seed(42)\n",
|
||
"model = tf.keras.Sequential([\n",
|
||
" tf.keras.layers.Dense(30, activation=\"relu\", kernel_initializer=\"he_normal\",\n",
|
||
" input_shape=input_shape),\n",
|
||
" tf.keras.layers.Dense(1),\n",
|
||
"])"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 73,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"model.compile(loss=huber_fn, optimizer=\"nadam\", metrics=[\"mae\"])"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 74,
|
||
"metadata": {
|
||
"scrolled": true
|
||
},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"Epoch 1/2\n",
|
||
"363/363 [==============================] - 1s 1ms/step - loss: 0.4858 - mae: 0.8357 - val_loss: 0.3479 - val_mae: 0.6527\n",
|
||
"Epoch 2/2\n",
|
||
"363/363 [==============================] - 0s 1ms/step - loss: 0.2415 - mae: 0.5419 - val_loss: 0.2630 - val_mae: 0.5473\n"
|
||
]
|
||
},
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<keras.src.callbacks.History at 0x1c7b5218c70>"
|
||
]
|
||
},
|
||
"execution_count": 74,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"model.fit(X_train_scaled, y_train, epochs=2,\n",
|
||
" validation_data=(X_valid_scaled, y_valid))"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"## Saving/Loading Models with Custom Objects"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 75,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"INFO:tensorflow:Assets written to: my_model_with_a_custom_loss\\assets\n"
|
||
]
|
||
},
|
||
{
|
||
"name": "stderr",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"INFO:tensorflow:Assets written to: my_model_with_a_custom_loss\\assets\n"
|
||
]
|
||
}
|
||
],
|
||
"source": [
|
||
"model.save(\"my_model_with_a_custom_loss\") # extra code – saving works fine"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 76,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"model = tf.keras.models.load_model(\"my_model_with_a_custom_loss\",\n",
|
||
" custom_objects={\"huber_fn\": huber_fn})"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 77,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"Epoch 1/2\n",
|
||
"363/363 [==============================] - 1s 1ms/step - loss: 0.2052 - mae: 0.4910 - val_loss: 0.2210 - val_mae: 0.4946\n",
|
||
"Epoch 2/2\n",
|
||
"363/363 [==============================] - 0s 1ms/step - loss: 0.1888 - mae: 0.4683 - val_loss: 0.2021 - val_mae: 0.4773\n"
|
||
]
|
||
},
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<keras.src.callbacks.History at 0x1c7b68814b0>"
|
||
]
|
||
},
|
||
"execution_count": 77,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"model.fit(X_train_scaled, y_train, epochs=2,\n",
|
||
" validation_data=(X_valid_scaled, y_valid))"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 78,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"def create_huber(threshold=1.0):\n",
|
||
" def huber_fn(y_true, y_pred):\n",
|
||
" error = y_true - y_pred\n",
|
||
" is_small_error = tf.abs(error) < threshold\n",
|
||
" squared_loss = tf.square(error) / 2\n",
|
||
" linear_loss = threshold * tf.abs(error) - threshold ** 2 / 2\n",
|
||
" return tf.where(is_small_error, squared_loss, linear_loss)\n",
|
||
" return huber_fn"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 79,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"model.compile(loss=create_huber(2.0), optimizer=\"nadam\", metrics=[\"mae\"])"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 80,
|
||
"metadata": {
|
||
"scrolled": true
|
||
},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"Epoch 1/2\n",
|
||
"363/363 [==============================] - 1s 1ms/step - loss: 0.2051 - mae: 0.4598 - val_loss: 0.2249 - val_mae: 0.4582\n",
|
||
"Epoch 2/2\n",
|
||
"363/363 [==============================] - 0s 1ms/step - loss: 0.1982 - mae: 0.4531 - val_loss: 0.2035 - val_mae: 0.4527\n"
|
||
]
|
||
},
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<keras.src.callbacks.History at 0x1c7b7a35f90>"
|
||
]
|
||
},
|
||
"execution_count": 80,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"model.fit(X_train_scaled, y_train, epochs=2,\n",
|
||
" validation_data=(X_valid_scaled, y_valid))"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 81,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"INFO:tensorflow:Assets written to: my_model_with_a_custom_loss_threshold_2\\assets\n"
|
||
]
|
||
},
|
||
{
|
||
"name": "stderr",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"INFO:tensorflow:Assets written to: my_model_with_a_custom_loss_threshold_2\\assets\n"
|
||
]
|
||
}
|
||
],
|
||
"source": [
|
||
"model.save(\"my_model_with_a_custom_loss_threshold_2\")"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 82,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"model = tf.keras.models.load_model(\"my_model_with_a_custom_loss_threshold_2\",\n",
|
||
" custom_objects={\"huber_fn\": create_huber(2.0)})"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 83,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"Epoch 1/2\n",
|
||
"363/363 [==============================] - 1s 1ms/step - loss: 0.1935 - mae: 0.4465 - val_loss: 0.2020 - val_mae: 0.4410\n",
|
||
"Epoch 2/2\n",
|
||
"363/363 [==============================] - 0s 1ms/step - loss: 0.1899 - mae: 0.4422 - val_loss: 0.1867 - val_mae: 0.4399\n"
|
||
]
|
||
},
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<keras.src.callbacks.History at 0x1c7b54ef1f0>"
|
||
]
|
||
},
|
||
"execution_count": 83,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"model.fit(X_train_scaled, y_train, epochs=2,\n",
|
||
" validation_data=(X_valid_scaled, y_valid))"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 84,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"class HuberLoss(tf.keras.losses.Loss):\n",
|
||
" def __init__(self, threshold=1.0, **kwargs):\n",
|
||
" self.threshold = threshold\n",
|
||
" super().__init__(**kwargs)\n",
|
||
"\n",
|
||
" def call(self, y_true, y_pred):\n",
|
||
" error = y_true - y_pred\n",
|
||
" is_small_error = tf.abs(error) < self.threshold\n",
|
||
" squared_loss = tf.square(error) / 2\n",
|
||
" linear_loss = self.threshold * tf.abs(error) - self.threshold**2 / 2\n",
|
||
" return tf.where(is_small_error, squared_loss, linear_loss)\n",
|
||
"\n",
|
||
" def get_config(self):\n",
|
||
" base_config = super().get_config()\n",
|
||
" return {**base_config, \"threshold\": self.threshold}"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 85,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"# extra code – creates another basic Keras model\n",
|
||
"tf.keras.utils.set_random_seed(42)\n",
|
||
"model = tf.keras.Sequential([\n",
|
||
" tf.keras.layers.Dense(30, activation=\"relu\", kernel_initializer=\"he_normal\",\n",
|
||
" input_shape=input_shape),\n",
|
||
" tf.keras.layers.Dense(1),\n",
|
||
"])"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 86,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"model.compile(loss=HuberLoss(2.), optimizer=\"nadam\", metrics=[\"mae\"])"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 87,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"Epoch 1/2\n",
|
||
"363/363 [==============================] - 1s 1ms/step - loss: 0.6492 - mae: 0.8468 - val_loss: 0.5093 - val_mae: 0.6723\n",
|
||
"Epoch 2/2\n",
|
||
"363/363 [==============================] - 0s 1ms/step - loss: 0.2912 - mae: 0.5552 - val_loss: 0.3715 - val_mae: 0.5683\n"
|
||
]
|
||
},
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<keras.src.callbacks.History at 0x1c7bad66860>"
|
||
]
|
||
},
|
||
"execution_count": 87,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"model.fit(X_train_scaled, y_train, epochs=2,\n",
|
||
" validation_data=(X_valid_scaled, y_valid))"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 88,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"INFO:tensorflow:Assets written to: my_model_with_a_custom_loss_class\\assets\n"
|
||
]
|
||
},
|
||
{
|
||
"name": "stderr",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"INFO:tensorflow:Assets written to: my_model_with_a_custom_loss_class\\assets\n"
|
||
]
|
||
}
|
||
],
|
||
"source": [
|
||
"model.save(\"my_model_with_a_custom_loss_class\") # extra code – saving works"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 89,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"model = tf.keras.models.load_model(\"my_model_with_a_custom_loss_class\",\n",
|
||
" custom_objects={\"HuberLoss\": HuberLoss})"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 90,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"Epoch 1/2\n",
|
||
"363/363 [==============================] - 1s 1ms/step - loss: 0.2416 - mae: 0.5034 - val_loss: 0.2922 - val_mae: 0.5057\n",
|
||
"Epoch 2/2\n",
|
||
"363/363 [==============================] - 0s 1ms/step - loss: 0.2173 - mae: 0.4774 - val_loss: 0.2503 - val_mae: 0.4843\n"
|
||
]
|
||
},
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<keras.src.callbacks.History at 0x1c7bb0de6b0>"
|
||
]
|
||
},
|
||
"execution_count": 90,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"# extra code – shows that loading worked fine, the model can be used normally\n",
|
||
"model.fit(X_train_scaled, y_train, epochs=2,\n",
|
||
" validation_data=(X_valid_scaled, y_valid))"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 91,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"2.0"
|
||
]
|
||
},
|
||
"execution_count": 91,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"model.loss.threshold # extra code – the threshold was loaded correctly"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"## Other Custom Functions"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 92,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"def my_softplus(z):\n",
|
||
" return tf.math.log(1.0 + tf.exp(z))\n",
|
||
"\n",
|
||
"def my_glorot_initializer(shape, dtype=tf.float32):\n",
|
||
" stddev = tf.sqrt(2. / (shape[0] + shape[1]))\n",
|
||
" return tf.random.normal(shape, stddev=stddev, dtype=dtype)\n",
|
||
"\n",
|
||
"def my_l1_regularizer(weights):\n",
|
||
" return tf.reduce_sum(tf.abs(0.01 * weights))\n",
|
||
"\n",
|
||
"def my_positive_weights(weights): # return value is just tf.nn.relu(weights)\n",
|
||
" return tf.where(weights < 0., tf.zeros_like(weights), weights)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 93,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"layer = tf.keras.layers.Dense(1, activation=my_softplus,\n",
|
||
" kernel_initializer=my_glorot_initializer,\n",
|
||
" kernel_regularizer=my_l1_regularizer,\n",
|
||
" kernel_constraint=my_positive_weights)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 94,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"Epoch 1/2\n",
|
||
"363/363 [==============================] - 1s 1ms/step - loss: 1.4714 - mae: 0.8316 - val_loss: inf - val_mae: inf\n",
|
||
"Epoch 2/2\n",
|
||
"363/363 [==============================] - 0s 1ms/step - loss: 0.8094 - mae: 0.6172 - val_loss: 2.6153 - val_mae: 0.6058\n",
|
||
"INFO:tensorflow:Assets written to: my_model_with_many_custom_parts\\assets\n"
|
||
]
|
||
},
|
||
{
|
||
"name": "stderr",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"INFO:tensorflow:Assets written to: my_model_with_many_custom_parts\\assets\n",
|
||
"c:\\Users\\schue\\dev\\handson-ml3\\lib\\site-packages\\keras\\src\\initializers\\__init__.py:144: UserWarning: The `keras.initializers.serialize()` API should only be used for objects of type `keras.initializers.Initializer`. Found an instance of type <class 'function'>, which may lead to improper serialization.\n",
|
||
" warnings.warn(\n",
|
||
"c:\\Users\\schue\\dev\\handson-ml3\\lib\\site-packages\\keras\\src\\regularizers.py:426: UserWarning: The `keras.regularizers.serialize()` API should only be used for objects of type `keras.regularizers.Regularizer`. Found an instance of type <class 'function'>, which may lead to improper serialization.\n",
|
||
" warnings.warn(\n",
|
||
"c:\\Users\\schue\\dev\\handson-ml3\\lib\\site-packages\\keras\\src\\constraints.py:365: UserWarning: The `keras.constraints.serialize()` API should only be used for objects of type `keras.constraints.Constraint`. Found an instance of type <class 'function'>, which may lead to improper serialization.\n",
|
||
" warnings.warn(\n"
|
||
]
|
||
},
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"Epoch 1/2\n",
|
||
"363/363 [==============================] - 1s 1ms/step - loss: 0.6333 - mae: 0.5617 - val_loss: 1.1687 - val_mae: 0.5468\n",
|
||
"Epoch 2/2\n",
|
||
"363/363 [==============================] - 0s 1ms/step - loss: 0.5570 - mae: 0.5303 - val_loss: 1.0440 - val_mae: 0.5250\n"
|
||
]
|
||
},
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<keras.src.callbacks.History at 0x1c7bd460be0>"
|
||
]
|
||
},
|
||
"execution_count": 94,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"# extra code – show that building, training, saving, loading, and training again\n",
|
||
"# works fine with a model containing many custom parts\n",
|
||
"\n",
|
||
"tf.keras.utils.set_random_seed(42)\n",
|
||
"model = tf.keras.Sequential([\n",
|
||
" tf.keras.layers.Dense(30, activation=\"relu\", kernel_initializer=\"he_normal\",\n",
|
||
" input_shape=input_shape),\n",
|
||
" tf.keras.layers.Dense(1, activation=my_softplus,\n",
|
||
" kernel_initializer=my_glorot_initializer,\n",
|
||
" kernel_regularizer=my_l1_regularizer,\n",
|
||
" kernel_constraint=my_positive_weights)\n",
|
||
"])\n",
|
||
"model.compile(loss=\"mse\", optimizer=\"nadam\", metrics=[\"mae\"])\n",
|
||
"model.fit(X_train_scaled, y_train, epochs=2,\n",
|
||
" validation_data=(X_valid_scaled, y_valid))\n",
|
||
"model.save(\"my_model_with_many_custom_parts\")\n",
|
||
"model = tf.keras.models.load_model(\n",
|
||
" \"my_model_with_many_custom_parts\",\n",
|
||
" custom_objects={\n",
|
||
" \"my_l1_regularizer\": my_l1_regularizer,\n",
|
||
" \"my_positive_weights\": my_positive_weights,\n",
|
||
" \"my_glorot_initializer\": my_glorot_initializer,\n",
|
||
" \"my_softplus\": my_softplus,\n",
|
||
" }\n",
|
||
")\n",
|
||
"model.fit(X_train_scaled, y_train, epochs=2,\n",
|
||
" validation_data=(X_valid_scaled, y_valid))"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 95,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"class MyL1Regularizer(tf.keras.regularizers.Regularizer):\n",
|
||
" def __init__(self, factor):\n",
|
||
" self.factor = factor\n",
|
||
"\n",
|
||
" def __call__(self, weights):\n",
|
||
" return tf.reduce_sum(tf.abs(self.factor * weights))\n",
|
||
"\n",
|
||
" def get_config(self):\n",
|
||
" return {\"factor\": self.factor}"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 96,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"Epoch 1/2\n",
|
||
"363/363 [==============================] - 1s 1ms/step - loss: 1.4714 - mae: 0.8316 - val_loss: inf - val_mae: inf\n",
|
||
"Epoch 2/2\n",
|
||
"363/363 [==============================] - 0s 1ms/step - loss: 0.8094 - mae: 0.6172 - val_loss: 2.6153 - val_mae: 0.6058\n",
|
||
"INFO:tensorflow:Assets written to: my_model_with_many_custom_parts\\assets\n"
|
||
]
|
||
},
|
||
{
|
||
"name": "stderr",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"INFO:tensorflow:Assets written to: my_model_with_many_custom_parts\\assets\n"
|
||
]
|
||
},
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"Epoch 1/2\n"
|
||
]
|
||
},
|
||
{
|
||
"name": "stderr",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"c:\\Users\\schue\\dev\\handson-ml3\\lib\\site-packages\\keras\\src\\initializers\\__init__.py:144: UserWarning: The `keras.initializers.serialize()` API should only be used for objects of type `keras.initializers.Initializer`. Found an instance of type <class 'function'>, which may lead to improper serialization.\n",
|
||
" warnings.warn(\n",
|
||
"c:\\Users\\schue\\dev\\handson-ml3\\lib\\site-packages\\keras\\src\\constraints.py:365: UserWarning: The `keras.constraints.serialize()` API should only be used for objects of type `keras.constraints.Constraint`. Found an instance of type <class 'function'>, which may lead to improper serialization.\n",
|
||
" warnings.warn(\n"
|
||
]
|
||
},
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"363/363 [==============================] - 1s 1ms/step - loss: 0.6333 - mae: 0.5617 - val_loss: 1.1687 - val_mae: 0.5468\n",
|
||
"Epoch 2/2\n",
|
||
"363/363 [==============================] - 0s 975us/step - loss: 0.5570 - mae: 0.5303 - val_loss: 1.0440 - val_mae: 0.5250\n"
|
||
]
|
||
},
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<keras.src.callbacks.History at 0x1c7bf9db0d0>"
|
||
]
|
||
},
|
||
"execution_count": 96,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"# extra code – again, show that everything works fine, this time using our\n",
|
||
"# custom regularizer class\n",
|
||
"\n",
|
||
"tf.keras.utils.set_random_seed(42)\n",
|
||
"model = tf.keras.Sequential([\n",
|
||
" tf.keras.layers.Dense(30, activation=\"relu\", kernel_initializer=\"he_normal\",\n",
|
||
" input_shape=input_shape),\n",
|
||
" tf.keras.layers.Dense(1, activation=my_softplus,\n",
|
||
" kernel_regularizer=MyL1Regularizer(0.01),\n",
|
||
" kernel_constraint=my_positive_weights,\n",
|
||
" kernel_initializer=my_glorot_initializer),\n",
|
||
"])\n",
|
||
"model.compile(loss=\"mse\", optimizer=\"nadam\", metrics=[\"mae\"])\n",
|
||
"model.fit(X_train_scaled, y_train, epochs=2,\n",
|
||
" validation_data=(X_valid_scaled, y_valid))\n",
|
||
"model.save(\"my_model_with_many_custom_parts\")\n",
|
||
"model = tf.keras.models.load_model(\n",
|
||
" \"my_model_with_many_custom_parts\",\n",
|
||
" custom_objects={\n",
|
||
" \"MyL1Regularizer\": MyL1Regularizer,\n",
|
||
" \"my_positive_weights\": my_positive_weights,\n",
|
||
" \"my_glorot_initializer\": my_glorot_initializer,\n",
|
||
" \"my_softplus\": my_softplus,\n",
|
||
" }\n",
|
||
")\n",
|
||
"model.fit(X_train_scaled, y_train, epochs=2,\n",
|
||
" validation_data=(X_valid_scaled, y_valid))"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"## Custom Metrics"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 97,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"# extra code – once again, let's create a basic Keras model\n",
|
||
"tf.keras.utils.set_random_seed(42)\n",
|
||
"model = tf.keras.Sequential([\n",
|
||
" tf.keras.layers.Dense(30, activation=\"relu\", kernel_initializer=\"he_normal\",\n",
|
||
" input_shape=input_shape),\n",
|
||
" tf.keras.layers.Dense(1),\n",
|
||
"])"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 98,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"model.compile(loss=\"mse\", optimizer=\"nadam\", metrics=[create_huber(2.0)])"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 99,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"Epoch 1/2\n",
|
||
"363/363 [==============================] - 1s 728us/step - loss: 1.7474 - huber_fn: 0.6846\n",
|
||
"Epoch 2/2\n",
|
||
"363/363 [==============================] - 0s 724us/step - loss: 0.7843 - huber_fn: 0.3136\n"
|
||
]
|
||
},
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<keras.src.callbacks.History at 0x1c7bd3c2320>"
|
||
]
|
||
},
|
||
"execution_count": 99,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"# extra code – train the model with our custom metric\n",
|
||
"model.fit(X_train_scaled, y_train, epochs=2)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"**Note**: if you use the same function as the loss and a metric, you may be surprised to see slightly different results. This is in part because the operations are not computed exactly in the same order, so there might be tiny floating point errors. More importantly, if you use sample weights or class weights, then the equations are a bit different:\n",
|
||
"* the `fit()` method keeps track of the mean of all batch losses seen so far since the start of the epoch. Each batch loss is the sum of the weighted instance losses divided by the _batch size_ (not the sum of weights, so the batch loss is _not_ the weighted mean of the losses).\n",
|
||
"* the metric since the start of the epoch is equal to the sum of weighted instance losses divided by sum of all weights seen so far. In other words, it is the weighted mean of all the instance losses. Not the same thing."
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"### Streaming metrics"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 100,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(), dtype=float32, numpy=0.8>"
|
||
]
|
||
},
|
||
"execution_count": 100,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"precision = tf.keras.metrics.Precision()\n",
|
||
"precision([0, 1, 1, 1, 0, 1, 0, 1], [1, 1, 0, 1, 0, 1, 0, 1])"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 101,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(), dtype=float32, numpy=0.5>"
|
||
]
|
||
},
|
||
"execution_count": 101,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"precision([0, 1, 0, 0, 1, 0, 1, 1], [1, 0, 1, 1, 0, 0, 0, 0])"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 102,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(), dtype=float32, numpy=0.5>"
|
||
]
|
||
},
|
||
"execution_count": 102,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"precision.result()"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 103,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"[<tf.Variable 'true_positives:0' shape=(1,) dtype=float32, numpy=array([4.], dtype=float32)>,\n",
|
||
" <tf.Variable 'false_positives:0' shape=(1,) dtype=float32, numpy=array([4.], dtype=float32)>]"
|
||
]
|
||
},
|
||
"execution_count": 103,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"precision.variables"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 104,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"precision.reset_states()"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"Creating a streaming metric:"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 105,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"class HuberMetric(tf.keras.metrics.Metric):\n",
|
||
" def __init__(self, threshold=1.0, **kwargs):\n",
|
||
" super().__init__(**kwargs) # handles base args (e.g., dtype)\n",
|
||
" self.threshold = threshold\n",
|
||
" self.huber_fn = create_huber(threshold)\n",
|
||
" self.total = self.add_weight(\"total\", initializer=\"zeros\")\n",
|
||
" self.count = self.add_weight(\"count\", initializer=\"zeros\")\n",
|
||
"\n",
|
||
" def update_state(self, y_true, y_pred, sample_weight=None):\n",
|
||
" sample_metrics = self.huber_fn(y_true, y_pred)\n",
|
||
" self.total.assign_add(tf.reduce_sum(sample_metrics))\n",
|
||
" self.count.assign_add(tf.cast(tf.size(y_true), tf.float32))\n",
|
||
"\n",
|
||
" def result(self):\n",
|
||
" return self.total / self.count\n",
|
||
"\n",
|
||
" def get_config(self):\n",
|
||
" base_config = super().get_config()\n",
|
||
" return {**base_config, \"threshold\": self.threshold}"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"**Extra material** – the rest of this section tests the `HuberMetric` class and shows another implementation subclassing `tf.keras.metrics.Mean`."
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 106,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(), dtype=float32, numpy=14.0>"
|
||
]
|
||
},
|
||
"execution_count": 106,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"m = HuberMetric(2.)\n",
|
||
"\n",
|
||
"# total = 2 * |10 - 2| - 2²/2 = 14\n",
|
||
"# count = 1\n",
|
||
"# result = 14 / 1 = 14\n",
|
||
"m(tf.constant([[2.]]), tf.constant([[10.]]))"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 107,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(), dtype=float32, numpy=7.0>"
|
||
]
|
||
},
|
||
"execution_count": 107,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"# total = total + (|1 - 0|² / 2) + (2 * |9.25 - 5| - 2² / 2) = 14 + 7 = 21\n",
|
||
"# count = count + 2 = 3\n",
|
||
"# result = total / count = 21 / 3 = 7\n",
|
||
"m(tf.constant([[0.], [5.]]), tf.constant([[1.], [9.25]]))"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 108,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(), dtype=float32, numpy=7.0>"
|
||
]
|
||
},
|
||
"execution_count": 108,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"m.result()"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 109,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"[<tf.Variable 'total:0' shape=() dtype=float32, numpy=21.0>,\n",
|
||
" <tf.Variable 'count:0' shape=() dtype=float32, numpy=3.0>]"
|
||
]
|
||
},
|
||
"execution_count": 109,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"m.variables"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 110,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"[<tf.Variable 'total:0' shape=() dtype=float32, numpy=0.0>,\n",
|
||
" <tf.Variable 'count:0' shape=() dtype=float32, numpy=0.0>]"
|
||
]
|
||
},
|
||
"execution_count": 110,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"m.reset_states()\n",
|
||
"m.variables"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"Let's check that the `HuberMetric` class works well:"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 111,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"tf.keras.utils.set_random_seed(42)\n",
|
||
"model = tf.keras.Sequential([\n",
|
||
" tf.keras.layers.Dense(30, activation=\"relu\", kernel_initializer=\"he_normal\",\n",
|
||
" input_shape=input_shape),\n",
|
||
" tf.keras.layers.Dense(1),\n",
|
||
"])"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 112,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"model.compile(loss=create_huber(2.0), optimizer=\"nadam\",\n",
|
||
" metrics=[HuberMetric(2.0)])"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 113,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"Epoch 1/2\n",
|
||
"363/363 [==============================] - 1s 796us/step - loss: 0.6492 - huber_metric_1: 0.6492\n",
|
||
"Epoch 2/2\n",
|
||
"363/363 [==============================] - 0s 770us/step - loss: 0.2912 - huber_metric_1: 0.2912\n"
|
||
]
|
||
},
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<keras.src.callbacks.History at 0x1c7b6760c10>"
|
||
]
|
||
},
|
||
"execution_count": 113,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"model.fit(X_train_scaled, y_train, epochs=2)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 114,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"INFO:tensorflow:Assets written to: my_model_with_a_custom_metric\\assets\n"
|
||
]
|
||
},
|
||
{
|
||
"name": "stderr",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"INFO:tensorflow:Assets written to: my_model_with_a_custom_metric\\assets\n"
|
||
]
|
||
}
|
||
],
|
||
"source": [
|
||
"model.save(\"my_model_with_a_custom_metric\")"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 115,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"model = tf.keras.models.load_model(\n",
|
||
" \"my_model_with_a_custom_metric\",\n",
|
||
" custom_objects={\n",
|
||
" \"huber_fn\": create_huber(2.0),\n",
|
||
" \"HuberMetric\": HuberMetric\n",
|
||
" }\n",
|
||
")"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 116,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"Epoch 1/2\n",
|
||
"363/363 [==============================] - 1s 822us/step - loss: 0.2416 - huber_metric_1: 0.2416\n",
|
||
"Epoch 2/2\n",
|
||
"363/363 [==============================] - 0s 855us/step - loss: 0.2173 - huber_metric_1: 0.2173\n"
|
||
]
|
||
},
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<keras.src.callbacks.History at 0x1c7c0cfc970>"
|
||
]
|
||
},
|
||
"execution_count": 116,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"model.fit(X_train_scaled, y_train, epochs=2)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"`model.metrics` contains the model's loss followed by the model's metric(s), so the `HuberMetric` is `model.metrics[-1]`:"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 117,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"2.0"
|
||
]
|
||
},
|
||
"execution_count": 117,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"model.metrics[-1].threshold"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"Looks like it works fine! More simply, we could have created the class like this:"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 118,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"class HuberMetric(tf.keras.metrics.Mean):\n",
|
||
" def __init__(self, threshold=1.0, name='HuberMetric', dtype=None):\n",
|
||
" self.threshold = threshold\n",
|
||
" self.huber_fn = create_huber(threshold)\n",
|
||
" super().__init__(name=name, dtype=dtype)\n",
|
||
"\n",
|
||
" def update_state(self, y_true, y_pred, sample_weight=None):\n",
|
||
" metric = self.huber_fn(y_true, y_pred)\n",
|
||
" super(HuberMetric, self).update_state(metric, sample_weight)\n",
|
||
"\n",
|
||
" def get_config(self):\n",
|
||
" base_config = super().get_config()\n",
|
||
" return {**base_config, \"threshold\": self.threshold}"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"This class handles shapes better, and it also supports sample weights."
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 119,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"tf.keras.utils.set_random_seed(42)\n",
|
||
"model = tf.keras.Sequential([\n",
|
||
" tf.keras.layers.Dense(30, activation=\"relu\", kernel_initializer=\"he_normal\",\n",
|
||
" input_shape=input_shape),\n",
|
||
" tf.keras.layers.Dense(1),\n",
|
||
"])"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 120,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"model.compile(loss=tf.keras.losses.Huber(2.0), optimizer=\"nadam\",\n",
|
||
" weighted_metrics=[HuberMetric(2.0)])"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 121,
|
||
"metadata": {
|
||
"scrolled": true
|
||
},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"Epoch 1/2\n",
|
||
"363/363 [==============================] - 1s 812us/step - loss: 0.3272 - HuberMetric: 0.6594\n",
|
||
"Epoch 2/2\n",
|
||
"363/363 [==============================] - 0s 859us/step - loss: 0.1449 - HuberMetric: 0.2919\n"
|
||
]
|
||
}
|
||
],
|
||
"source": [
|
||
"np.random.seed(42)\n",
|
||
"sample_weight = np.random.rand(len(y_train))\n",
|
||
"history = model.fit(X_train_scaled, y_train, epochs=2,\n",
|
||
" sample_weight=sample_weight)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 122,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"(0.3272010087966919, 0.3272010869771911)"
|
||
]
|
||
},
|
||
"execution_count": 122,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"(history.history[\"loss\"][0],\n",
|
||
" history.history[\"HuberMetric\"][0] * sample_weight.mean())"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 123,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"INFO:tensorflow:Assets written to: my_model_with_a_custom_metric_v2\\assets\n"
|
||
]
|
||
},
|
||
{
|
||
"name": "stderr",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"INFO:tensorflow:Assets written to: my_model_with_a_custom_metric_v2\\assets\n"
|
||
]
|
||
}
|
||
],
|
||
"source": [
|
||
"model.save(\"my_model_with_a_custom_metric_v2\")"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 124,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"model = tf.keras.models.load_model(\"my_model_with_a_custom_metric_v2\",\n",
|
||
" custom_objects={\"HuberMetric\": HuberMetric})"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 125,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"Epoch 1/2\n",
|
||
"363/363 [==============================] - 1s 1ms/step - loss: 0.2442 - HuberMetric: 0.2442\n",
|
||
"Epoch 2/2\n",
|
||
"363/363 [==============================] - 0s 1ms/step - loss: 0.2184 - HuberMetric: 0.2184\n"
|
||
]
|
||
},
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<keras.src.callbacks.History at 0x1c7c31cdbd0>"
|
||
]
|
||
},
|
||
"execution_count": 125,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"model.fit(X_train_scaled, y_train, epochs=2)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 126,
|
||
"metadata": {
|
||
"scrolled": true
|
||
},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"2.0"
|
||
]
|
||
},
|
||
"execution_count": 126,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"model.metrics[-1].threshold"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"## Custom Layers"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 127,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"exponential_layer = tf.keras.layers.Lambda(lambda x: tf.exp(x))"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 128,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(3,), dtype=float32, numpy=array([0.36787945, 1. , 2.7182817 ], dtype=float32)>"
|
||
]
|
||
},
|
||
"execution_count": 128,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"# extra code – like all layers, it can be used as a function:\n",
|
||
"exponential_layer([-1., 0., 1.])"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"Adding an exponential layer at the output of a regression model can be useful if the values to predict are positive and with very different scales (e.g., 0.001, 10., 10000)."
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 129,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"Epoch 1/5\n",
|
||
"363/363 [==============================] - 1s 1ms/step - loss: 0.7784 - val_loss: 0.4393\n",
|
||
"Epoch 2/5\n",
|
||
"363/363 [==============================] - 0s 1ms/step - loss: 0.5702 - val_loss: 0.4094\n",
|
||
"Epoch 3/5\n",
|
||
"363/363 [==============================] - 0s 1ms/step - loss: 0.4431 - val_loss: 0.3760\n",
|
||
"Epoch 4/5\n",
|
||
"363/363 [==============================] - 0s 1ms/step - loss: 0.4984 - val_loss: 0.3785\n",
|
||
"Epoch 5/5\n",
|
||
"363/363 [==============================] - 0s 1ms/step - loss: 0.3966 - val_loss: 0.3633\n",
|
||
"162/162 [==============================] - 0s 667us/step - loss: 0.3781\n"
|
||
]
|
||
},
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"0.3781099021434784"
|
||
]
|
||
},
|
||
"execution_count": 129,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"tf.keras.utils.set_random_seed(42)\n",
|
||
"model = tf.keras.Sequential([\n",
|
||
" tf.keras.layers.Dense(30, activation=\"relu\", input_shape=input_shape),\n",
|
||
" tf.keras.layers.Dense(1),\n",
|
||
" exponential_layer\n",
|
||
"])\n",
|
||
"model.compile(loss=\"mse\", optimizer=\"sgd\")\n",
|
||
"model.fit(X_train_scaled, y_train, epochs=5,\n",
|
||
" validation_data=(X_valid_scaled, y_valid))\n",
|
||
"model.evaluate(X_test_scaled, y_test)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"Alternatively, it's often preferable to replace the targets with the logarithm of the targets (and use no activation function in the output layer)."
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 130,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"class MyDense(tf.keras.layers.Layer):\n",
|
||
" def __init__(self, units, activation=None, **kwargs):\n",
|
||
" super().__init__(**kwargs)\n",
|
||
" self.units = units\n",
|
||
" self.activation = tf.keras.activations.get(activation)\n",
|
||
"\n",
|
||
" def build(self, batch_input_shape):\n",
|
||
" self.kernel = self.add_weight(\n",
|
||
" name=\"kernel\", shape=[batch_input_shape[-1], self.units],\n",
|
||
" initializer=\"he_normal\")\n",
|
||
" self.bias = self.add_weight(\n",
|
||
" name=\"bias\", shape=[self.units], initializer=\"zeros\")\n",
|
||
"\n",
|
||
" def call(self, X):\n",
|
||
" return self.activation(X @ self.kernel + self.bias)\n",
|
||
"\n",
|
||
" def get_config(self):\n",
|
||
" base_config = super().get_config()\n",
|
||
" return {**base_config, \"units\": self.units,\n",
|
||
" \"activation\": tf.keras.activations.serialize(self.activation)}"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 131,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"Epoch 1/2\n",
|
||
"363/363 [==============================] - 1s 1ms/step - loss: 3.1183 - val_loss: 6.9549\n",
|
||
"Epoch 2/2\n",
|
||
"363/363 [==============================] - 0s 1ms/step - loss: 0.8702 - val_loss: 3.2627\n",
|
||
"162/162 [==============================] - 0s 690us/step - loss: 0.7039\n",
|
||
"INFO:tensorflow:Assets written to: my_model_with_a_custom_layer\\assets\n"
|
||
]
|
||
},
|
||
{
|
||
"name": "stderr",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"INFO:tensorflow:Assets written to: my_model_with_a_custom_layer\\assets\n"
|
||
]
|
||
}
|
||
],
|
||
"source": [
|
||
"# extra code – shows that a custom layer can be used normally\n",
|
||
"tf.keras.utils.set_random_seed(42)\n",
|
||
"model = tf.keras.Sequential([\n",
|
||
" MyDense(30, activation=\"relu\", input_shape=input_shape),\n",
|
||
" MyDense(1)\n",
|
||
"])\n",
|
||
"model.compile(loss=\"mse\", optimizer=\"nadam\")\n",
|
||
"model.fit(X_train_scaled, y_train, epochs=2,\n",
|
||
" validation_data=(X_valid_scaled, y_valid))\n",
|
||
"model.evaluate(X_test_scaled, y_test)\n",
|
||
"model.save(\"my_model_with_a_custom_layer\")"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 132,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"Epoch 1/2\n",
|
||
"363/363 [==============================] - 1s 1ms/step - loss: 0.5945 - val_loss: 0.5318\n",
|
||
"Epoch 2/2\n",
|
||
"363/363 [==============================] - 0s 1ms/step - loss: 0.4712 - val_loss: 0.5751\n"
|
||
]
|
||
},
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<keras.src.callbacks.History at 0x1c7c5776500>"
|
||
]
|
||
},
|
||
"execution_count": 132,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"# extra code – shows how to load a model with a custom layer\n",
|
||
"model = tf.keras.models.load_model(\"my_model_with_a_custom_layer\",\n",
|
||
" custom_objects={\"MyDense\": MyDense})\n",
|
||
"model.fit(X_train_scaled, y_train, epochs=2,\n",
|
||
" validation_data=(X_valid_scaled, y_valid))"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 133,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"class MyMultiLayer(tf.keras.layers.Layer):\n",
|
||
" def call(self, X):\n",
|
||
" X1, X2 = X\n",
|
||
" print(\"X1.shape: \", X1.shape ,\" X2.shape: \", X2.shape) # extra code\n",
|
||
" return X1 + X2, X1 * X2, X1 / X2"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"Our custom layer can be called using the functional API like this:"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 134,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"X1.shape: (None, 2) X2.shape: (None, 2)\n"
|
||
]
|
||
},
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"(<KerasTensor: shape=(None, 2) dtype=float32 (created by layer 'my_multi_layer')>,\n",
|
||
" <KerasTensor: shape=(None, 2) dtype=float32 (created by layer 'my_multi_layer')>,\n",
|
||
" <KerasTensor: shape=(None, 2) dtype=float32 (created by layer 'my_multi_layer')>)"
|
||
]
|
||
},
|
||
"execution_count": 134,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"# extra code – tests MyMultiLayer with symbolic inputs\n",
|
||
"inputs1 = tf.keras.layers.Input(shape=[2])\n",
|
||
"inputs2 = tf.keras.layers.Input(shape=[2])\n",
|
||
"MyMultiLayer()((inputs1, inputs2))"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"Note that the `call()` method receives symbolic inputs, and it returns symbolic outputs. The shapes are only partially specified at this stage: we don't know the batch size, which is why the first dimension is `None`.\n",
|
||
"\n",
|
||
"We can also pass actual data to the custom layer:"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 135,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"X1.shape: (2, 2) X2.shape: (2, 2)\n"
|
||
]
|
||
},
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"(<tf.Tensor: shape=(2, 2), dtype=float32, numpy=\n",
|
||
" array([[ 9., 18.],\n",
|
||
" [ 6., 10.]], dtype=float32)>,\n",
|
||
" <tf.Tensor: shape=(2, 2), dtype=float32, numpy=\n",
|
||
" array([[18., 72.],\n",
|
||
" [ 8., 21.]], dtype=float32)>,\n",
|
||
" <tf.Tensor: shape=(2, 2), dtype=float32, numpy=\n",
|
||
" array([[0.5 , 0.5 ],\n",
|
||
" [0.5 , 2.3333333]], dtype=float32)>)"
|
||
]
|
||
},
|
||
"execution_count": 135,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"# extra code – tests MyMultiLayer with actual data\n",
|
||
"X1, X2 = np.array([[3., 6.], [2., 7.]]), np.array([[6., 12.], [4., 3.]])\n",
|
||
"MyMultiLayer()((X1, X2))"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"Now let's create a layer with a different behavior during training and testing:"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 136,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"class MyGaussianNoise(tf.keras.layers.Layer):\n",
|
||
" def __init__(self, stddev, **kwargs):\n",
|
||
" super().__init__(**kwargs)\n",
|
||
" self.stddev = stddev\n",
|
||
"\n",
|
||
" def call(self, X, training=None):\n",
|
||
" if training:\n",
|
||
" noise = tf.random.normal(tf.shape(X), stddev=self.stddev)\n",
|
||
" return X + noise\n",
|
||
" else:\n",
|
||
" return X"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"Here's a simple model that uses this custom layer:"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 137,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"Epoch 1/2\n",
|
||
"363/363 [==============================] - 1s 1ms/step - loss: 2.2220 - val_loss: 25.1506\n",
|
||
"Epoch 2/2\n",
|
||
"363/363 [==============================] - 0s 968us/step - loss: 1.4104 - val_loss: 17.0415\n",
|
||
"162/162 [==============================] - 0s 643us/step - loss: 1.1059\n"
|
||
]
|
||
},
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"1.1058681011199951"
|
||
]
|
||
},
|
||
"execution_count": 137,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"# extra code – tests MyGaussianNoise\n",
|
||
"tf.keras.utils.set_random_seed(42)\n",
|
||
"model = tf.keras.Sequential([\n",
|
||
" MyGaussianNoise(stddev=1.0, input_shape=input_shape),\n",
|
||
" tf.keras.layers.Dense(30, activation=\"relu\",\n",
|
||
" kernel_initializer=\"he_normal\"),\n",
|
||
" tf.keras.layers.Dense(1)\n",
|
||
"])\n",
|
||
"model.compile(loss=\"mse\", optimizer=\"nadam\")\n",
|
||
"model.fit(X_train_scaled, y_train, epochs=2,\n",
|
||
" validation_data=(X_valid_scaled, y_valid))\n",
|
||
"model.evaluate(X_test_scaled, y_test)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"## Custom Models"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 138,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"class ResidualBlock(tf.keras.layers.Layer):\n",
|
||
" def __init__(self, n_layers, n_neurons, **kwargs):\n",
|
||
" super().__init__(**kwargs)\n",
|
||
" self.hidden = [tf.keras.layers.Dense(n_neurons, activation=\"relu\",\n",
|
||
" kernel_initializer=\"he_normal\")\n",
|
||
" for _ in range(n_layers)]\n",
|
||
"\n",
|
||
" def call(self, inputs):\n",
|
||
" Z = inputs\n",
|
||
" for layer in self.hidden:\n",
|
||
" Z = layer(Z)\n",
|
||
" return inputs + Z"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 139,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"class ResidualRegressor(tf.keras.Model):\n",
|
||
" def __init__(self, output_dim, **kwargs):\n",
|
||
" super().__init__(**kwargs)\n",
|
||
" self.hidden1 = tf.keras.layers.Dense(30, activation=\"relu\",\n",
|
||
" kernel_initializer=\"he_normal\")\n",
|
||
" self.block1 = ResidualBlock(2, 30)\n",
|
||
" self.block2 = ResidualBlock(2, 30)\n",
|
||
" self.out = tf.keras.layers.Dense(output_dim)\n",
|
||
"\n",
|
||
" def call(self, inputs):\n",
|
||
" Z = self.hidden1(inputs)\n",
|
||
" for _ in range(1 + 3):\n",
|
||
" Z = self.block1(Z)\n",
|
||
" Z = self.block2(Z)\n",
|
||
" return self.out(Z)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 140,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"Epoch 1/2\n",
|
||
"363/363 [==============================] - 2s 1ms/step - loss: 32.7847\n",
|
||
"Epoch 2/2\n",
|
||
"363/363 [==============================] - 0s 986us/step - loss: 1.3612\n",
|
||
"162/162 [==============================] - 0s 713us/step - loss: 1.1603\n",
|
||
"INFO:tensorflow:Assets written to: my_custom_model\\assets\n"
|
||
]
|
||
},
|
||
{
|
||
"name": "stderr",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"INFO:tensorflow:Assets written to: my_custom_model\\assets\n"
|
||
]
|
||
}
|
||
],
|
||
"source": [
|
||
"# extra code – shows that the model can be used normally\n",
|
||
"tf.keras.utils.set_random_seed(42)\n",
|
||
"model = ResidualRegressor(1)\n",
|
||
"model.compile(loss=\"mse\", optimizer=\"nadam\")\n",
|
||
"history = model.fit(X_train_scaled, y_train, epochs=2)\n",
|
||
"score = model.evaluate(X_test_scaled, y_test)\n",
|
||
"model.save(\"my_custom_model\")"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 141,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"Epoch 1/2\n",
|
||
"363/363 [==============================] - 2s 1ms/step - loss: 1.3451\n",
|
||
"Epoch 2/2\n",
|
||
"363/363 [==============================] - 0s 1ms/step - loss: 0.7928\n",
|
||
"1/1 [==============================] - 0s 66ms/step\n"
|
||
]
|
||
},
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"array([[1.1431925],\n",
|
||
" [1.0584602],\n",
|
||
" [4.71127 ]], dtype=float32)"
|
||
]
|
||
},
|
||
"execution_count": 141,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"# extra code – the model can be loaded and you can continue training or use it\n",
|
||
"# to make predictions\n",
|
||
"model = tf.keras.models.load_model(\"my_custom_model\")\n",
|
||
"history = model.fit(X_train_scaled, y_train, epochs=2)\n",
|
||
"model.predict(X_test_scaled[:3])"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
"We could have defined the model using the Sequential API instead:"
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 142,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"tf.keras.utils.set_random_seed(42)\n",
|
||
"block1 = ResidualBlock(2, 30)\n",
|
||
"model = tf.keras.Sequential([\n",
|
||
" tf.keras.layers.Dense(30, activation=\"relu\",\n",
|
||
" kernel_initializer=\"he_normal\"),\n",
|
||
" block1, block1, block1, block1,\n",
|
||
" ResidualBlock(2, 30),\n",
|
||
" tf.keras.layers.Dense(1)\n",
|
||
"])"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"## Losses and Metrics Based on Model Internals"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 143,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"class ReconstructingRegressor(tf.keras.Model):\n",
|
||
" def __init__(self, output_dim, **kwargs):\n",
|
||
" super().__init__(**kwargs)\n",
|
||
" self.hidden = [tf.keras.layers.Dense(30, activation=\"relu\",\n",
|
||
" kernel_initializer=\"he_normal\")\n",
|
||
" for _ in range(5)]\n",
|
||
" self.out = tf.keras.layers.Dense(output_dim)\n",
|
||
" self.reconstruction_mean = tf.keras.metrics.Mean(\n",
|
||
" name=\"reconstruction_error\")\n",
|
||
"\n",
|
||
" def build(self, batch_input_shape):\n",
|
||
" n_inputs = batch_input_shape[-1]\n",
|
||
" self.reconstruct = tf.keras.layers.Dense(n_inputs)\n",
|
||
"\n",
|
||
" def call(self, inputs, training=None):\n",
|
||
" Z = inputs\n",
|
||
" for layer in self.hidden:\n",
|
||
" Z = layer(Z)\n",
|
||
" reconstruction = self.reconstruct(Z)\n",
|
||
" recon_loss = tf.reduce_mean(tf.square(reconstruction - inputs))\n",
|
||
" self.add_loss(0.05 * recon_loss)\n",
|
||
" if training:\n",
|
||
" result = self.reconstruction_mean(recon_loss)\n",
|
||
" self.add_metric(result)\n",
|
||
" return self.out(Z)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 144,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"Epoch 1/5\n",
|
||
"363/363 [==============================] - 2s 1ms/step - loss: 0.8198 - reconstruction_error: 1.0892\n",
|
||
"Epoch 2/5\n",
|
||
"363/363 [==============================] - 0s 984us/step - loss: 0.4778 - reconstruction_error: 0.5583\n",
|
||
"Epoch 3/5\n",
|
||
"363/363 [==============================] - 0s 996us/step - loss: 0.4419 - reconstruction_error: 0.4227\n",
|
||
"Epoch 4/5\n",
|
||
"363/363 [==============================] - 0s 1ms/step - loss: 0.3852 - reconstruction_error: 0.3587\n",
|
||
"Epoch 5/5\n",
|
||
"363/363 [==============================] - 0s 985us/step - loss: 0.3714 - reconstruction_error: 0.3245\n",
|
||
"162/162 [==============================] - 0s 664us/step\n"
|
||
]
|
||
}
|
||
],
|
||
"source": [
|
||
"# extra code\n",
|
||
"tf.keras.utils.set_random_seed(42)\n",
|
||
"model = ReconstructingRegressor(1)\n",
|
||
"model.compile(loss=\"mse\", optimizer=\"nadam\")\n",
|
||
"history = model.fit(X_train_scaled, y_train, epochs=5)\n",
|
||
"y_pred = model.predict(X_test_scaled)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"## Computing Gradients Using Autodiff"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 145,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"def f(w1, w2):\n",
|
||
" return 3 * w1 ** 2 + 2 * w1 * w2"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 146,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"36.000003007075065"
|
||
]
|
||
},
|
||
"execution_count": 146,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"w1, w2 = 5, 3\n",
|
||
"eps = 1e-6\n",
|
||
"(f(w1 + eps, w2) - f(w1, w2)) / eps"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 147,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"10.000000003174137"
|
||
]
|
||
},
|
||
"execution_count": 147,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"(f(w1, w2 + eps) - f(w1, w2)) / eps"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 148,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"w1, w2 = tf.Variable(5.), tf.Variable(3.)\n",
|
||
"with tf.GradientTape() as tape:\n",
|
||
" z = f(w1, w2)\n",
|
||
"\n",
|
||
"gradients = tape.gradient(z, [w1, w2])"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 149,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"[<tf.Tensor: shape=(), dtype=float32, numpy=36.0>,\n",
|
||
" <tf.Tensor: shape=(), dtype=float32, numpy=10.0>]"
|
||
]
|
||
},
|
||
"execution_count": 149,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"gradients"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 150,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"A non-persistent GradientTape can only be used to compute one set of gradients (or jacobians)\n"
|
||
]
|
||
}
|
||
],
|
||
"source": [
|
||
"with tf.GradientTape() as tape:\n",
|
||
" z = f(w1, w2)\n",
|
||
"\n",
|
||
"dz_dw1 = tape.gradient(z, w1) # returns tensor 36.0\n",
|
||
"try:\n",
|
||
" dz_dw2 = tape.gradient(z, w2) # raises a RuntimeError!\n",
|
||
"except RuntimeError as ex:\n",
|
||
" print(ex)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 151,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"with tf.GradientTape(persistent=True) as tape:\n",
|
||
" z = f(w1, w2)\n",
|
||
"\n",
|
||
"dz_dw1 = tape.gradient(z, w1) # returns tensor 36.0\n",
|
||
"dz_dw2 = tape.gradient(z, w2) # returns tensor 10.0, works fine now!\n",
|
||
"del tape"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 152,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"(<tf.Tensor: shape=(), dtype=float32, numpy=36.0>,\n",
|
||
" <tf.Tensor: shape=(), dtype=float32, numpy=10.0>)"
|
||
]
|
||
},
|
||
"execution_count": 152,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"dz_dw1, dz_dw2"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 153,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"c1, c2 = tf.constant(5.), tf.constant(3.)\n",
|
||
"with tf.GradientTape() as tape:\n",
|
||
" z = f(c1, c2)\n",
|
||
"\n",
|
||
"gradients = tape.gradient(z, [c1, c2])"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 154,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"[None, None]"
|
||
]
|
||
},
|
||
"execution_count": 154,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"gradients"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 155,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"with tf.GradientTape() as tape:\n",
|
||
" tape.watch(c1)\n",
|
||
" tape.watch(c2)\n",
|
||
" z = f(c1, c2)\n",
|
||
"\n",
|
||
"gradients = tape.gradient(z, [c1, c2])"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 156,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"[<tf.Tensor: shape=(), dtype=float32, numpy=36.0>,\n",
|
||
" <tf.Tensor: shape=(), dtype=float32, numpy=10.0>]"
|
||
]
|
||
},
|
||
"execution_count": 156,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"gradients"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 157,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"[<tf.Tensor: shape=(), dtype=float32, numpy=136.0>,\n",
|
||
" <tf.Tensor: shape=(), dtype=float32, numpy=30.0>]"
|
||
]
|
||
},
|
||
"execution_count": 157,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"# extra code – if given a vector, tape.gradient() will compute the gradient of\n",
|
||
"# the vector's sum.\n",
|
||
"with tf.GradientTape() as tape:\n",
|
||
" z1 = f(w1, w2 + 2.)\n",
|
||
" z2 = f(w1, w2 + 5.)\n",
|
||
" z3 = f(w1, w2 + 7.)\n",
|
||
"\n",
|
||
"tape.gradient([z1, z2, z3], [w1, w2])"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 158,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"[<tf.Tensor: shape=(), dtype=float32, numpy=136.0>,\n",
|
||
" <tf.Tensor: shape=(), dtype=float32, numpy=30.0>]"
|
||
]
|
||
},
|
||
"execution_count": 158,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"# extra code – shows that we get the same result as the previous cell\n",
|
||
"with tf.GradientTape() as tape:\n",
|
||
" z1 = f(w1, w2 + 2.)\n",
|
||
" z2 = f(w1, w2 + 5.)\n",
|
||
" z3 = f(w1, w2 + 7.)\n",
|
||
" z = z1 + z2 + z3\n",
|
||
"\n",
|
||
"tape.gradient(z, [w1, w2])"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 159,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"# extra code – shows how to compute the jacobians and the hessians\n",
|
||
"with tf.GradientTape(persistent=True) as hessian_tape:\n",
|
||
" with tf.GradientTape() as jacobian_tape:\n",
|
||
" z = f(w1, w2)\n",
|
||
" jacobians = jacobian_tape.gradient(z, [w1, w2])\n",
|
||
"hessians = [hessian_tape.gradient(jacobian, [w1, w2])\n",
|
||
" for jacobian in jacobians]\n",
|
||
"del hessian_tape"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 160,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"[<tf.Tensor: shape=(), dtype=float32, numpy=36.0>,\n",
|
||
" <tf.Tensor: shape=(), dtype=float32, numpy=10.0>]"
|
||
]
|
||
},
|
||
"execution_count": 160,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"jacobians"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 161,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"[[<tf.Tensor: shape=(), dtype=float32, numpy=6.0>,\n",
|
||
" <tf.Tensor: shape=(), dtype=float32, numpy=2.0>],\n",
|
||
" [<tf.Tensor: shape=(), dtype=float32, numpy=2.0>, None]]"
|
||
]
|
||
},
|
||
"execution_count": 161,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"hessians"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 162,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"def f(w1, w2):\n",
|
||
" return 3 * w1 ** 2 + tf.stop_gradient(2 * w1 * w2)\n",
|
||
"\n",
|
||
"with tf.GradientTape() as tape:\n",
|
||
" z = f(w1, w2) # same result as without stop_gradient()\n",
|
||
"\n",
|
||
"gradients = tape.gradient(z, [w1, w2])"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 163,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"[<tf.Tensor: shape=(), dtype=float32, numpy=30.0>, None]"
|
||
]
|
||
},
|
||
"execution_count": 163,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"gradients"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 164,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"[<tf.Tensor: shape=(), dtype=float32, numpy=inf>]"
|
||
]
|
||
},
|
||
"execution_count": 164,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"x = tf.Variable(1e-50)\n",
|
||
"with tf.GradientTape() as tape:\n",
|
||
" z = tf.sqrt(x)\n",
|
||
"\n",
|
||
"tape.gradient(z, [x])"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 165,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(), dtype=float32, numpy=30.0>"
|
||
]
|
||
},
|
||
"execution_count": 165,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"tf.math.log(tf.exp(tf.constant(30., dtype=tf.float32)) + 1.)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 166,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"[<tf.Tensor: shape=(1,), dtype=float32, numpy=array([nan], dtype=float32)>]"
|
||
]
|
||
},
|
||
"execution_count": 166,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"x = tf.Variable([1.0e30])\n",
|
||
"with tf.GradientTape() as tape:\n",
|
||
" z = my_softplus(x)\n",
|
||
"\n",
|
||
"tape.gradient(z, [x])"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 167,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"def my_softplus(z):\n",
|
||
" return tf.math.log(1 + tf.exp(-tf.abs(z))) + tf.maximum(0., z)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"Here is the proof that this equation is equal to log(1 + exp(_z_)):\n",
"* softplus(_z_) = log(1 + exp(_z_))\n",
"* softplus(_z_) = log(1 + exp(_z_)) - log(exp(_z_)) + log(exp(_z_)) ; **just adding and subtracting the same value**\n",
"* softplus(_z_) = log\\[(1 + exp(_z_)) / exp(_z_)\\] + log(exp(_z_)) ; **since log(_a_) - log(_b_) = log(_a_ / _b_)**\n",
"* softplus(_z_) = log\\[(1 + exp(_z_)) / exp(_z_)\\] + _z_ ; **since log(exp(_z_)) = _z_**\n",
"* softplus(_z_) = log\\[1 / exp(_z_) + exp(_z_) / exp(_z_)\\] + _z_ ; **since (1 + _a_) / _b_ = 1 / _b_ + _a_ / _b_**\n",
"* softplus(_z_) = log\\[exp(–_z_) + 1\\] + _z_ ; **since 1 / exp(_z_) = exp(–_z_), and exp(_z_) / exp(_z_) = 1**\n",
"* softplus(_z_) = softplus(–_z_) + _z_ ; **we recognize the definition at the top, but with –_z_**\n",
"* softplus(_z_) = softplus(–|_z_|) + max(0, _z_) ; **if you consider both cases, _z_ < 0 or _z_ ≥ 0, you will see that this works**"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 168,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"@tf.custom_gradient\n",
|
||
"def my_softplus(z):\n",
|
||
" def my_softplus_gradients(grads): # grads = backprop'ed from upper layers\n",
|
||
" return grads * (1 - 1 / (1 + tf.exp(z))) # stable grads of softplus\n",
|
||
"\n",
|
||
" result = tf.math.log(1 + tf.exp(-tf.abs(z))) + tf.maximum(0., z)\n",
|
||
" return result, my_softplus_gradients"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 169,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"(<tf.Tensor: shape=(1,), dtype=float32, numpy=array([1000.], dtype=float32)>,\n",
|
||
" [<tf.Tensor: shape=(1,), dtype=float32, numpy=array([1.], dtype=float32)>])"
|
||
]
|
||
},
|
||
"execution_count": 169,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"# extra code – shows that the function is now stable, as well as its gradients\n",
|
||
"x = tf.Variable([1000.])\n",
|
||
"with tf.GradientTape() as tape:\n",
|
||
" z = my_softplus(x)\n",
|
||
"\n",
|
||
"z, tape.gradient(z, [x])"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"## Custom Training Loops"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 170,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"tf.keras.utils.set_random_seed(42) # extra code – to ensure reproducibility\n",
|
||
"l2_reg = tf.keras.regularizers.l2(0.05)\n",
|
||
"model = tf.keras.models.Sequential([\n",
|
||
" tf.keras.layers.Dense(30, activation=\"relu\", kernel_initializer=\"he_normal\",\n",
|
||
" kernel_regularizer=l2_reg),\n",
|
||
" tf.keras.layers.Dense(1, kernel_regularizer=l2_reg)\n",
|
||
"])"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 171,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"def random_batch(X, y, batch_size=32):\n",
|
||
" idx = np.random.randint(len(X), size=batch_size)\n",
|
||
" return X[idx], y[idx]"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 172,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"def print_status_bar(step, total, loss, metrics=None):\n",
|
||
" metrics = \" - \".join([f\"{m.name}: {m.result():.4f}\"\n",
|
||
" for m in [loss] + (metrics or [])])\n",
|
||
" end = \"\" if step < total else \"\\n\"\n",
|
||
" print(f\"\\r{step}/{total} - \" + metrics, end=end)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 173,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"tf.keras.utils.set_random_seed(42)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 174,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"n_epochs = 5\n",
|
||
"batch_size = 32\n",
|
||
"n_steps = len(X_train) // batch_size\n",
|
||
"optimizer = tf.keras.optimizers.SGD(learning_rate=0.01)\n",
|
||
"loss_fn = tf.keras.losses.mean_squared_error\n",
|
||
"mean_loss = tf.keras.metrics.Mean()\n",
|
||
"metrics = [tf.keras.metrics.MeanAbsoluteError()]"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 175,
|
||
"metadata": {
|
||
"scrolled": true
|
||
},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"Epoch 1/5\n",
|
||
"362/362 - mean: 3.5419 - mean_absolute_error: 0.6640\n",
|
||
"Epoch 2/5\n",
|
||
"362/362 - mean: 1.8693 - mean_absolute_error: 0.5431\n",
|
||
"Epoch 3/5\n",
|
||
"362/362 - mean: 1.1428 - mean_absolute_error: 0.5030\n",
|
||
"Epoch 4/5\n",
|
||
"362/362 - mean: 0.8501 - mean_absolute_error: 0.4977\n",
|
||
"Epoch 5/5\n",
|
||
"362/362 - mean: 0.7280 - mean_absolute_error: 0.5014\n"
|
||
]
|
||
}
|
||
],
|
||
"source": [
|
||
"for epoch in range(1, n_epochs + 1):\n",
|
||
" print(f\"Epoch {epoch}/{n_epochs}\")\n",
|
||
" for step in range(1, n_steps + 1):\n",
|
||
" X_batch, y_batch = random_batch(X_train_scaled, y_train)\n",
|
||
" with tf.GradientTape() as tape:\n",
|
||
" y_pred = model(X_batch, training=True)\n",
|
||
" main_loss = tf.reduce_mean(loss_fn(y_batch, y_pred))\n",
|
||
" loss = tf.add_n([main_loss] + model.losses)\n",
|
||
"\n",
|
||
" gradients = tape.gradient(loss, model.trainable_variables)\n",
|
||
" optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n",
|
||
"\n",
|
||
" # extra code – if your model has variable constraints\n",
|
||
" for variable in model.variables:\n",
|
||
" if variable.constraint is not None:\n",
|
||
" variable.assign(variable.constraint(variable))\n",
|
||
"\n",
|
||
" mean_loss(loss)\n",
|
||
" for metric in metrics:\n",
|
||
" metric(y_batch, y_pred)\n",
|
||
"\n",
|
||
" print_status_bar(step, n_steps, mean_loss, metrics)\n",
|
||
"\n",
|
||
" for metric in [mean_loss] + metrics:\n",
|
||
" metric.reset_states()"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 176,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"application/vnd.jupyter.widget-view+json": {
|
||
"model_id": "998f39b2e54e45c39cc5e49762920161",
|
||
"version_major": 2,
|
||
"version_minor": 0
|
||
},
|
||
"text/plain": [
|
||
"All epochs: 0%| | 0/5 [00:00<?, ?it/s]"
|
||
]
|
||
},
|
||
"metadata": {},
|
||
"output_type": "display_data"
|
||
},
|
||
{
|
||
"data": {
|
||
"application/vnd.jupyter.widget-view+json": {
|
||
"model_id": "b7ff21aefc9d4806a86c08c815629763",
|
||
"version_major": 2,
|
||
"version_minor": 0
|
||
},
|
||
"text/plain": [
|
||
"Epoch 1/5: 0%| | 0/362 [00:00<?, ?it/s]"
|
||
]
|
||
},
|
||
"metadata": {},
|
||
"output_type": "display_data"
|
||
},
|
||
{
|
||
"data": {
|
||
"application/vnd.jupyter.widget-view+json": {
|
||
"model_id": "fd9c1c7f1eae4ec5b989f75c4c3bc616",
|
||
"version_major": 2,
|
||
"version_minor": 0
|
||
},
|
||
"text/plain": [
|
||
"Epoch 2/5: 0%| | 0/362 [00:00<?, ?it/s]"
|
||
]
|
||
},
|
||
"metadata": {},
|
||
"output_type": "display_data"
|
||
},
|
||
{
|
||
"data": {
|
||
"application/vnd.jupyter.widget-view+json": {
|
||
"model_id": "1c66b6ec68184579a8611026da49e3be",
|
||
"version_major": 2,
|
||
"version_minor": 0
|
||
},
|
||
"text/plain": [
|
||
"Epoch 3/5: 0%| | 0/362 [00:00<?, ?it/s]"
|
||
]
|
||
},
|
||
"metadata": {},
|
||
"output_type": "display_data"
|
||
},
|
||
{
|
||
"data": {
|
||
"application/vnd.jupyter.widget-view+json": {
|
||
"model_id": "47f209f17ce042bdb1c5e20900c49409",
|
||
"version_major": 2,
|
||
"version_minor": 0
|
||
},
|
||
"text/plain": [
|
||
"Epoch 4/5: 0%| | 0/362 [00:00<?, ?it/s]"
|
||
]
|
||
},
|
||
"metadata": {},
|
||
"output_type": "display_data"
|
||
},
|
||
{
|
||
"data": {
|
||
"application/vnd.jupyter.widget-view+json": {
|
||
"model_id": "cadab1abf01c495998a43bdcbe0fff21",
|
||
"version_major": 2,
|
||
"version_minor": 0
|
||
},
|
||
"text/plain": [
|
||
"Epoch 5/5: 0%| | 0/362 [00:00<?, ?it/s]"
|
||
]
|
||
},
|
||
"metadata": {},
|
||
"output_type": "display_data"
|
||
}
|
||
],
|
||
"source": [
|
||
"# extra code – shows how to use the tqdm package to display nice progress bars\n",
|
||
"\n",
|
||
"from tqdm.notebook import trange\n",
|
||
"from collections import OrderedDict\n",
|
||
"with trange(1, n_epochs + 1, desc=\"All epochs\") as epochs:\n",
|
||
" for epoch in epochs:\n",
|
||
" with trange(1, n_steps + 1, desc=f\"Epoch {epoch}/{n_epochs}\") as steps:\n",
|
||
" for step in steps:\n",
|
||
" X_batch, y_batch = random_batch(X_train_scaled, y_train)\n",
|
||
" with tf.GradientTape() as tape:\n",
|
||
" y_pred = model(X_batch)\n",
|
||
" main_loss = tf.reduce_mean(loss_fn(y_batch, y_pred))\n",
|
||
" loss = tf.add_n([main_loss] + model.losses)\n",
|
||
"\n",
|
||
" gradients = tape.gradient(loss, model.trainable_variables)\n",
|
||
" optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n",
|
||
"\n",
|
||
" for variable in model.variables:\n",
|
||
" if variable.constraint is not None:\n",
|
||
" variable.assign(variable.constraint(variable))\n",
|
||
"\n",
|
||
" status = OrderedDict()\n",
|
||
" mean_loss(loss)\n",
|
||
" status[\"loss\"] = mean_loss.result().numpy()\n",
|
||
" for metric in metrics:\n",
|
||
" metric(y_batch, y_pred)\n",
|
||
" status[metric.name] = metric.result().numpy()\n",
|
||
"\n",
|
||
" steps.set_postfix(status)\n",
|
||
"\n",
|
||
" for metric in [mean_loss] + metrics:\n",
|
||
" metric.reset_states()"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"## TensorFlow Functions"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 177,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"def cube(x):\n",
|
||
" return x ** 3"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 178,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"8"
|
||
]
|
||
},
|
||
"execution_count": 178,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"cube(2)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 179,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(), dtype=float32, numpy=8.0>"
|
||
]
|
||
},
|
||
"execution_count": 179,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"cube(tf.constant(2.0))"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 180,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tensorflow.python.eager.polymorphic_function.polymorphic_function.Function at 0x1c7cc7fc5b0>"
|
||
]
|
||
},
|
||
"execution_count": 180,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"tf_cube = tf.function(cube)\n",
|
||
"tf_cube"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 181,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(), dtype=int32, numpy=8>"
|
||
]
|
||
},
|
||
"execution_count": 181,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"tf_cube(2)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 182,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(), dtype=float32, numpy=8.0>"
|
||
]
|
||
},
|
||
"execution_count": 182,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"tf_cube(tf.constant(2.0))"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 183,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"@tf.function\n",
|
||
"def tf_cube(x):\n",
|
||
" return x ** 3"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
"**Note:** the rest of the code in this section is in Appendix D."
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"### TF Functions and Concrete Functions"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 184,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<ConcreteFunction tf_cube(x) at 0x1C7C8F8FCD0>"
|
||
]
|
||
},
|
||
"execution_count": 184,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"concrete_function = tf_cube.get_concrete_function(tf.constant(2.0))\n",
|
||
"concrete_function"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 185,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(), dtype=float32, numpy=8.0>"
|
||
]
|
||
},
|
||
"execution_count": 185,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"concrete_function(tf.constant(2.0))"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 186,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"True"
|
||
]
|
||
},
|
||
"execution_count": 186,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"concrete_function is tf_cube.get_concrete_function(tf.constant(2.0))"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"### Exploring Function Definitions and Graphs"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 187,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tensorflow.python.framework.func_graph.FuncGraph at 0x1c7cc83edd0>"
|
||
]
|
||
},
|
||
"execution_count": 187,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"concrete_function.graph"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 188,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"[<tf.Operation 'x' type=Placeholder>,\n",
|
||
" <tf.Operation 'pow/y' type=Const>,\n",
|
||
" <tf.Operation 'pow' type=Pow>,\n",
|
||
" <tf.Operation 'Identity' type=Identity>]"
|
||
]
|
||
},
|
||
"execution_count": 188,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"ops = concrete_function.graph.get_operations()\n",
|
||
"ops"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 189,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"[<tf.Tensor 'x:0' shape=() dtype=float32>,\n",
|
||
" <tf.Tensor 'pow/y:0' shape=() dtype=float32>]"
|
||
]
|
||
},
|
||
"execution_count": 189,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"pow_op = ops[2]\n",
|
||
"list(pow_op.inputs)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 190,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"[<tf.Tensor 'pow:0' shape=() dtype=float32>]"
|
||
]
|
||
},
|
||
"execution_count": 190,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"pow_op.outputs"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 191,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Operation 'x' type=Placeholder>"
|
||
]
|
||
},
|
||
"execution_count": 191,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"concrete_function.graph.get_operation_by_name('x')"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 192,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor 'Identity:0' shape=() dtype=float32>"
|
||
]
|
||
},
|
||
"execution_count": 192,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"concrete_function.graph.get_tensor_by_name('Identity:0')"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 193,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"name: \"__inference_tf_cube_592461\"\n",
|
||
"input_arg {\n",
|
||
" name: \"x\"\n",
|
||
" type: DT_FLOAT\n",
|
||
"}\n",
|
||
"output_arg {\n",
|
||
" name: \"identity\"\n",
|
||
" type: DT_FLOAT\n",
|
||
"}"
|
||
]
|
||
},
|
||
"execution_count": 193,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"concrete_function.function_def.signature"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"### How TF Functions Trace Python Functions to Extract Their Computation Graphs"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 194,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"@tf.function\n",
|
||
"def tf_cube(x):\n",
|
||
" print(f\"x = {x}\")\n",
|
||
" return x ** 3"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 195,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"x = Tensor(\"x:0\", shape=(), dtype=float32)\n"
|
||
]
|
||
}
|
||
],
|
||
"source": [
|
||
"result = tf_cube(tf.constant(2.0))"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 196,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(), dtype=float32, numpy=8.0>"
|
||
]
|
||
},
|
||
"execution_count": 196,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"result"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 197,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"x = 2\n"
|
||
]
|
||
}
|
||
],
|
||
"source": [
|
||
"result = tf_cube(2)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 198,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"x = 3\n"
|
||
]
|
||
}
|
||
],
|
||
"source": [
|
||
"result = tf_cube(3)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 199,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"x = Tensor(\"x:0\", shape=(1, 2), dtype=float32)\n"
|
||
]
|
||
}
|
||
],
|
||
"source": [
|
||
"result = tf_cube(tf.constant([[1., 2.]])) # New shape: trace!"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 200,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"x = Tensor(\"x:0\", shape=(2, 2), dtype=float32)\n",
|
||
"WARNING:tensorflow:5 out of the last 5 calls to <function tf_cube at 0x000001C7CC83F400> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has reduce_retracing=True option that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/guide/function#controlling_retracing and https://www.tensorflow.org/api_docs/python/tf/function for more details.\n"
|
||
]
|
||
},
|
||
{
|
||
"name": "stderr",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"WARNING:tensorflow:5 out of the last 5 calls to <function tf_cube at 0x000001C7CC83F400> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has reduce_retracing=True option that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/guide/function#controlling_retracing and https://www.tensorflow.org/api_docs/python/tf/function for more details.\n"
|
||
]
|
||
}
|
||
],
|
||
"source": [
|
||
"result = tf_cube(tf.constant([[3., 4.], [5., 6.]])) # New shape: trace!"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 201,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"result = tf_cube(tf.constant([[7., 8.], [9., 10.]])) # Same shape: no trace"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"It is also possible to specify a particular input signature:"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 202,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"@tf.function(input_signature=[tf.TensorSpec([None, 28, 28], tf.float32)])\n",
|
||
"def shrink(images):\n",
|
||
" print(\"Tracing\", images) # extra code to show when tracing happens\n",
|
||
" return images[:, ::2, ::2] # drop half the rows and columns"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 203,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"tf.keras.utils.set_random_seed(42)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 204,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"Tracing Tensor(\"images:0\", shape=(None, 28, 28), dtype=float32)\n"
|
||
]
|
||
}
|
||
],
|
||
"source": [
|
||
"img_batch_1 = tf.random.uniform(shape=[100, 28, 28])\n",
|
||
"img_batch_2 = tf.random.uniform(shape=[50, 28, 28])\n",
|
||
"preprocessed_images = shrink(img_batch_1) # Works fine, traces the function\n",
|
||
"preprocessed_images = shrink(img_batch_2) # Works fine, same concrete function"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 205,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"Binding inputs to tf.function failed due to `Can not cast TensorSpec(shape=(2, 2, 2), dtype=tf.float32, name=None) to TensorSpec(shape=(None, 28, 28), dtype=tf.float32, name=None)`. Received args: (<tf.Tensor: shape=(2, 2, 2), dtype=float32, numpy=\n",
|
||
"array([[[0.7413678 , 0.62854624],\n",
|
||
" [0.01738465, 0.3431449 ]],\n",
|
||
"\n",
|
||
" [[0.51063764, 0.3777541 ],\n",
|
||
" [0.07321596, 0.02137029]]], dtype=float32)>,) and kwargs: {} for signature: (images: TensorSpec(shape=(None, 28, 28), dtype=tf.float32, name=None)).\n"
|
||
]
|
||
}
|
||
],
|
||
"source": [
|
||
"img_batch_3 = tf.random.uniform(shape=[2, 2, 2])\n",
|
||
"try:\n",
|
||
" preprocessed_images = shrink(img_batch_3) # TypeError! Incompatible inputs\n",
|
||
"except TypeError as ex:\n",
|
||
" print(ex)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"### Using Autograph to Capture Control Flow"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"A \"static\" `for` loop using `range()`:"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 206,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"@tf.function\n",
|
||
"def add_10(x):\n",
|
||
" for i in range(10):\n",
|
||
" x += 1\n",
|
||
" return x"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 207,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(), dtype=int32, numpy=15>"
|
||
]
|
||
},
|
||
"execution_count": 207,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"add_10(tf.constant(5))"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 208,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"[<tf.Operation 'x' type=Placeholder>,\n",
|
||
" <tf.Operation 'add/y' type=Const>,\n",
|
||
" <tf.Operation 'add' type=AddV2>,\n",
|
||
" <tf.Operation 'add_1/y' type=Const>,\n",
|
||
" <tf.Operation 'add_1' type=AddV2>,\n",
|
||
" <tf.Operation 'add_2/y' type=Const>,\n",
|
||
" <tf.Operation 'add_2' type=AddV2>,\n",
|
||
" <tf.Operation 'add_3/y' type=Const>,\n",
|
||
" <tf.Operation 'add_3' type=AddV2>,\n",
|
||
" <tf.Operation 'add_4/y' type=Const>,\n",
|
||
" <tf.Operation 'add_4' type=AddV2>,\n",
|
||
" <tf.Operation 'add_5/y' type=Const>,\n",
|
||
" <tf.Operation 'add_5' type=AddV2>,\n",
|
||
" <tf.Operation 'add_6/y' type=Const>,\n",
|
||
" <tf.Operation 'add_6' type=AddV2>,\n",
|
||
" <tf.Operation 'add_7/y' type=Const>,\n",
|
||
" <tf.Operation 'add_7' type=AddV2>,\n",
|
||
" <tf.Operation 'add_8/y' type=Const>,\n",
|
||
" <tf.Operation 'add_8' type=AddV2>,\n",
|
||
" <tf.Operation 'add_9/y' type=Const>,\n",
|
||
" <tf.Operation 'add_9' type=AddV2>,\n",
|
||
" <tf.Operation 'Identity' type=Identity>]"
|
||
]
|
||
},
|
||
"execution_count": 208,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"add_10.get_concrete_function(tf.constant(5)).graph.get_operations()"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"A \"dynamic\" loop using `tf.while_loop()`:"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 209,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"# extra code – shows how to use tf.while_loop (usually @tf.function is simpler)\n",
|
||
"@tf.function\n",
|
||
"def add_10(x):\n",
|
||
" condition = lambda i, x: tf.less(i, 10)\n",
|
||
" body = lambda i, x: (tf.add(i, 1), tf.add(x, 1))\n",
|
||
" final_i, final_x = tf.while_loop(condition, body, [tf.constant(0), x])\n",
|
||
" return final_x"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 210,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(), dtype=int32, numpy=15>"
|
||
]
|
||
},
|
||
"execution_count": 210,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"add_10(tf.constant(5))"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 211,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"[<tf.Operation 'x' type=Placeholder>,\n",
|
||
" <tf.Operation 'Const' type=Const>,\n",
|
||
" <tf.Operation 'while/maximum_iterations' type=Const>,\n",
|
||
" <tf.Operation 'while/loop_counter' type=Const>,\n",
|
||
" <tf.Operation 'while' type=StatelessWhile>,\n",
|
||
" <tf.Operation 'Identity' type=Identity>]"
|
||
]
|
||
},
|
||
"execution_count": 211,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"add_10.get_concrete_function(tf.constant(5)).graph.get_operations()"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"A \"dynamic\" `for` loop using `tf.range()` (captured by autograph):"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 212,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"@tf.function\n",
|
||
"def add_10(x):\n",
|
||
" for i in tf.range(10):\n",
|
||
" x = x + 1\n",
|
||
" return x"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 213,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"[<tf.Operation 'x' type=Placeholder>,\n",
|
||
" <tf.Operation 'range/start' type=Const>,\n",
|
||
" <tf.Operation 'range/limit' type=Const>,\n",
|
||
" <tf.Operation 'range/delta' type=Const>,\n",
|
||
" <tf.Operation 'range' type=Range>,\n",
|
||
" <tf.Operation 'sub' type=Sub>,\n",
|
||
" <tf.Operation 'floordiv' type=FloorDiv>,\n",
|
||
" <tf.Operation 'mod' type=FloorMod>,\n",
|
||
" <tf.Operation 'zeros_like' type=Const>,\n",
|
||
" <tf.Operation 'NotEqual' type=NotEqual>,\n",
|
||
" <tf.Operation 'Cast' type=Cast>,\n",
|
||
" <tf.Operation 'add' type=AddV2>,\n",
|
||
" <tf.Operation 'zeros_like_1' type=Const>,\n",
|
||
" <tf.Operation 'Maximum' type=Maximum>,\n",
|
||
" <tf.Operation 'while/maximum_iterations' type=Const>,\n",
|
||
" <tf.Operation 'while/loop_counter' type=Const>,\n",
|
||
" <tf.Operation 'while' type=StatelessWhile>,\n",
|
||
" <tf.Operation 'Identity' type=Identity>]"
|
||
]
|
||
},
|
||
"execution_count": 213,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"add_10.get_concrete_function(tf.constant(0)).graph.get_operations()"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"### Handling Variables and Other Resources in TF Functions"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 214,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(), dtype=int32, numpy=2>"
|
||
]
|
||
},
|
||
"execution_count": 214,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"counter = tf.Variable(0)\n",
|
||
"\n",
|
||
"@tf.function\n",
|
||
"def increment(counter, c=1):\n",
|
||
" return counter.assign_add(c)\n",
|
||
"\n",
|
||
"increment(counter) # counter is now equal to 1\n",
|
||
"increment(counter) # counter is now equal to 2"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 215,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"name: \"counter\"\n",
|
||
"type: DT_RESOURCE"
|
||
]
|
||
},
|
||
"execution_count": 215,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"function_def = increment.get_concrete_function(counter).function_def\n",
|
||
"function_def.signature.input_arg[0]"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 216,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"counter = tf.Variable(0)\n",
|
||
"\n",
|
||
"@tf.function\n",
|
||
"def increment(c=1):\n",
|
||
" return counter.assign_add(c)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 217,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(), dtype=int32, numpy=2>"
|
||
]
|
||
},
|
||
"execution_count": 217,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"increment()\n",
|
||
"increment()"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 218,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"name: \"assignaddvariableop_resource\"\n",
|
||
"type: DT_RESOURCE"
|
||
]
|
||
},
|
||
"execution_count": 218,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"function_def = increment.get_concrete_function().function_def\n",
|
||
"function_def.signature.input_arg[0]"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 219,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"class Counter:\n",
|
||
" def __init__(self):\n",
|
||
" self.counter = tf.Variable(0)\n",
|
||
"\n",
|
||
" @tf.function\n",
|
||
" def increment(self, c=1):\n",
|
||
" return self.counter.assign_add(c)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 220,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(), dtype=int32, numpy=2>"
|
||
]
|
||
},
|
||
"execution_count": 220,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"c = Counter()\n",
|
||
"c.increment()\n",
|
||
"c.increment()"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 221,
|
||
"metadata": {
|
||
"scrolled": true
|
||
},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"def tf__add(x):\n",
|
||
" with ag__.FunctionScope('add_10', 'fscope', ag__.ConversionOptions(recursive=True, user_requested=True, optional_features=(), internal_convert_user_code=True)) as fscope:\n",
|
||
" do_return = False\n",
|
||
" retval_ = ag__.UndefinedReturnValue()\n",
|
||
"\n",
|
||
" def get_state():\n",
|
||
" return (x,)\n",
|
||
"\n",
|
||
" def set_state(vars_):\n",
|
||
" nonlocal x\n",
|
||
" (x,) = vars_\n",
|
||
"\n",
|
||
" def loop_body(itr):\n",
|
||
" nonlocal x\n",
|
||
" i = itr\n",
|
||
" x = ag__.ld(x)\n",
|
||
" x += 1\n",
|
||
" i = ag__.Undefined('i')\n",
|
||
" ag__.for_stmt(ag__.converted_call(ag__.ld(tf).range, (10,), None, fscope), None, loop_body, get_state, set_state, ('x',), {'iterate_names': 'i'})\n",
|
||
" try:\n",
|
||
" do_return = True\n",
|
||
" retval_ = ag__.ld(x)\n",
|
||
" except:\n",
|
||
" do_return = False\n",
|
||
" raise\n",
|
||
" return fscope.ret(retval_, do_return)\n",
|
||
"\n"
|
||
]
|
||
}
|
||
],
|
||
"source": [
|
||
"@tf.function\n",
|
||
"def add_10(x):\n",
|
||
" for i in tf.range(10):\n",
|
||
" x += 1\n",
|
||
" return x\n",
|
||
"\n",
|
||
"print(tf.autograph.to_code(add_10.python_function))"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 222,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"# extra code – shows how to display the autograph code with syntax highlighting\n",
|
||
"def display_tf_code(func):\n",
|
||
" from IPython.display import display, Markdown\n",
|
||
" if hasattr(func, \"python_function\"):\n",
|
||
" func = func.python_function\n",
|
||
" code = tf.autograph.to_code(func)\n",
|
||
" display(Markdown(f'```python\\n{code}\\n```'))"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 223,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/markdown": [
|
||
"```python\n",
|
||
"def tf__add(x):\n",
|
||
" with ag__.FunctionScope('add_10', 'fscope', ag__.ConversionOptions(recursive=True, user_requested=True, optional_features=(), internal_convert_user_code=True)) as fscope:\n",
|
||
" do_return = False\n",
|
||
" retval_ = ag__.UndefinedReturnValue()\n",
|
||
"\n",
|
||
" def get_state():\n",
|
||
" return (x,)\n",
|
||
"\n",
|
||
" def set_state(vars_):\n",
|
||
" nonlocal x\n",
|
||
" (x,) = vars_\n",
|
||
"\n",
|
||
" def loop_body(itr):\n",
|
||
" nonlocal x\n",
|
||
" i = itr\n",
|
||
" x = ag__.ld(x)\n",
|
||
" x += 1\n",
|
||
" i = ag__.Undefined('i')\n",
|
||
" ag__.for_stmt(ag__.converted_call(ag__.ld(tf).range, (10,), None, fscope), None, loop_body, get_state, set_state, ('x',), {'iterate_names': 'i'})\n",
|
||
" try:\n",
|
||
" do_return = True\n",
|
||
" retval_ = ag__.ld(x)\n",
|
||
" except:\n",
|
||
" do_return = False\n",
|
||
" raise\n",
|
||
" return fscope.ret(retval_, do_return)\n",
|
||
"\n",
|
||
"```"
|
||
],
|
||
"text/plain": [
|
||
"<IPython.core.display.Markdown object>"
|
||
]
|
||
},
|
||
"metadata": {},
|
||
"output_type": "display_data"
|
||
}
|
||
],
|
||
"source": [
|
||
"display_tf_code(add_10)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"## Using TF Functions with tf.keras (or Not)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"By default, tf.keras will automatically convert your custom code into TF Functions, so there is no need to use\n",
|
||
"`tf.function()`:"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 224,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"# Custom loss function\n",
|
||
"def my_mse(y_true, y_pred):\n",
|
||
" print(\"Tracing loss my_mse()\")\n",
|
||
" return tf.reduce_mean(tf.square(y_pred - y_true))"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 225,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"# Custom metric function\n",
|
||
"def my_mae(y_true, y_pred):\n",
|
||
" print(\"Tracing metric my_mae()\")\n",
|
||
" return tf.reduce_mean(tf.abs(y_pred - y_true))"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 226,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"# Custom layer\n",
|
||
"class MyDense(tf.keras.layers.Layer):\n",
|
||
" def __init__(self, units, activation=None, **kwargs):\n",
|
||
" super().__init__(**kwargs)\n",
|
||
" self.units = units\n",
|
||
" self.activation = tf.keras.activations.get(activation)\n",
|
||
"\n",
|
||
" def build(self, input_shape):\n",
|
||
" self.kernel = self.add_weight(name='kernel',\n",
|
||
" shape=(input_shape[1], self.units),\n",
|
||
" initializer='uniform',\n",
|
||
" trainable=True)\n",
|
||
" self.biases = self.add_weight(name='bias',\n",
|
||
" shape=(self.units,),\n",
|
||
" initializer='zeros',\n",
|
||
" trainable=True)\n",
|
||
"\n",
|
||
" def call(self, X):\n",
|
||
" print(\"Tracing MyDense.call()\")\n",
|
||
" return self.activation(X @ self.kernel + self.biases)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 227,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"tf.keras.utils.set_random_seed(42)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 228,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"# Custom model\n",
|
||
"class MyModel(tf.keras.Model):\n",
|
||
" def __init__(self, **kwargs):\n",
|
||
" super().__init__(**kwargs)\n",
|
||
" self.hidden1 = MyDense(30, activation=\"relu\")\n",
|
||
" self.hidden2 = MyDense(30, activation=\"relu\")\n",
|
||
" self.output_ = MyDense(1)\n",
|
||
"\n",
|
||
" def call(self, input):\n",
|
||
" print(\"Tracing MyModel.call()\")\n",
|
||
" hidden1 = self.hidden1(input)\n",
|
||
" hidden2 = self.hidden2(hidden1)\n",
|
||
" concat = tf.keras.layers.concatenate([input, hidden2])\n",
|
||
" output = self.output_(concat)\n",
|
||
" return output\n",
|
||
"\n",
|
||
"model = MyModel()"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 229,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"model.compile(loss=my_mse, optimizer=\"nadam\", metrics=[my_mae])"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 230,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"Epoch 1/2\n",
|
||
"Tracing MyModel.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing loss my_mse()\n",
|
||
"Tracing metric my_mae()\n",
|
||
"Tracing MyModel.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing loss my_mse()\n",
|
||
"Tracing metric my_mae()\n",
|
||
"338/363 [==========================>...] - ETA: 0s - loss: 1.4975 - my_mae: 0.8461Tracing MyModel.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing loss my_mse()\n",
|
||
"Tracing metric my_mae()\n",
|
||
"363/363 [==============================] - 1s 1ms/step - loss: 1.4303 - my_mae: 0.8219 - val_loss: 0.4932 - val_my_mae: 0.4764\n",
|
||
"Epoch 2/2\n",
|
||
"363/363 [==============================] - 0s 1ms/step - loss: 0.4386 - my_mae: 0.4760 - val_loss: 1.0322 - val_my_mae: 0.4793\n",
|
||
"162/162 [==============================] - 0s 659us/step - loss: 0.4204 - my_mae: 0.4711\n"
|
||
]
|
||
},
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"[0.4203691780567169, 0.4711269736289978]"
|
||
]
|
||
},
|
||
"execution_count": 230,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"model.fit(X_train_scaled, y_train, epochs=2,\n",
|
||
" validation_data=(X_valid_scaled, y_valid))\n",
|
||
"model.evaluate(X_test_scaled, y_test)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"You can turn this off by creating the model with `dynamic=True` (or calling `super().__init__(dynamic=True, **kwargs)` in the model's constructor):"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 231,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"tf.keras.utils.set_random_seed(42)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 232,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"model = MyModel(dynamic=True)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 233,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"model.compile(loss=my_mse, optimizer=\"nadam\", metrics=[my_mae])"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"Now the custom code will be called at each iteration. Let's fit, validate and evaluate with tiny datasets to avoid getting too much output:"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 234,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"Tracing MyModel.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing loss my_mse()\n",
|
||
"Tracing metric my_mae()\n",
|
||
"Tracing MyModel.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing loss my_mse()\n",
|
||
"Tracing metric my_mae()\n",
|
||
"Tracing MyModel.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing loss my_mse()\n",
|
||
"Tracing metric my_mae()\n",
|
||
"Tracing MyModel.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing loss my_mse()\n",
|
||
"Tracing metric my_mae()\n",
|
||
"Tracing MyModel.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing loss my_mse()\n",
|
||
"Tracing metric my_mae()\n",
|
||
"Tracing MyModel.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing loss my_mse()\n",
|
||
"Tracing metric my_mae()\n"
|
||
]
|
||
},
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"[5.5450897216796875, 2.0603599548339844]"
|
||
]
|
||
},
|
||
"execution_count": 234,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"model.fit(X_train_scaled[:64], y_train[:64], epochs=1,\n",
|
||
" validation_data=(X_valid_scaled[:64], y_valid[:64]), verbose=0)\n",
|
||
"model.evaluate(X_test_scaled[:64], y_test[:64], verbose=0)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"Alternatively, you can compile a model with `run_eagerly=True`:"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 235,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"tf.keras.utils.set_random_seed(42)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 236,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"model = MyModel()"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 237,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"model.compile(loss=my_mse, optimizer=\"nadam\", metrics=[my_mae], run_eagerly=True)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 238,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"Tracing MyModel.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing loss my_mse()\n",
|
||
"Tracing metric my_mae()\n",
|
||
"Tracing MyModel.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing loss my_mse()\n",
|
||
"Tracing metric my_mae()\n",
|
||
"Tracing MyModel.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing loss my_mse()\n",
|
||
"Tracing metric my_mae()\n",
|
||
"Tracing MyModel.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing loss my_mse()\n",
|
||
"Tracing metric my_mae()\n",
|
||
"Tracing MyModel.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing loss my_mse()\n",
|
||
"Tracing metric my_mae()\n",
|
||
"Tracing MyModel.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing loss my_mse()\n",
|
||
"Tracing metric my_mae()\n"
|
||
]
|
||
},
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"[5.5450897216796875, 2.0603599548339844]"
|
||
]
|
||
},
|
||
"execution_count": 238,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"model.fit(X_train_scaled[:64], y_train[:64], epochs=1,\n",
|
||
" validation_data=(X_valid_scaled[:64], y_valid[:64]), verbose=0)\n",
|
||
"model.evaluate(X_test_scaled[:64], y_test[:64], verbose=0)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"## Extra Material – Custom Optimizers"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"Defining custom optimizers is not very common, but in case you are one of the happy few who get to write one, here is an example:"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 239,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
class MyMomentumOptimizer(tf.keras.optimizers.Optimizer):
    def __init__(self, learning_rate=0.001, momentum=0.9,
                 name="MyMomentumOptimizer", **kwargs):
        """Gradient descent with momentum optimizer.

        Args:
            learning_rate: step size (a float, or a learning-rate schedule).
            momentum: decay factor applied to the accumulated velocity.
            name: optimizer name, used for variable scoping.
            **kwargs: forwarded to the base `Optimizer` (e.g., `clipnorm`).
        """
        super().__init__(name, **kwargs)
        self._learning_rate = self._build_learning_rate(learning_rate)
        self.momentum = momentum

    def build(self, var_list):
        """Initialize optimizer variables (one momentum slot per model var).

        Args:
            var_list: list of model variables to build SGD variables on.
        """
        super().build(var_list)
        if getattr(self, "_built", False):
            return  # build() may run more than once; create the slots only once
        self.momentums = []
        for var in var_list:
            self.momentums.append(
                self.add_variable_from_reference(
                    model_variable=var, variable_name="m"
                )
            )
        self._built = True

    def update_step(self, gradient, variable):
        """Update step given gradient and the associated model variable."""
        lr = tf.cast(self.learning_rate, variable.dtype)
        var_key = self._var_key(variable)
        momentum = tf.cast(self.momentum, variable.dtype)
        # Look up this variable's momentum slot created in build().
        m = self.momentums[self._index_dict[var_key]]
        if m is None:
            # No momentum slot available: fall back to a plain SGD step.
            variable.assign_add(-gradient * lr)
        else:
            # m <- momentum * m - lr * grad, then apply the velocity.
            m.assign(-gradient * lr + m * momentum)
            variable.assign_add(m)

    def get_config(self):
        """Return the config so the optimizer can be saved and re-created."""
        base_config = super().get_config()
        return {
            **base_config,
            "learning_rate": self._serialize_hyperparameter(self._learning_rate),
            "momentum": self.momentum,
        }
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 240,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"Epoch 1/5\n",
|
||
"363/363 [==============================] - 0s 666us/step - loss: 1.1844\n",
|
||
"Epoch 2/5\n",
|
||
"363/363 [==============================] - 0s 645us/step - loss: 0.5635\n",
|
||
"Epoch 3/5\n",
|
||
"363/363 [==============================] - 0s 644us/step - loss: 0.9703\n",
|
||
"Epoch 4/5\n",
|
||
"363/363 [==============================] - 0s 637us/step - loss: 0.5678\n",
|
||
"Epoch 5/5\n",
|
||
"363/363 [==============================] - 0s 643us/step - loss: 0.6350\n"
|
||
]
|
||
},
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<keras.src.callbacks.History at 0x1c7cdc08cd0>"
|
||
]
|
||
},
|
||
"execution_count": 240,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
# Train a small linear model using our custom momentum optimizer.
optimizer = MyMomentumOptimizer()

tf.keras.utils.set_random_seed(42)  # reproducible weight init and shuffling
model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=[8])])
model.compile(loss="mse", optimizer=optimizer)
model.fit(X_train_scaled, y_train, epochs=5)
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"Let's compare that to Keras's built-in momentum optimizer:"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 241,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"Epoch 1/5\n",
|
||
"363/363 [==============================] - 0s 670us/step - loss: 1.1844\n",
|
||
"Epoch 2/5\n",
|
||
"363/363 [==============================] - 0s 651us/step - loss: 0.5635\n",
|
||
"Epoch 3/5\n",
|
||
"363/363 [==============================] - 0s 681us/step - loss: 0.9703\n",
|
||
"Epoch 4/5\n",
|
||
"363/363 [==============================] - 0s 684us/step - loss: 0.5678\n",
|
||
"Epoch 5/5\n",
|
||
"363/363 [==============================] - 0s 678us/step - loss: 0.6350\n"
|
||
]
|
||
},
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<keras.src.callbacks.History at 0x1c7cdb938b0>"
|
||
]
|
||
},
|
||
"execution_count": 241,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
# Same model trained with Keras's built-in SGD + momentum, using the same
# seed and hyperparameters: the per-epoch losses should match the custom
# optimizer's run above.
optimizer = tf.keras.optimizers.SGD(learning_rate=0.001, momentum=0.9)

tf.keras.utils.set_random_seed(42)
model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=[8])])
model.compile(loss="mse", optimizer=optimizer)
model.fit(X_train_scaled, y_train, epochs=5)
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"Yep, we get the exact same model! 👍"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"# Exercises"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"## 1. to 11."
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"1. TensorFlow is an open-source library for numerical computation, particularly well suited and fine-tuned for large-scale Machine Learning. Its core is similar to NumPy, but it also features GPU support, support for distributed computing, computation graph analysis and optimization capabilities (with a portable graph format that allows you to train a TensorFlow model in one environment and run it in another), an optimization API based on reverse-mode autodiff, and several powerful APIs such as tf.keras, tf.data, tf.image, tf.signal, and more. Other popular Deep Learning libraries include PyTorch, MXNet, Microsoft Cognitive Toolkit, Theano, Caffe2, and Chainer.\n",
|
||
"2. Although TensorFlow offers most of the functionalities provided by NumPy, it is not a drop-in replacement, for a few reasons. First, the names of the functions are not always the same (for example, `tf.reduce_sum()` versus `np.sum()`). Second, some functions do not behave in exactly the same way (for example, `tf.transpose()` creates a transposed copy of a tensor, while NumPy's `T` attribute creates a transposed view, without actually copying any data). Lastly, NumPy arrays are mutable, while TensorFlow tensors are not (but you can use a `tf.Variable` if you need a mutable object).\n",
|
||
"3. Both `tf.range(10)` and `tf.constant(np.arange(10))` return a one-dimensional tensor containing the integers 0 to 9. However, the former uses 32-bit integers while the latter uses 64-bit integers. Indeed, TensorFlow defaults to 32 bits, while NumPy defaults to 64 bits.\n",
|
||
"4. Beyond regular tensors, TensorFlow offers several other data structures, including sparse tensors, tensor arrays, ragged tensors, queues, string tensors, and sets. The last two are actually represented as regular tensors, but TensorFlow provides special functions to manipulate them (in `tf.strings` and `tf.sets`).\n",
|
||
"5. When you want to define a custom loss function, in general you can just implement it as a regular Python function. However, if your custom loss function must support some hyperparameters (or any other state), then you should subclass the `keras.losses.Loss` class and implement the `__init__()` and `call()` methods. If you want the loss function's hyperparameters to be saved along with the model, then you must also implement the `get_config()` method.\n",
|
||
"6. Much like custom loss functions, most metrics can be defined as regular Python functions. But if you want your custom metric to support some hyperparameters (or any other state), then you should subclass the `keras.metrics.Metric` class. Moreover, if computing the metric over a whole epoch is not equivalent to computing the mean metric over all batches in that epoch (e.g., as for the precision and recall metrics), then you should subclass the `keras.metrics.Metric` class and implement the `__init__()`, `update_state()`, and `result()` methods to keep track of a running metric during each epoch. You should also implement the `reset_states()` method unless all it needs to do is reset all variables to 0.0. If you want the state to be saved along with the model, then you should implement the `get_config()` method as well.\n",
|
||
"7. You should distinguish the internal components of your model (i.e., layers or reusable blocks of layers) from the model itself (i.e., the object you will train). The former should subclass the `keras.layers.Layer` class, while the latter should subclass the `keras.models.Model` class.\n",
|
||
"8. Writing your own custom training loop is fairly advanced, so you should only do it if you really need to. Keras provides several tools to customize training without having to write a custom training loop: callbacks, custom regularizers, custom constraints, custom losses, and so on. You should use these instead of writing a custom training loop whenever possible: writing a custom training loop is more error-prone, and it will be harder to reuse the custom code you write. However, in some cases writing a custom training loop is necessary—for example, if you want to use different optimizers for different parts of your neural network, like in the [Wide & Deep paper](https://homl.info/widedeep). A custom training loop can also be useful when debugging, or when trying to understand exactly how training works.\n",
|
||
"9. Custom Keras components should be convertible to TF Functions, which means they should stick to TF operations as much as possible and respect all the rules listed in Chapter 12 (in the _TF Function Rules_ section). If you absolutely need to include arbitrary Python code in a custom component, you can either wrap it in a `tf.py_function()` operation (but this will reduce performance and limit your model's portability) or set `dynamic=True` when creating the custom layer or model (or set `run_eagerly=True` when calling the model's `compile()` method).\n",
|
||
"10. Please refer to Chapter 12 for the list of rules to respect when creating a TF Function (in the _TF Function Rules_ section).\n",
|
||
"11. Creating a dynamic Keras model can be useful for debugging, as it will not compile any custom component to a TF Function, and you can use any Python debugger to debug your code. It can also be useful if you want to include arbitrary Python code in your model (or in your training code), including calls to external libraries. To make a model dynamic, you must set `dynamic=True` when creating it. Alternatively, you can set `run_eagerly=True` when calling the model's `compile()` method. Making a model dynamic prevents Keras from using any of TensorFlow's graph features, so it will slow down training and inference, and you will not have the possibility to export the computation graph, which will limit your model's portability."
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"## 12. Implement a custom layer that performs _Layer Normalization_\n",
|
||
"_We will use this type of layer in Chapter 15 when using Recurrent Neural Networks._"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"### a.\n",
|
||
"_Exercise: The `build()` method should define two trainable weights *α* and *β*, both of shape `input_shape[-1:]` and data type `tf.float32`. *α* should be initialized with 1s, and *β* with 0s._"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"Solution: see below."
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"### b.\n",
|
||
"_Exercise: The `call()` method should compute the mean_ μ _and standard deviation_ σ _of each instance's features. For this, you can use `tf.nn.moments(inputs, axes=-1, keepdims=True)`, which returns the mean μ and the variance σ<sup>2</sup> of all instances (compute the square root of the variance to get the standard deviation). Then the function should compute and return *α*⊗(*X* - μ)/(σ + ε) + *β*, where ⊗ represents itemwise multiplication (`*`) and ε is a smoothing term (small constant to avoid division by zero, e.g., 0.001)._"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 242,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
class LayerNormalization(tf.keras.layers.Layer):
    """Normalizes each instance across its last axis, then rescales it.

    Computes alpha * (X - mean) / sqrt(variance + eps) + beta, where alpha
    and beta are trainable per-feature weights.
    """

    def __init__(self, eps=0.001, **kwargs):
        super().__init__(**kwargs)
        self.eps = eps  # smoothing term: keeps the denominator away from zero

    def build(self, batch_input_shape):
        feature_shape = batch_input_shape[-1:]
        # alpha starts at 1 and beta at 0, so the layer initially performs
        # pure normalization with no extra scaling or shifting.
        self.alpha = self.add_weight(name="alpha", shape=feature_shape,
                                     initializer="ones")
        self.beta = self.add_weight(name="beta", shape=feature_shape,
                                    initializer="zeros")

    def call(self, X):
        mean, variance = tf.nn.moments(X, axes=-1, keepdims=True)
        # eps goes inside the sqrt so the gradient stays finite at variance 0.
        std = tf.sqrt(variance + self.eps)
        return self.alpha * (X - mean) / std + self.beta

    def get_config(self):
        """Include `eps` so the layer can be saved and reloaded."""
        return {**super().get_config(), "eps": self.eps}
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"Note that making _ε_ a hyperparameter (`eps`) was not compulsory. Also note that it's preferable to compute `tf.sqrt(variance + self.eps)` rather than `tf.sqrt(variance) + self.eps`. Indeed, the derivative of sqrt(z) is undefined when z=0, so training will bomb whenever the variance vector has at least one component equal to 0. Adding _ε_ within the square root guarantees that this will never happen."
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"### c.\n",
|
||
"_Exercise: Ensure that your custom layer produces the same (or very nearly the same) output as the `tf.keras.layers.LayerNormalization` layer._"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"Let's create one instance of each class, apply them to some data (e.g., the training set), and ensure that the difference is negligible."
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 243,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(), dtype=float32, numpy=3.9782837e-08>"
|
||
]
|
||
},
|
||
"execution_count": 243,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
# Compare our custom layer against Keras's LayerNormalization on the
# training set: the mean absolute difference should be tiny, since both
# layers start with alpha = 1 and beta = 0.
X = X_train.astype(np.float32)

custom_layer_norm = LayerNormalization()
keras_layer_norm = tf.keras.layers.LayerNormalization()

tf.reduce_mean(tf.keras.losses.mean_absolute_error(
    keras_layer_norm(X), custom_layer_norm(X)))
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"Yep, that's close enough. To be extra sure, let's make alpha and beta completely random and compare again:"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 244,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(), dtype=float32, numpy=1.764704e-08>"
|
||
]
|
||
},
|
||
"execution_count": 244,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
# Give both layers the same random alpha and beta, then verify their
# outputs still agree (not just at the default 1s/0s weights).
tf.keras.utils.set_random_seed(42)
random_alpha = np.random.rand(X.shape[-1])
random_beta = np.random.rand(X.shape[-1])

custom_layer_norm.set_weights([random_alpha, random_beta])
keras_layer_norm.set_weights([random_alpha, random_beta])

tf.reduce_mean(tf.keras.losses.mean_absolute_error(
    keras_layer_norm(X), custom_layer_norm(X)))
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"Still a negligible difference! Our custom layer works fine."
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"## 13. Train a model using a custom training loop to tackle the Fashion MNIST dataset\n",
|
||
"_The Fashion MNIST dataset was introduced in Chapter 10._"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"### a.\n",
|
||
"_Exercise: Display the epoch, iteration, mean training loss, and mean accuracy over each epoch (updated at each iteration), as well as the validation loss and accuracy at the end of each epoch._"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 245,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/train-labels-idx1-ubyte.gz\n",
|
||
"29515/29515 [==============================] - 0s 0us/step\n",
|
||
"Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/train-images-idx3-ubyte.gz\n",
|
||
"26421880/26421880 [==============================] - 2s 0us/step\n",
|
||
"Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/t10k-labels-idx1-ubyte.gz\n",
|
||
"5148/5148 [==============================] - 0s 0s/step\n",
|
||
"Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/t10k-images-idx3-ubyte.gz\n",
|
||
"4422102/4422102 [==============================] - 1s 0us/step\n"
|
||
]
|
||
}
|
||
],
|
||
"source": [
|
||
# Load Fashion MNIST, scale pixel values to [0, 1], and carve a
# 5,000-instance validation set off the front of the training set.
(X_train_full, y_train_full), (X_test, y_test) = tf.keras.datasets.fashion_mnist.load_data()
X_train_full = X_train_full.astype(np.float32) / 255.
X_valid, X_train = X_train_full[:5000], X_train_full[5000:]
y_valid, y_train = y_train_full[:5000], y_train_full[5000:]
X_test = X_test.astype(np.float32) / 255.
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 246,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"tf.keras.utils.set_random_seed(42)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 247,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
# Simple MLP for Fashion MNIST: flatten the 28x28 images, one hidden
# layer, softmax output over the 10 classes.
model = tf.keras.Sequential([
    tf.keras.layers.Flatten(input_shape=[28, 28]),
    tf.keras.layers.Dense(100, activation="relu"),
    tf.keras.layers.Dense(10, activation="softmax"),
])
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 248,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
# Hyperparameters and tracking metrics for the custom training loop.
n_epochs = 5
batch_size = 32
n_steps = len(X_train) // batch_size  # any final partial batch is dropped
optimizer = tf.keras.optimizers.Nadam(learning_rate=0.01)
loss_fn = tf.keras.losses.sparse_categorical_crossentropy
mean_loss = tf.keras.metrics.Mean()  # running mean of per-batch losses
metrics = [tf.keras.metrics.SparseCategoricalAccuracy()]
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 249,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"application/vnd.jupyter.widget-view+json": {
|
||
"model_id": "18e1e887778f42819c7083b106ba15c7",
|
||
"version_major": 2,
|
||
"version_minor": 0
|
||
},
|
||
"text/plain": [
|
||
"All epochs: 0%| | 0/5 [00:00<?, ?it/s]"
|
||
]
|
||
},
|
||
"metadata": {},
|
||
"output_type": "display_data"
|
||
},
|
||
{
|
||
"data": {
|
||
"application/vnd.jupyter.widget-view+json": {
|
||
"model_id": "53af9eb383334e188f1babb56decf3d3",
|
||
"version_major": 2,
|
||
"version_minor": 0
|
||
},
|
||
"text/plain": [
|
||
"Epoch 1/5: 0%| | 0/1718 [00:00<?, ?it/s]"
|
||
]
|
||
},
|
||
"metadata": {},
|
||
"output_type": "display_data"
|
||
},
|
||
{
|
||
"data": {
|
||
"application/vnd.jupyter.widget-view+json": {
|
||
"model_id": "76697b05c1954a67b6dd29862561a4c6",
|
||
"version_major": 2,
|
||
"version_minor": 0
|
||
},
|
||
"text/plain": [
|
||
"Epoch 2/5: 0%| | 0/1718 [00:00<?, ?it/s]"
|
||
]
|
||
},
|
||
"metadata": {},
|
||
"output_type": "display_data"
|
||
},
|
||
{
|
||
"data": {
|
||
"application/vnd.jupyter.widget-view+json": {
|
||
"model_id": "ab1890692c164c69a79426015c8b1f16",
|
||
"version_major": 2,
|
||
"version_minor": 0
|
||
},
|
||
"text/plain": [
|
||
"Epoch 3/5: 0%| | 0/1718 [00:00<?, ?it/s]"
|
||
]
|
||
},
|
||
"metadata": {},
|
||
"output_type": "display_data"
|
||
},
|
||
{
|
||
"data": {
|
||
"application/vnd.jupyter.widget-view+json": {
|
||
"model_id": "f449aae3983c4af2b91172f3abeebe87",
|
||
"version_major": 2,
|
||
"version_minor": 0
|
||
},
|
||
"text/plain": [
|
||
"Epoch 4/5: 0%| | 0/1718 [00:00<?, ?it/s]"
|
||
]
|
||
},
|
||
"metadata": {},
|
||
"output_type": "display_data"
|
||
},
|
||
{
|
||
"data": {
|
||
"application/vnd.jupyter.widget-view+json": {
|
||
"model_id": "726499994a474e6195862056010b865c",
|
||
"version_major": 2,
|
||
"version_minor": 0
|
||
},
|
||
"text/plain": [
|
||
"Epoch 5/5: 0%| | 0/1718 [00:00<?, ?it/s]"
|
||
]
|
||
},
|
||
"metadata": {},
|
||
"output_type": "display_data"
|
||
}
|
||
],
|
||
"source": [
|
||
# Custom training loop with tqdm progress bars: the outer bar tracks
# epochs, the inner bar tracks steps and shows the running loss/accuracy
# in its postfix.
with trange(1, n_epochs + 1, desc="All epochs") as epochs:
    for epoch in epochs:
        with trange(1, n_steps + 1, desc=f"Epoch {epoch}/{n_epochs}") as steps:
            for step in steps:
                X_batch, y_batch = random_batch(X_train, y_train)
                with tf.GradientTape() as tape:
                    y_pred = model(X_batch)
                    main_loss = tf.reduce_mean(loss_fn(y_batch, y_pred))
                    # Include any regularization losses registered on the model.
                    loss = tf.add_n([main_loss] + model.losses)
                gradients = tape.gradient(loss, model.trainable_variables)
                optimizer.apply_gradients(zip(gradients, model.trainable_variables))
                # Apply weight constraints (if any) after each update.
                for variable in model.variables:
                    if variable.constraint is not None:
                        variable.assign(variable.constraint(variable))
                status = OrderedDict()
                mean_loss(loss)
                status["loss"] = mean_loss.result().numpy()
                for metric in metrics:
                    metric(y_batch, y_pred)
                    status[metric.name] = metric.result().numpy()
                steps.set_postfix(status)
            # End of epoch: evaluate loss and accuracy on the validation set.
            y_pred = model(X_valid)
            status["val_loss"] = np.mean(loss_fn(y_valid, y_pred))
            status["val_accuracy"] = np.mean(tf.keras.metrics.sparse_categorical_accuracy(
                tf.constant(y_valid, dtype=np.float32), y_pred))
            steps.set_postfix(status)
        # Reset the running metrics so the next epoch starts fresh.
        for metric in [mean_loss] + metrics:
            metric.reset_states()
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"### b.\n",
|
||
"_Exercise: Try using a different optimizer with a different learning rate for the upper layers and the lower layers._"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 250,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"tf.keras.utils.set_random_seed(42)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 251,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
# Split the model into lower and upper parts so that each part can be
# trained with its own optimizer and learning rate.
lower_layers = tf.keras.Sequential([
    tf.keras.layers.Flatten(input_shape=[28, 28]),
    tf.keras.layers.Dense(100, activation="relu"),
])
upper_layers = tf.keras.Sequential([
    tf.keras.layers.Dense(10, activation="softmax"),
])
model = tf.keras.Sequential([
    lower_layers, upper_layers
])
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 252,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
# A different optimizer (and learning rate) for each part of the model.
lower_optimizer = tf.keras.optimizers.SGD(learning_rate=1e-4)
upper_optimizer = tf.keras.optimizers.Nadam(learning_rate=1e-3)
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 253,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
# Hyperparameters and metrics for the two-optimizer training loop.
n_epochs = 5
batch_size = 32
n_steps = len(X_train) // batch_size  # any final partial batch is dropped
loss_fn = tf.keras.losses.sparse_categorical_crossentropy
mean_loss = tf.keras.metrics.Mean()  # running mean of per-batch losses
metrics = [tf.keras.metrics.SparseCategoricalAccuracy()]
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 254,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"application/vnd.jupyter.widget-view+json": {
|
||
"model_id": "2ad1799119244d4ab9a627887deff693",
|
||
"version_major": 2,
|
||
"version_minor": 0
|
||
},
|
||
"text/plain": [
|
||
"All epochs: 0%| | 0/5 [00:00<?, ?it/s]"
|
||
]
|
||
},
|
||
"metadata": {},
|
||
"output_type": "display_data"
|
||
},
|
||
{
|
||
"data": {
|
||
"application/vnd.jupyter.widget-view+json": {
|
||
"model_id": "9cb8d0fc3b654140ba2221b12c9825ff",
|
||
"version_major": 2,
|
||
"version_minor": 0
|
||
},
|
||
"text/plain": [
|
||
"Epoch 1/5: 0%| | 0/1718 [00:00<?, ?it/s]"
|
||
]
|
||
},
|
||
"metadata": {},
|
||
"output_type": "display_data"
|
||
},
|
||
{
|
||
"data": {
|
||
"application/vnd.jupyter.widget-view+json": {
|
||
"model_id": "46e11226faa24ab08025845323a5edb5",
|
||
"version_major": 2,
|
||
"version_minor": 0
|
||
},
|
||
"text/plain": [
|
||
"Epoch 2/5: 0%| | 0/1718 [00:00<?, ?it/s]"
|
||
]
|
||
},
|
||
"metadata": {},
|
||
"output_type": "display_data"
|
||
},
|
||
{
|
||
"data": {
|
||
"application/vnd.jupyter.widget-view+json": {
|
||
"model_id": "3f1f3690c314406ab6d093829bc469b8",
|
||
"version_major": 2,
|
||
"version_minor": 0
|
||
},
|
||
"text/plain": [
|
||
"Epoch 3/5: 0%| | 0/1718 [00:00<?, ?it/s]"
|
||
]
|
||
},
|
||
"metadata": {},
|
||
"output_type": "display_data"
|
||
},
|
||
{
|
||
"data": {
|
||
"application/vnd.jupyter.widget-view+json": {
|
||
"model_id": "728081ca6ba34450823b2bfad07da850",
|
||
"version_major": 2,
|
||
"version_minor": 0
|
||
},
|
||
"text/plain": [
|
||
"Epoch 4/5: 0%| | 0/1718 [00:00<?, ?it/s]"
|
||
]
|
||
},
|
||
"metadata": {},
|
||
"output_type": "display_data"
|
||
},
|
||
{
|
||
"data": {
|
||
"application/vnd.jupyter.widget-view+json": {
|
||
"model_id": "7ff06fc06fd94a4c8569c80614189c1d",
|
||
"version_major": 2,
|
||
"version_minor": 0
|
||
},
|
||
"text/plain": [
|
||
"Epoch 5/5: 0%| | 0/1718 [00:00<?, ?it/s]"
|
||
]
|
||
},
|
||
"metadata": {},
|
||
"output_type": "display_data"
|
||
}
|
||
],
|
||
"source": [
|
||
# Training loop using a different optimizer for the lower and upper
# layers. The tape must be persistent because tape.gradient() is called
# once per (layers, optimizer) pair.
with trange(1, n_epochs + 1, desc="All epochs") as epochs:
    for epoch in epochs:
        with trange(1, n_steps + 1, desc=f"Epoch {epoch}/{n_epochs}") as steps:
            for step in steps:
                X_batch, y_batch = random_batch(X_train, y_train)
                with tf.GradientTape(persistent=True) as tape:
                    y_pred = model(X_batch)
                    main_loss = tf.reduce_mean(loss_fn(y_batch, y_pred))
                    # Include any regularization losses registered on the model.
                    loss = tf.add_n([main_loss] + model.losses)
                for layers, optimizer in ((lower_layers, lower_optimizer),
                                          (upper_layers, upper_optimizer)):
                    gradients = tape.gradient(loss, layers.trainable_variables)
                    optimizer.apply_gradients(zip(gradients, layers.trainable_variables))
                del tape  # persistent tapes hold resources; release explicitly
                # Apply weight constraints (if any) after each update.
                for variable in model.variables:
                    if variable.constraint is not None:
                        variable.assign(variable.constraint(variable))
                status = OrderedDict()
                mean_loss(loss)
                status["loss"] = mean_loss.result().numpy()
                for metric in metrics:
                    metric(y_batch, y_pred)
                    status[metric.name] = metric.result().numpy()
                steps.set_postfix(status)
            # End of epoch: evaluate loss and accuracy on the validation set.
            y_pred = model(X_valid)
            status["val_loss"] = np.mean(loss_fn(y_valid, y_pred))
            status["val_accuracy"] = np.mean(tf.keras.metrics.sparse_categorical_accuracy(
                tf.constant(y_valid, dtype=np.float32), y_pred))
            steps.set_postfix(status)
        # Reset the running metrics so the next epoch starts fresh.
        for metric in [mean_loss] + metrics:
            metric.reset_states()
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": null,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": []
|
||
}
|
||
],
|
||
"metadata": {
|
||
"kernelspec": {
|
||
"display_name": "Python 3",
|
||
"language": "python",
|
||
"name": "python3"
|
||
},
|
||
"language_info": {
|
||
"codemirror_mode": {
|
||
"name": "ipython",
|
||
"version": 3
|
||
},
|
||
"file_extension": ".py",
|
||
"mimetype": "text/x-python",
|
||
"name": "python",
|
||
"nbconvert_exporter": "python",
|
||
"pygments_lexer": "ipython3",
|
||
"version": "3.10.11"
|
||
}
|
||
},
|
||
"nbformat": 4,
|
||
"nbformat_minor": 4
|
||
}
|