6562 lines
212 KiB
Plaintext
6562 lines
212 KiB
Plaintext
{
|
||
"cells": [
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"**Chapter 12 – Custom Models and Training with TensorFlow**"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"_This notebook contains all the sample code and solutions to the exercises in chapter 12, as well as code examples from Appendix C_"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"<table align=\"left\">\n",
|
||
" <td>\n",
|
||
" <a href=\"https://colab.research.google.com/github/ageron/handson-ml3/blob/main/12_custom_models_and_training_with_tensorflow.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>\n",
|
||
" </td>\n",
|
||
" <td>\n",
|
||
" <a target=\"_blank\" href=\"https://kaggle.com/kernels/welcome?src=https://github.com/ageron/handson-ml3/blob/main/12_custom_models_and_training_with_tensorflow.ipynb\"><img src=\"https://kaggle.com/static/images/open-in-kaggle.svg\" /></a>\n",
|
||
" </td>\n",
|
||
"</table>"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {
|
||
"tags": []
|
||
},
|
||
"source": [
|
||
"# Setup"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"This project requires Python 3.7 or above:"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 1,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"import sys\n",
|
||
"\n",
|
||
"assert sys.version_info >= (3, 7)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"And TensorFlow ≥ 2.8:"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 2,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"from packaging import version\n",
|
||
"import tensorflow as tf\n",
|
||
"\n",
|
||
"assert version.parse(tf.__version__) >= version.parse(\"2.8.0\")"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"## Using TensorFlow like NumPy"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"### Tensors and Operations"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"#### Tensors"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 3,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(2, 3), dtype=float32, numpy=\n",
|
||
"array([[1., 2., 3.],\n",
|
||
" [4., 5., 6.]], dtype=float32)>"
|
||
]
|
||
},
|
||
"execution_count": 3,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"t = tf.constant([[1., 2., 3.], [4., 5., 6.]]) # matrix\n",
|
||
"t"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 4,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"TensorShape([2, 3])"
|
||
]
|
||
},
|
||
"execution_count": 4,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"t.shape"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 5,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"tf.float32"
|
||
]
|
||
},
|
||
"execution_count": 5,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"t.dtype"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"#### Indexing"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 6,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(2, 2), dtype=float32, numpy=\n",
|
||
"array([[2., 3.],\n",
|
||
" [5., 6.]], dtype=float32)>"
|
||
]
|
||
},
|
||
"execution_count": 6,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"t[:, 1:]"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 7,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(2, 1), dtype=float32, numpy=\n",
|
||
"array([[2.],\n",
|
||
" [5.]], dtype=float32)>"
|
||
]
|
||
},
|
||
"execution_count": 7,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"t[..., 1, tf.newaxis]"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"#### Ops"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 8,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(2, 3), dtype=float32, numpy=\n",
|
||
"array([[11., 12., 13.],\n",
|
||
" [14., 15., 16.]], dtype=float32)>"
|
||
]
|
||
},
|
||
"execution_count": 8,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"t + 10"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 9,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(2, 3), dtype=float32, numpy=\n",
|
||
"array([[ 1., 4., 9.],\n",
|
||
" [16., 25., 36.]], dtype=float32)>"
|
||
]
|
||
},
|
||
"execution_count": 9,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"tf.square(t)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 10,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(2, 2), dtype=float32, numpy=\n",
|
||
"array([[14., 32.],\n",
|
||
" [32., 77.]], dtype=float32)>"
|
||
]
|
||
},
|
||
"execution_count": 10,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"t @ tf.transpose(t)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"#### Scalars"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 11,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(), dtype=int32, numpy=42>"
|
||
]
|
||
},
|
||
"execution_count": 11,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"tf.constant(42)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"#### Keras's low-level API"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"You may still run across code that uses Keras's low-level API:"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 12,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(3, 2), dtype=float32, numpy=\n",
|
||
"array([[11., 26.],\n",
|
||
" [14., 35.],\n",
|
||
" [19., 46.]], dtype=float32)>"
|
||
]
|
||
},
|
||
"execution_count": 12,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"K = tf.keras.backend\n",
|
||
"K.square(K.transpose(t)) + 10"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"But since Keras does not support multiple backends anymore, you should instead use TF's low-level API directly:"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 13,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(3, 2), dtype=float32, numpy=\n",
|
||
"array([[11., 26.],\n",
|
||
" [14., 35.],\n",
|
||
" [19., 46.]], dtype=float32)>"
|
||
]
|
||
},
|
||
"execution_count": 13,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"tf.square(tf.transpose(t)) + 10"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"### Tensors and NumPy"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 14,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(3,), dtype=float64, numpy=array([2., 4., 5.])>"
|
||
]
|
||
},
|
||
"execution_count": 14,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"import numpy as np\n",
|
||
"\n",
|
||
"a = np.array([2., 4., 5.])\n",
|
||
"tf.constant(a)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 15,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"array([[1., 2., 3.],\n",
|
||
" [4., 5., 6.]], dtype=float32)"
|
||
]
|
||
},
|
||
"execution_count": 15,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"t.numpy()"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 16,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"array([[1., 2., 3.],\n",
|
||
" [4., 5., 6.]], dtype=float32)"
|
||
]
|
||
},
|
||
"execution_count": 16,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"np.array(t)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 17,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(3,), dtype=float64, numpy=array([ 4., 16., 25.])>"
|
||
]
|
||
},
|
||
"execution_count": 17,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"tf.square(a)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 18,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"array([[ 1., 4., 9.],\n",
|
||
" [16., 25., 36.]], dtype=float32)"
|
||
]
|
||
},
|
||
"execution_count": 18,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"np.square(t)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"### Type Conversions"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 19,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"cannot compute AddV2 as input #1(zero-based) was expected to be a float tensor but is a int32 tensor [Op:AddV2] name: \n"
|
||
]
|
||
}
|
||
],
|
||
"source": [
|
||
"try:\n",
|
||
" tf.constant(2.0) + tf.constant(40)\n",
|
||
"except tf.errors.InvalidArgumentError as ex:\n",
|
||
" print(ex)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 20,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"cannot compute AddV2 as input #1(zero-based) was expected to be a float tensor but is a double tensor [Op:AddV2] name: \n"
|
||
]
|
||
}
|
||
],
|
||
"source": [
|
||
"try:\n",
|
||
" tf.constant(2.0) + tf.constant(40., dtype=tf.float64)\n",
|
||
"except tf.errors.InvalidArgumentError as ex:\n",
|
||
" print(ex)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 21,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(), dtype=float32, numpy=42.0>"
|
||
]
|
||
},
|
||
"execution_count": 21,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"t2 = tf.constant(40., dtype=tf.float64)\n",
|
||
"tf.constant(2.0) + tf.cast(t2, tf.float32)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"### Variables"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 22,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Variable 'Variable:0' shape=(2, 3) dtype=float32, numpy=\n",
|
||
"array([[1., 2., 3.],\n",
|
||
" [4., 5., 6.]], dtype=float32)>"
|
||
]
|
||
},
|
||
"execution_count": 22,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"v = tf.Variable([[1., 2., 3.], [4., 5., 6.]])\n",
|
||
"v"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 23,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Variable 'UnreadVariable' shape=(2, 3) dtype=float32, numpy=\n",
|
||
"array([[ 2., 4., 6.],\n",
|
||
" [ 8., 10., 12.]], dtype=float32)>"
|
||
]
|
||
},
|
||
"execution_count": 23,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"v.assign(2 * v)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 24,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Variable 'UnreadVariable' shape=(2, 3) dtype=float32, numpy=\n",
|
||
"array([[ 2., 42., 6.],\n",
|
||
" [ 8., 10., 12.]], dtype=float32)>"
|
||
]
|
||
},
|
||
"execution_count": 24,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"v[0, 1].assign(42)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 25,
|
||
"metadata": {
|
||
"tags": []
|
||
},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Variable 'UnreadVariable' shape=(2, 3) dtype=float32, numpy=\n",
|
||
"array([[ 2., 42., 0.],\n",
|
||
" [ 8., 10., 1.]], dtype=float32)>"
|
||
]
|
||
},
|
||
"execution_count": 25,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"v[:, 2].assign([0., 1.])"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 26,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Variable 'UnreadVariable' shape=(2, 3) dtype=float32, numpy=\n",
|
||
"array([[100., 42., 0.],\n",
|
||
" [ 8., 10., 200.]], dtype=float32)>"
|
||
]
|
||
},
|
||
"execution_count": 26,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"v.scatter_nd_update(\n",
|
||
" indices=[[0, 0], [1, 2]], updates=[100., 200.])"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 27,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Variable 'UnreadVariable' shape=(2, 3) dtype=float32, numpy=\n",
|
||
"array([[4., 5., 6.],\n",
|
||
" [1., 2., 3.]], dtype=float32)>"
|
||
]
|
||
},
|
||
"execution_count": 27,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"# extra code – shows how to use scatter_update()\n",
|
||
"sparse_delta = tf.IndexedSlices(values=[[1., 2., 3.], [4., 5., 6.]],\n",
|
||
" indices=[1, 0])\n",
|
||
"v.scatter_update(sparse_delta)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 28,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"'ResourceVariable' object does not support item assignment\n"
|
||
]
|
||
}
|
||
],
|
||
"source": [
|
||
"try:\n",
|
||
" v[1] = [7., 8., 9.]\n",
|
||
"except TypeError as ex:\n",
|
||
" print(ex)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"### Strings"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"The code in this section and all the following sections is in Appendix C."
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 29,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(), dtype=string, numpy=b'hello world'>"
|
||
]
|
||
},
|
||
"execution_count": 29,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"tf.constant(b\"hello world\")"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 30,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(), dtype=string, numpy=b'caf\\xc3\\xa9'>"
|
||
]
|
||
},
|
||
"execution_count": 30,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"tf.constant(\"café\")"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 31,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(4,), dtype=int32, numpy=array([ 99, 97, 102, 233], dtype=int32)>"
|
||
]
|
||
},
|
||
"execution_count": 31,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"u = tf.constant([ord(c) for c in \"café\"])\n",
|
||
"u"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 32,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(), dtype=int32, numpy=4>"
|
||
]
|
||
},
|
||
"execution_count": 32,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"b = tf.strings.unicode_encode(u, \"UTF-8\")\n",
|
||
"tf.strings.length(b, unit=\"UTF8_CHAR\")"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 33,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(4,), dtype=int32, numpy=array([ 99, 97, 102, 233], dtype=int32)>"
|
||
]
|
||
},
|
||
"execution_count": 33,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"tf.strings.unicode_decode(b, \"UTF-8\")"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"### Other Data Structures"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"The code in this section is in Appendix C."
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"#### String arrays"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 34,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(), dtype=string, numpy=b'hello world'>"
|
||
]
|
||
},
|
||
"execution_count": 34,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"tf.constant(b\"hello world\")"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 35,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(), dtype=string, numpy=b'caf\\xc3\\xa9'>"
|
||
]
|
||
},
|
||
"execution_count": 35,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"tf.constant(\"café\")"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 36,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(4,), dtype=int32, numpy=array([ 99, 97, 102, 233], dtype=int32)>"
|
||
]
|
||
},
|
||
"execution_count": 36,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"u = tf.constant([ord(c) for c in \"café\"])\n",
|
||
"u"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 37,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(), dtype=string, numpy=b'caf\\xc3\\xa9'>"
|
||
]
|
||
},
|
||
"execution_count": 37,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"b = tf.strings.unicode_encode(u, \"UTF-8\")\n",
|
||
"b"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 38,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(), dtype=int32, numpy=4>"
|
||
]
|
||
},
|
||
"execution_count": 38,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"tf.strings.length(b, unit=\"UTF8_CHAR\")"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 39,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(4,), dtype=int32, numpy=array([ 99, 97, 102, 233], dtype=int32)>"
|
||
]
|
||
},
|
||
"execution_count": 39,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"tf.strings.unicode_decode(b, \"UTF-8\")"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 40,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"p = tf.constant([\"Café\", \"Coffee\", \"caffè\", \"咖啡\"])"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 41,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(4,), dtype=int32, numpy=array([4, 6, 5, 2], dtype=int32)>"
|
||
]
|
||
},
|
||
"execution_count": 41,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"tf.strings.length(p, unit=\"UTF8_CHAR\")"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 42,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.RaggedTensor [[67, 97, 102, 233], [67, 111, 102, 102, 101, 101],\n",
|
||
" [99, 97, 102, 102, 232], [21654, 21857]]>"
|
||
]
|
||
},
|
||
"execution_count": 42,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"r = tf.strings.unicode_decode(p, \"UTF8\")\n",
|
||
"r"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"#### Ragged tensors"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 43,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(6,), dtype=int32, numpy=array([ 67, 111, 102, 102, 101, 101], dtype=int32)>"
|
||
]
|
||
},
|
||
"execution_count": 43,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"r[1]"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 44,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.RaggedTensor [[67, 111, 102, 102, 101, 101], [99, 97, 102, 102, 232]]>"
|
||
]
|
||
},
|
||
"execution_count": 44,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"r[1:3] # extra code – a slice of a ragged tensor is a ragged tensor"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 45,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.RaggedTensor [[67, 97, 102, 233], [67, 111, 102, 102, 101, 101],\n",
|
||
" [99, 97, 102, 102, 232], [21654, 21857], [65, 66], [], [67]]>"
|
||
]
|
||
},
|
||
"execution_count": 45,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"r2 = tf.ragged.constant([[65, 66], [], [67]])\n",
|
||
"tf.concat([r, r2], axis=0)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 46,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"<tf.RaggedTensor [[67, 97, 102, 233, 68, 69, 70], [67, 111, 102, 102, 101, 101, 71],\n",
|
||
" [99, 97, 102, 102, 232], [21654, 21857, 72, 73]]>\n"
|
||
]
|
||
}
|
||
],
|
||
"source": [
|
||
"r3 = tf.ragged.constant([[68, 69, 70], [71], [], [72, 73]])\n",
|
||
"print(tf.concat([r, r3], axis=1))"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 47,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(4, 6), dtype=int32, numpy=\n",
|
||
"array([[ 67, 97, 102, 233, 0, 0],\n",
|
||
" [ 67, 111, 102, 102, 101, 101],\n",
|
||
" [ 99, 97, 102, 102, 232, 0],\n",
|
||
" [21654, 21857, 0, 0, 0, 0]], dtype=int32)>"
|
||
]
|
||
},
|
||
"execution_count": 47,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"r.to_tensor()"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"#### Sparse tensors"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 48,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"s = tf.SparseTensor(indices=[[0, 1], [1, 0], [2, 3]],\n",
|
||
" values=[1., 2., 3.],\n",
|
||
" dense_shape=[3, 4])"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 49,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(3, 4), dtype=float32, numpy=\n",
|
||
"array([[0., 1., 0., 0.],\n",
|
||
" [2., 0., 0., 0.],\n",
|
||
" [0., 0., 0., 3.]], dtype=float32)>"
|
||
]
|
||
},
|
||
"execution_count": 49,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"tf.sparse.to_dense(s)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 50,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"SparseTensor(indices=tf.Tensor(\n",
|
||
"[[0 1]\n",
|
||
" [1 0]\n",
|
||
" [2 3]], shape=(3, 2), dtype=int64), values=tf.Tensor([ 42. 84. 126.], shape=(3,), dtype=float32), dense_shape=tf.Tensor([3 4], shape=(2,), dtype=int64))"
|
||
]
|
||
},
|
||
"execution_count": 50,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"s * 42.0"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 51,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"unsupported operand type(s) for +: 'SparseTensor' and 'float'\n"
|
||
]
|
||
}
|
||
],
|
||
"source": [
|
||
"try:\n",
|
||
" s + 42.0\n",
|
||
"except TypeError as ex:\n",
|
||
" print(ex)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 52,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(3, 2), dtype=float32, numpy=\n",
|
||
"array([[ 30., 40.],\n",
|
||
" [ 20., 40.],\n",
|
||
" [210., 240.]], dtype=float32)>"
|
||
]
|
||
},
|
||
"execution_count": 52,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"# extra code – shows how to multiply a sparse tensor and a dense tensor\n",
|
||
"s4 = tf.constant([[10., 20.], [30., 40.], [50., 60.], [70., 80.]])\n",
|
||
"tf.sparse.sparse_dense_matmul(s, s4)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 53,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"{{function_node __wrapped__SparseToDense_device_/job:localhost/replica:0/task:0/device:CPU:0}} indices[1] = [0,1] is out of order. Many sparse ops require sorted indices.\n",
|
||
" Use `tf.sparse.reorder` to create a correctly ordered copy.\n",
|
||
"\n",
|
||
" [Op:SparseToDense] name: \n"
|
||
]
|
||
},
|
||
{
|
||
"name": "stderr",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"2023-09-05 11:03:52.814492: W tensorflow/core/framework/op_kernel.cc:1828] OP_REQUIRES failed at sparse_to_dense_op.cc:161 : INVALID_ARGUMENT: indices[1] = [0,1] is out of order. Many sparse ops require sorted indices.\n",
|
||
" Use `tf.sparse.reorder` to create a correctly ordered copy.\n",
|
||
"\n",
|
||
"\n"
|
||
]
|
||
}
|
||
],
|
||
"source": [
|
||
"# extra code – when creating a sparse tensor, values must be given in \"reading\n",
|
||
"# order\", or else `to_dense()` will fail.\n",
|
||
"s5 = tf.SparseTensor(indices=[[0, 2], [0, 1]], # WRONG ORDER!\n",
|
||
" values=[1., 2.],\n",
|
||
" dense_shape=[3, 4])\n",
|
||
"try:\n",
|
||
" tf.sparse.to_dense(s5)\n",
|
||
"except tf.errors.InvalidArgumentError as ex:\n",
|
||
" print(ex)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 54,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(3, 4), dtype=float32, numpy=\n",
|
||
"array([[0., 2., 1., 0.],\n",
|
||
" [0., 0., 0., 0.],\n",
|
||
" [0., 0., 0., 0.]], dtype=float32)>"
|
||
]
|
||
},
|
||
"execution_count": 54,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"# extra code – shows how to fix the sparse tensor s5 by reordering its values\n",
|
||
"s6 = tf.sparse.reorder(s5)\n",
|
||
"tf.sparse.to_dense(s6)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"#### Tensor Arrays"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 55,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"array = tf.TensorArray(dtype=tf.float32, size=3)\n",
|
||
"array = array.write(0, tf.constant([1., 2.]))\n",
|
||
"array = array.write(1, tf.constant([3., 10.]))\n",
|
||
"array = array.write(2, tf.constant([5., 7.]))\n",
|
||
"tensor1 = array.read(1) # returns (and zeros out!) tf.constant([3., 10.])"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 56,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(3, 2), dtype=float32, numpy=\n",
|
||
"array([[1., 2.],\n",
|
||
" [0., 0.],\n",
|
||
" [5., 7.]], dtype=float32)>"
|
||
]
|
||
},
|
||
"execution_count": 56,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"array.stack()"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 57,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(3, 2), dtype=float32, numpy=\n",
|
||
"array([[ 1., 2.],\n",
|
||
" [ 3., 10.],\n",
|
||
" [ 5., 7.]], dtype=float32)>"
|
||
]
|
||
},
|
||
"execution_count": 57,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"# extra code – shows how to disable clear_after_read\n",
|
||
"array2 = tf.TensorArray(dtype=tf.float32, size=3, clear_after_read=False)\n",
|
||
"array2 = array2.write(0, tf.constant([1., 2.]))\n",
|
||
"array2 = array2.write(1, tf.constant([3., 10.]))\n",
|
||
"array2 = array2.write(2, tf.constant([5., 7.]))\n",
|
||
"tensor2 = array2.read(1) # returns tf.constant([3., 10.])\n",
|
||
"array2.stack()"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 58,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(3, 2), dtype=float32, numpy=\n",
|
||
"array([[1., 2.],\n",
|
||
" [0., 0.],\n",
|
||
" [5., 7.]], dtype=float32)>"
|
||
]
|
||
},
|
||
"execution_count": 58,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"# extra code – shows how to create and use a tensor array with a dynamic size\n",
|
||
"array3 = tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True)\n",
|
||
"array3 = array3.write(0, tf.constant([1., 2.]))\n",
|
||
"array3 = array3.write(1, tf.constant([3., 10.]))\n",
|
||
"array3 = array3.write(2, tf.constant([5., 7.]))\n",
|
||
"tensor3 = array3.read(1)\n",
|
||
"array3.stack()"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"#### Sets"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 59,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"SparseTensor(indices=tf.Tensor(\n",
|
||
"[[0 0]\n",
|
||
" [0 1]\n",
|
||
" [0 2]\n",
|
||
" [0 3]\n",
|
||
" [0 4]], shape=(5, 2), dtype=int64), values=tf.Tensor([ 1 5 6 9 11], shape=(5,), dtype=int32), dense_shape=tf.Tensor([1 5], shape=(2,), dtype=int64))"
|
||
]
|
||
},
|
||
"execution_count": 59,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"a = tf.constant([[1, 5, 9]])\n",
|
||
"b = tf.constant([[5, 6, 9, 11]])\n",
|
||
"u = tf.sets.union(a, b)\n",
|
||
"u"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 60,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(1, 5), dtype=int32, numpy=array([[ 1, 5, 6, 9, 11]], dtype=int32)>"
|
||
]
|
||
},
|
||
"execution_count": 60,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"tf.sparse.to_dense(u)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 61,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(2, 5), dtype=int32, numpy=\n",
|
||
"array([[ 1, 5, 6, 9, 11],\n",
|
||
" [ 0, 10, 13, 0, 0]], dtype=int32)>"
|
||
]
|
||
},
|
||
"execution_count": 61,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"a = tf.constant([[1, 5, 9], [10, 0, 0]])\n",
|
||
"b = tf.constant([[5, 6, 9, 11], [13, 0, 0, 0]])\n",
|
||
"u = tf.sets.union(a, b)\n",
|
||
"tf.sparse.to_dense(u)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 62,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(2, 5), dtype=int32, numpy=\n",
|
||
"array([[ 1, 5, 6, 9, 11],\n",
|
||
" [-1, 10, 13, -1, -1]], dtype=int32)>"
|
||
]
|
||
},
|
||
"execution_count": 62,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"# extra code – shows how to use a different default value: -1 in this case\n",
|
||
"a = tf.constant([[1, 5, 9], [10, -1, -1]])\n",
|
||
"b = tf.constant([[5, 6, 9, 11], [13, -1, -1, -1]])\n",
|
||
"u = tf.sets.union(a, b)\n",
|
||
"tf.sparse.to_dense(u, default_value=-1)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 63,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(2, 3), dtype=int32, numpy=\n",
|
||
"array([[2, 3, 7],\n",
|
||
" [7, 0, 0]], dtype=int32)>"
|
||
]
|
||
},
|
||
"execution_count": 63,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"# extra code – shows how to use `tf.sets.difference()`\n",
|
||
"set1 = tf.constant([[2, 3, 5, 7], [7, 9, 0, 0]])\n",
|
||
"set2 = tf.constant([[4, 5, 6], [9, 10, 0]])\n",
|
||
"tf.sparse.to_dense(tf.sets.difference(set1, set2))"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 64,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(2, 2), dtype=int32, numpy=\n",
|
||
"array([[5, 0],\n",
|
||
" [0, 9]], dtype=int32)>"
|
||
]
|
||
},
|
||
"execution_count": 64,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"# extra code – shows how to use `tf.sets.intersection()`\n",
|
||
"tf.sparse.to_dense(tf.sets.intersection(set1, set2))"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 65,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(1,), dtype=bool, numpy=array([ True])>"
|
||
]
|
||
},
|
||
"execution_count": 65,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"# extra code – check whether set1[0] contains 5\n",
|
||
"tf.sets.size(tf.sets.intersection(set1[:1], tf.constant([[5, 0, 0, 0]]))) > 0"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"#### Queues"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 66,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(), dtype=int32, numpy=2>"
|
||
]
|
||
},
|
||
"execution_count": 66,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"q = tf.queue.FIFOQueue(3, [tf.int32, tf.string], shapes=[(), ()])\n",
|
||
"q.enqueue([10, b\"windy\"])\n",
|
||
"q.enqueue([15, b\"sunny\"])\n",
|
||
"q.size()"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 67,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"[<tf.Tensor: shape=(), dtype=int32, numpy=10>,\n",
|
||
" <tf.Tensor: shape=(), dtype=string, numpy=b'windy'>]"
|
||
]
|
||
},
|
||
"execution_count": 67,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"q.dequeue()"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 68,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"q.enqueue_many([[13, 16], [b'cloudy', b'rainy']])"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 69,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"[<tf.Tensor: shape=(3,), dtype=int32, numpy=array([15, 13, 16], dtype=int32)>,\n",
|
||
" <tf.Tensor: shape=(3,), dtype=string, numpy=array([b'sunny', b'cloudy', b'rainy'], dtype=object)>]"
|
||
]
|
||
},
|
||
"execution_count": 69,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"q.dequeue_many(3)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"## Custom loss function"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 70,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"def huber_fn(y_true, y_pred):\n",
|
||
" error = y_true - y_pred\n",
|
||
" is_small_error = tf.abs(error) < 1\n",
|
||
" squared_loss = tf.square(error) / 2\n",
|
||
" linear_loss = tf.abs(error) - 0.5\n",
|
||
" return tf.where(is_small_error, squared_loss, linear_loss)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 71,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"image/png": "iVBORw0KGgoAAAANSUhEUgAAAqQAAAFkCAYAAAD2RimAAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjUuMywgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/NK7nSAAAACXBIWXMAAA9hAAAPYQGoP6dpAAB5/UlEQVR4nO3dd1xV5R/A8c9lg4IbRQVn7hmWYu49Mi3LmaOyX+YoMzNRK83VMHPkzJWaq9w5McW9cKSZmeZWECcoMi5wfn88XRAB5V7GuRe+79eLl5zDPfd+eTzc+z3PeZ7vY9A0TUMIIYQQQgid2OkdgBBCCCGEyNkkIRVCCCGEELqShFQIIYQQQuhKElIhhBBCCKErSUiFEEIIIYSuJCEVQgghhBC6koRUCCGEEELoShJSIYQQQgihK0lIhRBCCCGEriQhFUKIVIwaNQqDwUBgYKDeoSTTqFEjDAaD3mEIIUSGkIRUCGFTLl26hMFgoFWrVqk+5uDBgxgMBnr37p11gQkhhLCYJKRCCCGEEEJXkpAKIYQQQghdSUIqhMgxSpYsScmSJVP82bPGZP74449UrlwZFxcXfHx88Pf3JyoqKsXHnjx5ki5duuDl5YWTkxMlSpRg4MCB3LlzJ8njTMMPevfuzd9//81rr71GwYIFMRgMXLp0yaLfMTY2lu+//57q1avj6upKnjx5aNy4MRs3bkz22Pj4eObOncuLL75I/vz5cXNzo2TJknTo0IHdu3cneeyqVato2LAhnp6euLi44O3tTatWrVi7dq1FcQohxOMc9A5ACCGs3XfffUdgYCCdO3fm5ZdfZtOmTXz11VccP36czZs3J0lk169fT6dOnbC3t+eVV17B29ubv/76ix9++IGtW7dy6NAh8uXLl+T5z58/T506dahcuTK9evXi7t27ODk5mR2npml07tyZ1atXU65cOfr3709ERAQrV67k5ZdfZsqUKXzwwQcJj/f39+ebb76hTJkydOvWDXd3d65fv86ePXvYsWMHDRo0AGDmzJn069cPLy8vXn31VQoUKEBwcDCHDx9m7dq1dOjQwbKGFUKI/0hCKoSwSefPn2fUqFEp/uzatWsZ+lrbt28nKCiIypUrAzBu3DjatGnD1q1bWbJkCT169ADgzp079OjRg0KFCrFv3z58fHwSnmPZsmV069aNzz//nGnTpiV5/n379vHZZ5/x5ZdfpivOJUuWsHr1aho2bMi2bdsSktoRI0bg6+vLkCFDaNeuHaVKlQJg7ty5FCtWjJMnT+Lm5pbwPJqmce/evYTtuXPn4uTkxB9//EGhQoWSvOaTvb5CCGEJSUiFEDbp33//ZfTo0VnyWj169EhIRgEcHBwYP348AQEB/PTTTwkJ6aJFiwgPD2f69OlJklGArl27MnHiRJYvX54sIS1SpAgjR45Md5wLFy4E4JtvvknSw1q8eHE++ugj/P39+fnnn5O8lpOTEw4OST8KDAYD+fPnT7LP0dERR0fHZK9ZoECBdMcthBCSkAohbFLLli3ZsmVLij87ePAgfn5+GfZa9evXT7avVq1auLq6cuLEiSSva/r3/PnzyY6Jiori9u3b3L59m4IFCybsr169ukW36J90/PhxXF1defHFF5P9rFGjRgBJ4u3UqROzZs2iSpUqdO7cmYYNG+Ln50euXLmSHNupUyeGDRtGlSpV6NKlC40aNaJevXrkzZs33TELIQRIQiqEEM/k6emZ6v7r168nbN+9exeA6dOnP/X5IiIikiSkhQsXzoAoITw8HG9v7xR/VqRIEQDCwsIS9k2dOpXSpUuzcOFCxo4dy9ixY3FxcaFTp0589913CTEOHTqUAgUKMGvWLCZNmsR3332Hg4MDbdq0YfLkyQlDAIQQwlIyy14IkWPY2dkRGxub4s8eT9SeFBoamur+PHnyJGx7eHgAcOrUKTRNS/WrRIkSSZ4no1Zc8vDw4ObNmyn+zLTfFCOo2/CffPIJp0+f5v
r16yxdupT69euzaNEiunfvniS+Pn36EBQUxK1bt1izZg2vvfYa69evp23btsTFxWVI/EKInEsSUiFEjpEvXz5CQ0OTJaURERGcO3cu1eP27NmTbF9QUBCRkZHUqFEjYV/t2rUBOHDgQMYEbKaaNWsSGRnJ4cOHk/1s165dAEnifVzRokXp2rUrW7Zs4bnnnmP79u1ERkYme1yBAgXo0KEDK1asoEmTJpw5cybF4QlCCGEOSUiFEDlGrVq1MBqN/Pzzzwn7NE3D39+fiIiIVI9bvHgxp0+fTtiOjY1l+PDhAPTq1Sth/1tvvYW7uzsjRoxI8niTR48eJYwzzQymWPz9/TEajQn7r1+/zqRJk3BwcEjo+YyOjmbHjh1ompbkOSIiInjw4AGOjo7Y29sDsHXr1mRJvNFoTBii4Orqmmm/kxAiZ5AxpEKIHGPAgAEsWLCAPn36EBAQQKFChdizZw/379+nevXq/PHHHyke16xZM+rUqUOXLl3Inz8/mzZt4s8//6Rly5a8+eabCY8rVKgQy5Yt44033qB69eq0atWKChUqEBUVxeXLl9m1axd169ZNdTJWevXo0YPVq1ezbt06qlWrxssvv5xQh/TOnTt89913lC5dGoDIyEiaNm1K6dKlqV27Nj4+Pjx8+JDffvuNkJAQPv3004SJVp07d8bNzY169epRokQJjEYjAQEB/PXXX3Tu3DlZRQEhhDCXJKRCiByjatWqbNmyheHDh/Prr7+SO3du2rRpw7fffkvnzp1TPe7jjz+mXbt2TJkyhX///ZdChQoxbNgwPv/882TjP9u2bcvx48f59ttv2b59OwEBAeTKlYvixYvz1ltvJUlgM5rBYODXX39lypQp/PTTT0ybNg0nJyeef/55Bg8ezCuvvJLw2Fy5cvH111/z+++/s2fPHkJDQ8mXLx8VKlTg66+/TtIeEyZMYMuWLRw+fJgNGzaQK1cuypYty+zZs3n77bcz7fcRQuQcBu3J+zVCCCGEEEJkIRlDKoQQQgghdCUJqRBCCCGE0JUkpEIIIYQQQlfpSkgnTJiAwWBg0KBBT33crl278PX1xcXFhdKlSzNr1qz0vKwQQgghhMhGLE5Ijxw5wpw5c6hWrdpTH3fx4kXatGlD/fr1OX78OMOHD+eDDz5g1apVlr60EEIIIYTIRixKSB8+fEj37t358ccfyZcv31MfO2vWLHx8fJg8eTIVK1akT58+vP3220ycONGigIUQQgghRPZiUR3S/v3707ZtW5o1a8bYsWOf+tgDBw7QokWLJPtatmzJvHnzMBqNODo6JjsmOjqa6OjohO34+Hju3r1LgQIFMmzNZyGEEEIIkXE0TePBgwcULVoUOzvz+jzNTkiXL1/OsWPHOHLkSJoeHxISQuHChZPsK1y4MLGxsdy+fRsvL69kx0yYMIHRo0ebG5oQQgghhNDZ1atXKV68uFnHmJWQXr16lQ8//JBt27bh4uKS5uOe7NU01eJPrbfT39+fwYMHJ2yHhYXh4+PDP//8Q/78+c0JOccyGo3s3LmTxo0bp9gLnRq7WbPQPD3RXnstE6OzXpa2W04mbWa+iIgISpQoAcC///5Lnjx5dI7INsi5Zpmc3G6Ggwexb9eO2O3boXr1NB+Xk9ssPe7evUu5cuVwd3c3+1izEtKjR48SGhqKr69vwr64uDh2797NDz/8QHR0NPb29kmOKVKkCCEhIUn2hYaG4uDgQIECBVJ8HWdnZ5ydnZPtz58/f6rHiKSMRiNubm4UKFDAvD+mESMyLygbYHG75WDSZuZ7/II+f/785M2bV79gbIica5bJ0e3WsCH89BM0agRm3ELO0W2WASwZXmlWQtq0aVNOnTqVZN9bb71FhQoV+PTTT5MlowB+fn5s2LAhyb5t27ZRq1Yt+U+2VhcvwubN0K+f3pEIIYQQlsudG3r00DsKkQZmjTh1d3enSpUqSb5y5cpFgQIFqFKlCqBut/fs2TPhmL59+3L58mUGDx
7MmTNnmD9/PvPmzWPIkCEZ+5uIjHPwIHz+Ody9q3ckQgghhGVmzoRRo/SOQqRRhq/UFBwczJUrVxK2S5UqxaZNmwgMDKRGjRqMGTOGqVOn0rFjx4x+aZFRXn8drl4FGa8rhBDCVoWHw/37ekch0siisk+PCwwMTLK9cOHCZI9p2LAhx44dS+9Liazi6Ki+IiLAwQFSGM8rhBBCWLVPP9U7AmEGWctepCwsDLy94eef9Y5ECCGEMM+2bfDokd5RCDNIQipSlicPfPcdNGumdyRCCCFE2t28CW3awPLlekcizJDuW/YiG3vrLb0jEEIIIcxTuDD88w8UKaJ3JMIM0kMqnm7lSpmlKIQQwjbEx4OmQenS4OamdzTCDJKQiqe7fh3OnlV/4EIIIYQ1W74cnn9eTcoVNkVu2Yun++gjvSMQQggh0qZUKXj5ZciVS+9IhJkkIRXPpmmwZw+89BKksBqXEEIIYRX8/NSXsDlyy1482x9/qPWAt2/XOxIhhBCZafduaN8eSpQAg8G25hDMmweHDukdhbCQJKTi2WrUUMuJtmihdyRCCCEy08OHUKkSfPNNxs9SX7AAGjeGQoXA3R18fTOu1nV8PMyYAVu3ZszziSwnt+xF2tSurf7VNHXVLIQQIvtp00Z9QcavdPT77/DKKyrZzZcP1qyBHj3UioCdO6fvue3s4MgRiInJmFhFlpOEVKTdu+9C3rzw7bd6RyKEEMLWLFmSdPuTT2DnTlVeMD0JaXw8hIRA0aLg4pK+GIVu5Ja9SLvq1aFqVb2jEEIIkV3cvw8FC6bvObZsUWNez53LkJCEPqSHVKTdgAF6RyCEECK7+OknCAqCH35I3/PUq6fGp5YtmzFxCV1ID6kwz7VrMGmSFMoXQghhuXXr4L33YPZsVcg+PTw84M03ZX6DjZOEVJjnzz/hyy/h6lW9IxFCCKG3119XieDTvp4sxbR8uRozOmsWvPVW+l5/zBiYODF9zyGsgtyyF+Zp0UL1kubOrXckQgghMtrDh3D+vPo+JkZNFjpxApycVDmoJ1WqlPLnwcWLqqapszNUqZK4/8cfYeBAWLgQunRJf7zR0WqGvbB5kpAK89jZqTefyEj1ZpUnj94RCSGEyChBQapWqMns2eqrRAm4dCn547/8Mvm+f/+FRo1UErtqVeIynpMmwdChMH26+nlIiNpvb69qk1pi7FjLjhNWRy4rhPni4qByZVVLTgghRPbRqJGaI/DkV0rJaEpMyWhoKKxeDW3bJv5s6lT1+dG3L3h5JX698IL5cWqaev7oaPOPFVZJekiF+ezt1ZWulIASQghhcuGC6l0NDVU9o48no5D2pDYtTpyAjh1hx46kPbrCZklCKizToYPeEQghhLAWFy6ontGbN1Uy+vLLmft6NWuquqNlymTu64gsI7fsheW2b4fu3aUElBBC5GQXL6peyqxKRo1G9W/ZslLqKRuRhFRYTtMgPBwePNA7EiGEEOn1rPJNj3+ZXLyYOEEpK5JRgM8+UxVfpDMkW5Fb9sJyzZurLyGEELbP3ATv0iXVMxoSAr/+mjXJKECzZupWvfSOZitm9ZDOnDmTatWq4eHhgYeHB35+fmzevDnVxwcGBmIwGJJ9/f333+kOXFgJTYM9e+DGDb0jEUIIkVUuXVI9o8HBKhlt1y7rXrtZM3j33ax7PZElzOohLV68OF999RVl/1sv9qeffqJ9+/YcP36cypUrp3rc2bNn8fDwSNguZGm9MWF9Hj1Sb0TDhqkvIYQQtmvBAli0SK3KFxUF5crB4MFqvsDjevWCy5ehfHl1q37VquTP9eabKnnMKJqm6pi+/TZUrJhxzysyTHpGUZiVkLZ74gpo3LhxzJw5k4MHDz41IfX09CRv3rwWBSisXK5ccPiwGlwuhBDCtv3+O7zyiqoznS8frFkDPXqAg4Na7hMgPh6OHlXfnz2rvlLStWvGxnb9uqo92q6dJKRWKCYG+ve3t/h4i8eQxsXF8csvvxAREY
Gfn99TH1uzZk2ioqKoVKkSI0eOpPEzaoZFR0cT/Vix2/DwcAC+/lpj7FijDBtJA+N/sxBN/2aqUqVUseOoKHB0zPzXy0RZ2m7ZhLSZ+R5vK6PRKG2XRnKuWcasdluwIOn2oEHY79gBy5cT99prifvv3Uvri6cxyjQoXBhOn1YrBmbyOSDnmnnu34dOnewJDLR8rrxB08zrYD116hR+fn5ERUWRO3duli5dSps2bVJ87NmzZ9m9eze+vr5ER0ezePFiZs2aRWBgIA0aNEj1NUaNGsXo0aNT+EkYjRvfp1+/Ezg6yuw6a1Jm7VqK797Nru++k4HmQjxDVFQUXf5bx3v58uW4uLjoHJEQqav/6aeElyjBH/366RaD8927YDAQnS+fbjGIlN286crYsXW4etUDCAfyEBYWlmSoZlqYnZDGxMRw5coV7t+/z6pVq5g7dy67du2iUqVKaTq+Xbt2GAwG1q9fn+pjUuoh9fb2BsIADxo3jmfFijhkFEDqjEYjAQEBNG/eHMcs6LU0HDiA4fhx4v/3P3Vrx0ZldbtlB9Jm5ouIiCDffx+soaGhMqQpjeRcs0x62s2waBH2779P7N69qhi9Tuw++gi79euJPXdO9ZBmMjnX0uboUQMdOthz86bqiMqfP4y7d/NalJCanTk4OTklTGqqVasWR44cYcqUKcyePTtNx9epU4clS5Y89THOzs44OzunsF8jOhp27rSjcWM7Nm6EEiXM/Q1yFkdHx6z5Y2rQABo0wPLRI9Yly9otG5E2S7vH20nazXzSZpYxu93WrYP+/WH2bBxffDHzAkuLsWOhWzccU8gNMpOca6nbsAG6dFFzm0HNf/v551heeMGy50v3ZYamaUl6M5/l+PHjeHl5WfRaa9fGUbCg+v70aahTB44ds+ipRGZ48ABGjlTLuQkhhLBdy5erSUyzZsFbb+kdDeTNC3Xr6h2F+M/06WoFcVMyWq8e7N+vppRYyqyEdPjw4ezZs4dLly5x6tQpRowYQWBgIN3/Kwfh7+9Pz549Ex4/efJk1q5dy7lz5zh9+jT+/v6sWrWKAQMGWBTsCy9oHDgAzz2ntkNCVMfcb79Z9HQiozk6qjexkyf1jkQIIYSlfvwReveGhQvVv3qKjVW9Txs26BuHAFSBhSFDYMAA9T2oXtKAAChQIH3PbdYt+5s3b9KjRw+Cg4PJkycP1apVY8uWLTT/b7We4OBgrly5kvD4mJgYhgwZwvXr13F1daVy5cps3Lgx1UlQaVG2LBw4AO3bw759EBGhvp82DXQcby0AXFxU+Q/77HLjXgghcphJk1Stz+nTE5cEBfW+rkcN8YcPoUYN8PbO+tcWSURGqgpgj5ecHTYMxo3LmGG9ZiWk8+bNe+rPFy5cmGR76NChDB061OygnqVAAdi+XdXlXblSZen9+6sldb/+OkvGO4vU2NtDdLS6bV+lit7RCCGEMMfUqaqMX9++6sukRAm1OlNWy5tXDRsQurp1S5WnPXhQbdvbw4wZ8L//Zdxr2Gzq5uICy5bBp58m7ps4UQ15iYzULy6Burpu00a9qQkhhLAdly6p5Xae/NIjGT11Sg0Dk88SXf3zD/j5JSajuXOrERQZmYyCDSekoHpCv/pKXTyZekV//RWaNlXZvNDJRx/Btm1y614IIYTlfvsNRozQO4ocbe9elYz++6/aLloU9uyB1q0z/rVsOiE1ee89la3nyqW2DxxQk/FksrdOSpaEChUSr6yFEEIIc/n7q1I60rmhixUroFkzuHtXbVetqnpJa9TInNfLFgkpqDvEe/ao7B3g/HmV1e/bp29cOdbNm/Dii+rySgghhDBHcLD6N08efePIgTRNzcfp0kVNCQFo3lzlWJk5tyzbJKSgFpE4eDBxLs2dO+r2/cqV+saVI3l6qssoWRJRCCGEOR48gIoV4Ycf9I4kx4mNhfffV7PnTd5+GzZuzPxrg2yVkILK3vfuVdk8qOy+c2f45hu5e5ylDAZVy87SJR
uEEELkTG5uMH8+vPqq3pHkKA8eqJn0jy+8OWYMzJ2ryoxntmyXkILK4jduTLq4xKefqjqlsbH6xZUjHT0KixbpHYUQQghbYW8Pr70GxYrpHUmOceOGWmho82a17egIixerxRcNhqyJIVsmpKAac948ld2bzJqlsv8HD/SLK8dZv14VWjYt6SCEEEKkZtUqtTqU0ah3JDnGqVNQuzacOKG28+ZVhXLefDNr48i2CSmorH7kSJXlm7qbN2+Ghg3V1YDIAsOGqV5SWa1ACCHEs8TEqA6MrLhHLNi+Xa1Df+2a2i5ZUq1J36hR1seSI7KEN99U2X7evGr7+HF1NXDqlK5h5Qyurur2y40b6o1GCCGESE3XrjLMK4ssWKDqiYaHq+1atdTE8IoV9YknRySkoLL9/ftV9g/qaqBePXV1IDLZzZtQurRaWksIIYRIyfLlcPu23lFke5oGn3+uZs+b5tW0bw+BgVC4sH5x5ZiEFFTWf/Bg4sTv8HB1dTB/vr5xZXuFC6txEx066B2JEEIIa3TnDrzzDqxbp3ck2VpMDPTsmXR+zQcfqKG7psWF9JKjElJQudHOnepqANTVwTvvwGefSVmoTPXGG1LgWAghRMoKFIBLl7J+Jk0Ocu8etGwJS5aobYMBvv8epkyxjsWwclxCCuoqYNUqdVVgMnasumowrUogMsHixWp8kBBCCGHy6JGaVV+oEDg76x1NtnTpErz0krotD2rNmlWrYNAgHYN6Qo5MSEFdDUyZApMnJ9bYWrIEWrVSVxEiE3h4qJllUs5DCCGEybffQrVqEBendyTZUlAQ1KkDZ86o7UKFVGJqbesO5NiE1OTDD9VVgqur2g4MVFcRly7pGVU21b49zJwp5TyEEEIkeu01GD7cOu4bZzPr16tSlzdvqu3y5dVcmtq19Y0rJTk+IQV1lbBzp7pqAHUVUbs2HDmib1zZUmysmm0vGb8QQgiAqlWhRw+9o8h2fvhB5TePHqnt+vVVtaHSpfWNKzWSkP6ndm111VC+vNoODVVXFTLhL4PFxKjBu7/9pnckQggh9BQXp+YVHD6sdyTZSnw8DB4MAwcmLpLYpYuqx54/v76xPY0kpI8pXVpdPTRooLYjI9XVxbRp+saVrbi5wd9/w4ABekcihBBCT6GhcPWq3KrPQI8eqaI233+fuM/fH37+WU1ksmaSkD4hf351FdGtm9rWNNWh99FHMt46wxQooP41DWoRQgiR83h5wd694OurdyTZQmgoNGkCq1erbXt7mDMHxo+3jdW7bSDErOfsrGbcjxiRuG/yZHXVYRqLIdLpu+/UuKHISL0jEUIIkdX++ANOntQ7imzj7Fnw84NDh9R27txqZNy77+oblzkkIU2FwaBqk/74Y+LdhDVroHFjdRUi0um119Slm5OT3pEIIYTIat9+C2+9JSvSZIA9e6BuXbhwQW0XK6Y6nlu10jcuc5mVkM6cOZNq1arh4eGBh4cHfn5+bN68+anH7Nq1C19fX1xcXChdujSzZs1KV8BZrU8f2LQJ3N3V9uHDqp7X2bP6xmXzSpVSS4nK2CEhhMh5FixQvTymQuDCIitWQLNmcPeu2q5WTU3Qrl5d37gsYVZCWrx4cb766iuCgoIICgqiSZMmtG/fntOnT6f4+IsXL9KmTRvq16/P8ePHGT58OB988AGrVq3KkOCzSosW6gqkWDG1ffGi6hrfvVvfuGye0Qi9eqlCsEIIIXKGhw9VPWofH70jsVmaBl9/rWbPx8SofaZcpXhxfWOzlFkJabt27WjTpg3lypWjXLlyjBs3jty5c3Pw4MEUHz9r1ix8fHyYPHkyFStWpE+fPrz99ttMnDgxQ4LPStWrq7EZpquOe/egeXNYulTfuGyao6O6OpaVm4QQImc4fx6KFFGZk7BIbCz07QvDhiXue+cdNWbUw0O/uNLL4jGkcXFxLF++nIiICPz8/FJ8zIEDB2jRokWSfS1btiQoKAijDSYhxYqpvyHTuIyYGO
jeXc1gk2EwFlq4UF3iCSGEyP4KFIDPPoNatfSOxCY9eADt2qkpGCbjxqn5Lra+CKKDuQecOnUKPz8/oqKiyJ07N2vWrKFSpUopPjYkJITChQsn2Ve4cGFiY2O5ffs2Xl5eKR4XHR1NdHR0wnZ4eDgARqNR90TWxUXdYf7gA3vmzVP5/IgR8O+/8UybFmc1J4SpnfRurzS5cwfD2rVo77yjdyS21W5WQtrMfI+3lTW8r9kKOdcsY1Xtlju3qtoOVn13zKra7D/Xr0OHDg788Ycad+vkpPHjj3F07aoRG6tzcP9JT3uZnZCWL1+eEydOcP/+fVatWkWvXr3YtWtXqkmp4YkBy9p/XYlP7n/chAkTGD16dLL9O3fuxM3NzdyQM8XLL0NMzHMsXqx+7/nz7Th27DZDhx7Bzc1KzgwgICBA7xCeqcjBg/hOmsROe3siPT31DgewjXazNtJmaRcVFZXw/Y4dO3Cx9orVVkbONcvo3W5l1q4lzsmJS23a6BqHOfRuM5NLlzwYM6YOd+6oXq/cuWMYNuwwefLcYdMmnYN7zKN01MY0aFr6bjY3a9aMMmXKMHv27GQ/a9CgATVr1mTKlCkJ+9asWUOnTp149OgRjql0J6bUQ+rt7U1wcDAFTEXVrcSKFQbeeceemBiVYFetqrFuXazug4qNRiMBAQE0b9481Xa2GvHxaopgwYJ6R2Jb7WYlpM3MFxERQb58+QAIDQ0lb968+gZkI+Rcs4y1tJvdkCHg4kL82LG6xZBW1tJmAAEBBrp0sefBA5VnlCypsX59LBUq6BpWiu7cuYOXlxdhYWF4mDmg1ewe0idpmpYkeXycn58fGzZsSLJv27Zt1KpV66n/wc7Ozjg7Oyfb7+joqPuJ8aQ334QSJVQFo7t34dQpA/XqObJxI9SooXd01tlmKfLyUoNyHzxIXMlJRzbTblZE2iztHm8naTfzSZtZRvd2+69zypaK/endZvPnw3vvkXBL/sUXYf16A4ULW+f5n562MmtS0/Dhw9mzZw+XLl3i1KlTjBgxgsDAQLp37w6Av78/PXv2THh83759uXz5MoMHD+bMmTPMnz+fefPmMWTIEIsDtkb168P+/VC6tNq+cUPt27pV37hsTrNmap1WIYQQ2UdkpFrPMj5e70hshqbByJFq9rwpGW3fHnbuhCem5mQbZiWkN2/epEePHpQvX56mTZty6NAhtmzZQvPmzQEIDg7mypUrCY8vVaoUmzZtIjAwkBo1ajBmzBimTp1Kx44dM/a3sALly8OBA1C7ttp++BDatlUz30Qaff45DB+udxRCCCEy0ubN0KkTXLqkdyQ2IToaevRQs+dNPvhATai2kmk0mcKsW/bz5s176s8XLlyYbF/Dhg05duyYWUHZKk9PdfXy5pvqYjAuDv73P1VIf+xYsJOFWp+uWTO9IxBCCJHRXntNLW9ouo0oUnXvHrz6KuzapbYNBvj+e/jwQ33jygqSImUwV1f45ZfEqhYAEyaoeqWpDLUVj/vnH2jUCIKD9Y5ECCFEeoWFqX/LlNE3Dhtw8aJak96UjLq6qs6tnJCMgiSkmcLODr77DqZNS+wVXb5crex0546+sVm9woXVX6FpYV4hhBC2KS4OXngBxozROxKrd+QI1KkDf/+ttgsVUndcO3TQNawsJQlpJhowANauTRzzsWePuvq5cEHXsKxbnjxqvFHlynpHIoQQIj0MBjVerX17vSOxauvWQcOGEBqqtsuXh4MHE+ek5BSSkGaydu1U97tpVtw//6iroIMH9Y3L6p08qUZwCyGEsE12dmoyU7VqekditaZOVWNGIyPVdoMGSav25CSSkGaBWrVUAlqxotq+dQsaN1ZjQ0Qq5s+H8eNV7QshhBC2ZeNGNas3JkbvSKxSXBx89JEaH2r6mOvWDbZtg/z59Y1NL5KQZpGSJdVVT+PGajsqCl5/Xc2ek5wrBWPGqCz+KUvMCiGEsFLh4XD/Pjg56R2J1Xn0CN54AyZPTt
w3YgQsXgwprAmUY0hCmoXy5oUtW1R9MVCJ6ODB6gopLk7X0KyPuzs4OkJICERE6B2NEEIIc3TtCitX6h2F1QkNhSZNYM0atW1vr+qVS2lISUiznJMT/PQTfPFF4r5p09QYEsm7nvDggRrdPWuW3pEIIYRIq59+Ur2jIomzZ9UckkOH1La7uxrZ0KePvnFZC0lIdWAwwKhRsGABOPy3NMGGDar8ZkiInpFZGXd3WLIE3n5b70iEEEKkxaVLavH1nTv1jsSq7NkDfn6q1ihAsWJqX8uW+sZlTSQh1VHv3uoWvoeH2g4KUldPf/2la1jWpV07yJdPBtoKIYQtKFlSJaVS6inBsmVqIcJ799R2tWpqikT16vrGZW0kIdVZ06awbx94e6vty5dVrVK5uHzMli2quHJUlN6RCCGESE1oKMTGQpEiMiAS1Y8yYYKaPW8qNtCypeoZLV5c39iskZwxVqBKFXW19PzzajssTJ20ixfrG5fVKFVKXUrKIFshhLBevXurCRECo1GNXBg+PHFfnz5qeJ7prqhIykHvAIRStKgqoN+lixrkbDRCz55qvMlnn+Xw6kfly8O8eXpHIYQQ4mnGj1eTUXO4Bw9UWaetWxP3jR8Pw4bl8M/yZ5AeUiuSO7daavT99xP3ffGFmtMjtYVRKzetW6d3FEIIIVJSowbUr693FLq6dk01gSkZdXKCn38Gf39JRp9FElIr4+AA06fDt98m7lu4ENq2Vbfyc7Rly9T9DiGEENbjyBG1GPvNm3pHoquTJ9XE5D/+UNv58kFAgBpDKp5NElIrZDDAkCGqprBp1Ybt2+Gll+DKFX1j09WSJTB3rt5RCCGEeFxcHHh5QcGCekeim61boV49uH5dbZcqpVZnbNBA37hsiSSkVuyNN2DHDihQQG2fPq2uvo4d0zcu3bi4qH+DgtRMTiGEEPqrUweWL1fLDuVA8+apu5im4bMvvqgmKleooG9ctkYSUitXt646scuWVdvBweqKa+NGfePSzYUL6q991Sq9IxFCCDFpEvzzj95R6ELTYORINXvetPx3hw6qbKOnp66h2SRJSG1A2bJw4IBKTkFVP3rlFZg5U9+4dFG6tCpH8PrrekcihBA52/37MHmyGkOaw0RHw5tvwrhxifsGDYJffwU3N93CsmmSkNqIggXh99/VbXyA+Hjo1w+GDlXf5yj166tbQ1J6QAgh9JM3L5w7p+oV5iB370KLFrB0qdo2GGDKFPj++xw7aiFDSEJqQ1xc1DCdoUMT9337rXoviIzULy5dzJ8PlStLUiqEEHq4fBlu31Yzb3NQFnbhgrpbuXu32nZ1hTVr4IMP9I0rO5CE1MbY2cHXX6vb9aaV2X75Ra2Te/u2vrFlKT8/+N//cmD3sBBCWIGhQ1U3oabpHUmWOXxYffScPau2PT0hMBDat9c1rGxDVmqyUX37go8PdOqkxpTu36/+UDZtguee0zu6LFCxovoSQgiR9aZPV72kOaTa+9q1qp6o6W5khQrq87ZUKV3DylbM6iGdMGECL7zwAu7u7nh6etKhQwfOmi4VUhEYGIjBYEj29ffff6crcAFt2qjbBl5eavv8eZWU7t+vb1xZ6uuv1coBQgghsobRqCY2+PrqHUmWmDIFXnstMRlt2FB9zkoymrHMSkh37dpF//79OXjwIAEBAcTGxtKiRQsiIiKeeezZs2cJDg5O+HouR3TjZb7nn1dloapUUdt37kCTJvDrrznjqpXz5+HqVb2jEEKInGHPHlXt5NIlvSPJdHFx8PHHdgwalDgyoXt3VQQ/Xz5dQ8uWzLplv2XLliTbCxYswNPTk6NHj9LgGcsReHp6kjdvXrMDFM/m4wN796pKSNu3q3IU3bo50KtXWVq31ju6TDZnTo65ZSSEELorWlRlZT4+ekeSqR49gm++eZFDhxInbI0cCV9+KR85mSVdY0jD/ltcPX/+/M98bM2aNYmKiqJSpUqMHDmSxo0bp/rY6OhooqOjE7bDw8
MBMBqNGI3G9IScbbm5wbp18P779ixapDq+f/qpMo6ORqZONeKQnUcLx8ZiWLkSrUOHdBeAM51fcp6lnbSZ+R5vK3lfSzs51yyToe3m4wNjxqjuQ1M1+Gzm5k149VU7goLUeDh7e40ZM+J46y1NFgl8hvScYwZNs2yKnKZptG/fnnv37rFnz55UH3f27Fl2796Nr68v0dHRLF68mFmzZhEYGJhqr+qoUaMYPXp0sv1Lly7FTSrOPpWmwcqV5Vi2LHHCj69vCEOGBOHqmj3fPNxu3qRp//4cGTKEkDp19A5HiGeKioqiy3+1G5cvX46LaVlcIayVplFj+nSuNGnC3UqV9I4m01y7lpsxY+pw82YuAFxdjQwdeoSaNW/pHJltePToEd26dSMsLAwPDw+zjrU4Ie3fvz8bN25k7969FC9e3Kxj27Vrh8FgYP369Sn+PKUeUm9vb4KDgylgWthdPNWiRfH07etIbKzqLa1RQ2Pt2liKFtU5sMxy7RqYeR6mxGg0EhAQQPPmzXF0dMyAwLI/aTPzRUREkO+/QWihoaEynCmN5FyzTIa027172HfsSPzw4WjNmmVsgFZizx4Dr79uz7176p58gQKRbNwIzz+fPW4x7tlj4Pvv7Th50sCVKwZGjozj888ztnTinTt38PLysightaiVBw4cyPr169m9e7fZyShAnTp1WLJkSao/d3Z2xtnZOdl+R0dHeRNKo549jQQH7+e7717i/n0DJ04YqF/fkU2bEidAZSum6Y5XrmTI2CY518wnbZZ2j7eTtJv5pM0sk6528/SE3bvVTOhsOIhy6VJ4663EtVaqVdP48MPdPP98k2xzrkVFqc//N99Uy5za29vj6Jixixqkp63MmmWvaRoDBgxg9erV7Nixg1IW1jw4fvw4XqZaRSLTVK16h8DAWEqUUNtXr8JLL6mJT9nS9OlQtapa100IIUTG2LsXTpxQiWg2S0Y1DcaPV/O0TMloq1awc2csBQpE6RtcBmvTBiZMgM6d1QJb1sashLR///4sWbKEpUuX4u7uTkhICCEhIUQ+tm6lv78/PXv2TNiePHkya9eu5dy5c5w+fRp/f39WrVrFgAEDMu63EKmqVEmVhapVS22Hh0Pr1tm0dGenTrBokdTjEEKIjPTNN/Dpp3pHkeGMRrXg34gRifvefRfWrwd396SPXbhQ5eLp+ezMiOfIzsy6ZT9z5kwAGjVqlGT/ggUL6N27NwDBwcFcuXIl4WcxMTEMGTKE69ev4+rqSuXKldm4cSNt2rRJX+QizYoUUcubdeum/tBiY9WtiYsXYdSobHTBW6hQ4hpumpaNfjEhhNDR6tWqyHU2Eh4Ob7wB27Yl7pswQeXdBoNKVkXWMishTcv8p4VPpP5Dhw5l6NChZgUlMl6uXOo95aOPYNo0te/LL1VSOncuODnpG1+GGjRIZd0//KB3JEIIYbuMRggNhWLFoHBhvaPJMNeuQdu2cPKk2nZygp9+gv8KXwidmHXLXtg2e3uYOhW+/z6x83DxYmjZEu7d0ze2DFWlClSrpncUQghh2xYuhOeeU0lpNnHiBNSunZiM5sun5lVIMqo/SUhzoEGD4NdfwVT6MDBQTXbKNivB9emjBgYJIYSwXOfOqtfC01PvSDLE1q1Qvz7cuKG2S5eGAwfUvvR4/fXE+V6pfR06lP74s7vsUVxLmO2111Qi2q4d3LoFZ85AnTrw22+JE6Bs2qNHajWRbt3UzHshhBBpFx8PHh7QsaPekWSIH3+E999PXFyqdm01pyIjcu1KlSB37uT7L16E3bvVjPYqVdTnrJ4ePoTz59X3MTEQEqJ6jJ2c1O+gN0lIc7DatdUM/DZt4OxZtVxaw4awfLlKVG2aoyNs2qSSUUlIhRAi7e7cUT0UP/4IT0xitjXx8WoN+gkTEve9+iosWZLulaYTfPll8n3//quazskJVq1S8zj0FhQEj6/aPnu2+ipRwjrukMot+xyudGnYvz/xlsWjR9ChQz
aYD+ToCMeOqR5SIYQQaRcfD82bW0e3WTpER6si8I8nox99BL/8knHJaEpMyWhoqJpM3LZt5r2WORo1UgVonvyyhmQUpIdUAPnzQ0CAKgW1bJl6Lxo4EC5cgIkTwc5WL1vs7dV9ifXr1W0nKQMlhBDPVqgQzJihdxTpcveu6lzZs0dt29nB5Mnqsy0zXbigeiFDQ1XPaHqS0ddfhz//NO+YRYvgxRctf009SUIqADXGZckStQLn+PFq3/ffw+XLakx7Zl5NZqodO9TA/BMn5Na9EEI8y+jR4OsLL7+sdyQWu3AhcSgagKurGor2yiuZ/7qNGqnhb6tWpb8JL11K/B3S6tGj9L2mniQhFQns7GDcOJWU9u2rBn+vXg3Xr2fc4O8s17Kl+osuW1bvSIQQwrrFxamBhvnz6x2JxQ4dSpysC6p86oYN8MILmfu6Fy+qntGMSkZB/VfkJLZ6M1Zkoj59YOPGxFmDhw6Bn5/5V2pWwWBQyaimwWMriAkhhHiCvb3K3mx0ae81a1RSaEpGK1ZUE3ezIhlt1EjNWs+oZDSjPass1eNfepGEVKSoZUvYu1ct0AHqVoSfX+J4HJvz2Weq2Gp0tN6RCCGE9fnjD9i3T31vg+PtJ09WUwUiI9V2w4bq1ylZMnNf99IllQSHhKj63taYjELKk5lS+9KLJKQiVdWrq6tL06JH9+5Bs2ZqLI7NefttteqIs7PekQghhPWZMQP69dM3I7FAXBx8+KGaPW8K/c03VRH8fPky97UvXVI9o8HBKhm1+XKJOpMxpOKpihdXvaKdOqk/8JgY6NpV/SF++qkNXUiXLq2+QJURsNnSAUIIkQlmzlTdfDbzpg4REdC9O6xbl7hv5EhVFzQrfo1evdTE3/Ll1a36VauSP+bNN1VHjjVYsEDNwv/zT4iKgnLlYPBg1YbWQBJS8UweHmpYUb9+MHeu2ufvr27jT5+uSn7ajN691YD9SZP0jkQIIfQXG6t6GMqWhaJF9Y4mzW7eVD2SR46obQcHVeT97bez5vXj4+HoUfX92bOpz7Ho2jVr4kmL339XlQa++Ub1Hq9ZAz16qLbr3Fnv6CQhFWnk6Ahz5qhOxuHD1b4ff1TzhH75Bdzd9Y0vzerUgTx59I5CCCGsw+LFqqzKxYs2k5CeOaPKOpkKuru7q97J5s2zLgY7O7UUpy1ZsiTp9iefwM6dsHKlJKTCxhgMqme0ZEnV0RgTo27j168Pv/2mbu9bvb599Y5ACCGsR5cuULCgzSSju3apgvf376vt4sUTV4kW5rt/H7y99Y5CkYF0wmxdu8L27YkDxv/4Q3U8njypb1xp9vChKmtiut8ihBA5UUyMqhpvI7Nxfv5Z9YKaktEaNVRZQklGLfPTT6rW6Xvv6R2JIgmpsEj9+nDggCqiD6p4fr16qsfU6rm4wPHjahCsEELkRNeugY+P6nK0cpoGY8eqCUJGo9rXujXs3m0zHbtWZ906lYjOng3PP693NIokpMJi5curslC1a6vtBw/Uur2miU9Wy8FBFVl94w29IxFCCH3kygVvvQU1a+odyVMZjWqxls8+S9z33ntq9UCbmbtgZZYvV2NGZ81Sp4C1kIRUpIunp1ou/tVX1XZcHLz7LowYYeXl7AwGVUF52rTES24hhMgp8uWDCRNUGRUrFR6uOjnmz0/c9/XXqkKVQxbPgKlRA774Qv2r53Ok148/qjkgCxeqf62JTGoS6ebmpmbaf/IJfP+92jd+vJoBOX++Fdei/+cfGDpU9RDUq6d3NEIIkfk0TRXQ7NgR2rfXO5pUXb2qktFTp9S2k5OqoanXbPAaNdKfSGbEc6THpEnqI2/69MSlTkGtGFuokH5xmUgPqcgQ9vbqZJ86NbHm/NKlagD63bv6xpaq6tXVu54ko0KInCIqSi2hbMWLg5w4oSbKmpLR/PlVDU1rKE1ky6ZOVXcx+/YFL6/Erxde0DsyxXrPSGGTBg5UxXZdXdX2nj1Qt64Vzx8qWFD9hR
46pHckQgiR+VxdYcUKq51Zv3mzmjR744baLl1aTaCVfoP0u3Qp5bXrTfVc9SYJqchwr7yiJm4WLqy2z55VV7tWm/PNnaveAU33L4QQIhsyrF4N27bpHUaq5sxRebKp4HydOmribLly+sYlsoZZCemECRN44YUXcHd3x9PTkw4dOnA2tfWyHrNr1y58fX1xcXGhdOnSzJo1y+KAhW144QX1RlKxotq+dQsaN1a9p1anZ0/VlVukiN6RCCFEprFbvlwV87Qy8fFq0ZX33lM3rEANcd2xwzrGNoqsYVZCumvXLvr378/BgwcJCAggNjaWFi1aEBERkeoxFy9epE2bNtSvX5/jx48zfPhwPvjgA1atWpXu4IV1K1kS9u1Tg6dBTWrv2BEmT9YxqJS4uqraVZoG9+7pHY0QQmSKuBUr1BR1KxIVBd27w1dfJe4bPFgtZ2ka+iVyBrNm2W/ZsiXJ9oIFC/D09OTo0aM0aNAgxWNmzZqFj48Pk//LQipWrEhQUBATJ06kY8eOlkUtbEa+fLBli6ojt2SJyvk++kgtmzxpkpoMZTU+/RSHLVtg1Ci9IxFCiIxz/jweFy+qcndubnpHk+DOHbUM6N69atvODqZMUQvpiZwnXWWfwsLCAMifP3+qjzlw4AAtWrRIsq9ly5bMmzcPo9GIo6NjsmOio6OJjo5O2A4PDwfAaDRilJqRaWJqJ2toLzs7mDcPfHzsGD9eZaBTp8KFC/EsXhxHrlw6B2jy+uvE+/qCnZ1VtJutsKZzzVY83lbyvpZ2cq5Z6LvveGHzZozvvKN3JAn+/RdeecWBc+cMALi5aSxZEsfLL2tWURpazjXLpKe9DJpmWflyTdNo37499+7dY8+ePak+rly5cvTu3Zvhw4cn7Nu/fz8vvfQSN27cwMvLK9kxo0aNYvTo0cn2L126FDcruroT5vv9dx9mzKhOXJwaLfLcc/cYMeIQefNGP+PILKZpqjdBiEwQFRVFly5dAFi+fDkuLi46RySyM0NsLG63bhGRwuetHs6ezcf48bUJC1NFqvPmjWLkyEOULXtf38BEuj169Ihu3boRFhaGh5mLLljcQzpgwABOnjzJXlNf+1MYnvhgN+XAT+438ff3Z/DgwQnb4eHheHt707hxYwoUKGBpyDmK0WgkICCA5s2bp9gLrZc2baBNm3g6dzYQHm7g3Ll8jBrVknXrYhMmQOnJaDRys2NHileurFYxEc9kreeaNXt83H2TJk3ImzevfsHYEDnXzBQTAyEhGL28rKbd1qwx8MUX9kRFqc//ChU01q+3p2TJurrG9SQ51yxz584di4+1KCEdOHAg69evZ/fu3RQvXvypjy1SpAghT5TTCQ0NxcHBIdXk0tnZGecUlvdxdHSUE8NM1thmrVqpMUNt26q69JcuGWjY0JE1axInQOnpYbFiGMqVw8HK2s3aWeO5Zq0ebydpN/NJm6XRrFkwfDicPw/o226apia0fvxx4rLSjRrB6tUG8uWz3v9LOdfMk562MmuWvaZpDBgwgNWrV7Njxw5KlSr1zGP8/PwICAhIsm/btm3UqlVL/pNzsKpVVVmomjXV9v370KKFmviktwvt2qG99ZbeYQghRPr07q3KPD1lnkdWiIuDDz9Us+dNyWiPHrB1q5r4KgSYmZD279+fJUuWsHTpUtzd3QkJCSEkJITIyMiEx/j7+9OzZ8+E7b59+3L58mUGDx7MmTNnmD9/PvPmzWPIkCEZ91sIm1S0KOzerW7jAxiN6k1qzJjENy3dxMaqwni//aZzIEIIYYGoKHB3VyuV6CgiAl57DaZNS9z3+efw009qfXohTMxKSGfOnElYWBiNGjXCy8sr4WvFihUJjwkODubKlSsJ26VKlWLTpk0EBgZSo0YNxowZw9SpU6XkkwAgd25Yt06trWvy+eeqTJSukxvt7eHvv9WYAiGEsCXHjkGJEvDnn7qGERKibsuvX6+2HRxg/nwYPVrmjIrkzBpDmpYJ+QsXLk
y2r2HDhhw7dsyclxI5iIMDzJih1iweOlTtmz8frlyBX3+FPHl0CMpggNWr5V1TCGF7ihaFd96BChV0C+HMGWjdGi5fVtseHrBqFTRrpltIwsrJWvbCKhgM8MknsGIFmOazbd8O9erp2ElpMKhu2ilTEt9VhRDC2hUpAuPHq6t9HezcCXXrJr5tenuriaySjIqnkYRUWJVOneD338FUgOHPP9WqnseP6xRQVBRMnKiyYyGEsGb374OfHxw5olsIS5ZAy5YqFFATVw8eVBNZhXgaSUiF1XnpJThwAMqWVdvBwVC/PmzapEMw7u7q3pMVrXAihBApevhQ9Y4WLZrlL61pakJqjx6J4//btFETV3UIR9ggSUiFVXruOZWU+vmp7YgINVl09mwdgsmdW73brl+v80wrIYR4iuLFYc0aKFYsS1/WaFTX7J9/nrjvvffUhNXcubM0FGHDJCEVVqtgQXX7/o031HZcnJqN/+mnEB+fxcGcPQuvvgqbN2fxCwshxDPEx6s3Rx0mD4eFqZ7QBQsS9339NcycqdsQVmGjJCEVVs3VFZYvVxOeTL75Brp2VcM7s0yFCnDqlO41/YQQIpnbt+HwYQgPz9KXvXpVTTw1DbF3dlYTU4cOlQIlwnySkAqrZ2enktDp09X3ACtXqhmbt29nYSCVKql/T53KwhcVQohn8PSEoKAsXXv5+HE14dRU6jR/fpWYduqUZSGIbEYSUmEz+vVTwzhz5VLb+/ap0iL/LdOcNfbuhWrV1IsLIYTeZs2Cc+cSr9azwObN0KCBmnAKUKaMmklfr16WhSCyIUlIhU1p21bN2ixSRG2fOwd16sD+/VkUwEsvqeVE69bNohcUQohUPHoE334LGzdm2UvOng3t2qkJ/aAmnh44oCaiCpEekpAKm/P883DoEFSurLbv3IEmTdSqTpnOYFBZscGQxeMFhBDiCW5uagjRgAGZ/lLx8TBsmJo7FRen9nXsqCaeFiqU6S8vcgBJSIVN8vFRd82bNlXb0dFqNv7EiapCU6bbulUF8c8/WfBiQgjxhIAACA1VSWkmT2ePioJu3dTseZMhQ9RYflfXTH1pkYNIQipsVp48qlh+796J+z75RHUWxMZm8os3bKhmWpUsmckvJIQQT4iNVYU+x43L9Je6c0dNIF2xQm3b2akJpt9+m6XDVkUOIFXChE1zcoL586F06cSizDNmqDWUly/PxKLMLi6Jt8mMRnB0zKQXEkKIJzg4qHFLmfy+8++/0Lq1GqsPqjN2xQp4+eVMfVmRQ8n1jbB5BgN89hksWpT4/rxxo+rENM0CzTQbNkD58nDvXia/kBBCACdPqhlFhQpB3ryZ9jIHDqgJo6ZktEgRNaFUklGRWSQhFdlGjx6wZYu6lQ9q0ZI6deD06Ux8UV9feP116SEVQmS++HhV6HPgwEx9mVWr1ERR07zNSpVUWSdf30x9WZHDSUIqspUmTVQJqBIl1PaVK6pC0++/Z9ILFi2qxpKa1rsXQojMYmenBs6PHp0pT69pMGmSmiBqWgmvcWM1gdT0nipEZpGEVGQ7T17Nh4dDq1bw00+Z+KJbt6pK0Vm6nqkQIse4fl2NVy9dWlX4yGBxcarj9eOPE6+te/ZUd50ycWSAEAkkIRXZUpEisGuXKuAMalJq794walQmdWR6e6suBElIhRAZTdPg1Vfhrbcy5ekjItTTT5+euO+LL2DhQjVxVIisILPsRbaVKxesWQODBsEPP6h9o0fDxYvw448Z/EZbqRIsWZKBTyiEEP8xGNQSoZlQZykkRE1UOnpUbTs4qPfHx8vpCZEVpIdUZGv29jB1qhoXZTCofYsWqVv49+9nwgvu2aMmHZiWMhFCiPQID1c9pM8/DzVqZOhTnz6tJn6aklEPD3WLXpJRoQdJSEW2ZzDARx+ppUVdXNS+nTvVsvSXLmXwizk4qBJQmZLtCiFyFE1TVTzefjvDn9r0Hnj5str29k66+p0QWU0SUpFjvPaaehMuWF
Bt//WX6h0ICsrAF/HzU0v6FSiQgU8qhMiRDAa1/FzPnhn6tIsWQcuWEBamtp9/Xk0ErVIlQ19GCLOYnZDu3r2bdu3aUbRoUQwGA2vXrn3q4wMDAzEYDMm+/v77b0tjFsJideqoN95y5dT2zZuqgP6GDRn8QidOqMGrUgpKCGGJ6Gj1b/PmqvZSBtA0+PJL6NVLTdgHaNNGTQAtWjRDXkIIi5mdkEZERFC9enV+MM0SSaOzZ88SHByc8PXcc8+Z+9JCZIgyZVSt0nr11PajR9ChQ9IZpukWHKy6Y+/cycAnFULkCJoGHTuqsUYZxGg08O679nzxReK+vn1h3bpMXGJZCDOYPcu+devWtG7d2uwX8vT0JK8UMxNWokABdWf9rbfUmvfx8Wpp+n//taN+/Qx4gdatoUULNatKCCHM1bUr5MuXIU8VFgZjx9bhjz8S+6C++QaGDEmc7CmE3rJsDGnNmjXx8vKiadOm7Ny5M6teVohUubjAzz+Dv3/ivu+/t+fbb18gMjIDXsDeXtWYyrTip0KIbCc+XmWJ3bur++npdOUKNGzowB9/eALg7AwrV6qhqZKMCmuS6XVIvby8mDNnDr6+vkRHR7N48WKaNm1KYGAgDRo0SPGY6Ohook3jZ4Dw8HAAjEYjRtPAF/FUpnaS9nq20aPB29vAwIH2xMUZOHCgKM2bx7FmjZFChdL33IZjx7BfsIDYd95R1fqzITnXzPd4W8n7Wtpl+3NN07Dv0gWtalXiR45M99MdPw4dOjgQHKwyzwIFNFatiqNuXY3s2oQZJdufa5kkPe1l0DTLu24MBgNr1qyhQ4cOZh3Xrl07DAYD69evT/Hno0aNYnQKa/UuXboUNzc3S0IV4pmOHfPkm29eICpKXacVLhzB558foFixiHQ9r11MDPGy3Il4TFRUFF26dAFg+fLluJjqkYmcTdMos349jwoXJrhOnXQ9VVCQJxMnJr6feXk95LPPDlK0aPrez4R4mkePHtGtWzfCwsLw8PAw61hdEtJx48axZMkSzpw5k+LPU+oh9fb2Jjg4mAJSTidNjEYjAQEBNG/eHEdHR73DsRlHj8by8stw544rAPnzqx6Fl15K5y3327exW7CA+Gw4aEvONfNFRESQ77/xgaGhoTK+Po2y9bmmaRn23jBnjh0ffGBHfLx6vtq14+jffxsdOzbMfu2WSbL1uZaJ7ty5g5eXl0UJqS5Lhx4/fhwvL69Uf+7s7Iyzs3Oy/Y6OjnJimEnazDy+vvD11zuYMqUFp04ZuHvXQMuWDixaBJ07p+OJjx+H777DvksXKF06w+K1JnKupd3j7STtZr5s12aapmqN1q6tZldaKD4ehg2Db79N3PfGGzB3bjw7d8Zkv3bLAtJm5klPW5mdkD58+JDz588nbF+8eJETJ06QP39+fHx88Pf35/r16yxatAiAyZMnU7JkSSpXrkxMTAxLlixh1apVrFq1yuKghchMBQtGsXNnLN26ObJtG8TEQJcualWnoUMt7MRo00ZNcDLzilEIkQPEx4OXFxQubPFTREWpnPaXXxL3ffIJfPWVrGQsbIPZCWlQUBCNHyvSO3jwYAB69erFwoULCQ4O5sqVKwk/j4mJYciQIVy/fh1XV1cqV67Mxo0baZMBsweFyCweHvDbb9CvH8ydq/YNGwYXLqh6pQ6W3Fvw8FBFTxcsUE+czW7dCyEsZG+v6jBZ6PZtaN9e1VcGsLODadPU2wxIQipsg9kfq40aNeJpw04XLlyYZHvo0KEMHTrU7MCE0JujI8yZA6VKwYgRat+cOaqMysqV4O5uwZMePKi6LRo0gKpVMzReIYSNiY+HTp3ULZjXX7foKc6fVzdgzp1T27lywYoV0LZtBsYpRBaQteyFeAqDAYYPV/VKTRPlt2yB+vXh+nULnrBJE7h8WZJRIYRaHjRXLsiTx6LD9+9XyyGbktEiRdQyoJKMClskCakQadCtm1rZybRwyh9/qA+CkycteLJChSA2Vo
0FiI3N0DiFEDbE1RV++kmtV2+mX39V17em1YkrV1Y3YHx9MzhGIbKIJKRCpFGDBqpHolQptX3tGtSrB9u2WfBkp05B//6wd2+GxiiEsAGxsdChA1iwaqGmwcSJava8qTpikybqraREiYwNU4isJAmpEGaoUEH1Qrz4otp+8ECN35o/38wnqllTzZBq1CijQxRCWLsHD1RSmiuXWYfFxqqqUJ98krivVy/YvBmklK2wdZKQCmEmT0/VsWFaDyIuDt55B0aONHPJ+mLF1AFLlsDDh5kRqhDCGuXLp8p4mK5s0+DhQ/WeM2NG4r5Ro1TRDlkITmQHkpAKYQE3NzWGa9CgxH3jxsGbbybeRkuTGzegb19YsyajQxRCWJuHD6FdO7MHnwcHQ8OGsHGj2nZwgIUL4YsvpHqcyD50WalJiOzA3h6+/16NKR00SHV2Ll2qxpauWQP586fhSYoVg7//huLFMztcIYTe7t2D8HBIYSXC1Jw+rYYFmcp758kDq1ercaNCZCfSQypEOn3wgUpAXV3V9u7d8NJLamGmNDEloxs2qB5TIUT25O2t6jKVL5+mh+/Yod5LTMmojw/s2yfJqMieJCEVIgO0b68+Zzw91fbff6uyUIcPp/EJHj2C996DefMyLUYhhE4uXYKWLdXtkzRatAhatYKwMLX9/PNqQmXlypkTohB6k4RUiAzywgvqA6NCBbUdGqom0a9dm4aD3dzg0CE1M0oIkb3cv6+myOfO/cyHahqMHq1mzxuNal/btuqC18src8MUQk+SkAqRgUqVUrVKGzZU25GR8NprMGVKGg729lYzFPbvh6CgTI1TCJGFatSA339/Zm2mmBh46y01e96kXz91UZuGXFYImyYJqRAZLF8+2LpVzbgH1eMxaBB8+KEqEfVUmgZDh8LkyZkcpRAi0wUGqvE8Dx4886H370Pr1mrhJpOJE+GHH9SseiGyOznNhcgEzs5qDFjJkjB2rNo3daqanPDzz+oOfYoMBjVDyrRGqRDCdsXEqCKhqf7BK5cvq9vyp0+rbWdnVZ749dezIEYhrIT0kAqRSQwGGDNGzVMy9XCsXavGld68+ZQDCxVSB/z9N6xcmQWRCiEylGmFjBYt4JdfVI24VBw9qiZAmpLRggXV7HpJRkVOIwmpEJns7bdh0yZwd1fbR46oD6AzZ55x4Jw5MGGCmgwhhLAdkyfD//73zKXbfvsNGjSAkBC1XbYsHDgAdetmfohCWBtJSIXIAs2bq/qBppKjly6pD51du55y0IQJsHevDCATwtbky6dqwD1lGaUZM9Tw0keP1PZLL6lktGzZLIpRCCsjCakQWaRqVVXZqUYNtX3/vkpUf/45lQOcnSFXLlUs/5tvntnbIoTQmWnWYu/eiYPHnxAfD598Av37q+8B3ngDtm9Xt+uFyKkkIRUiCxUtqlZyat1abRuNajb+uHFPyTf37lW3AE339YQQ1icuDl5+GSZNSvUhkZHQubOaPW8ydCgsXw4uLlkQoxBWTBJSIbKYuzusX68WZjIZORL69EkshJ1Ep05w9qxUxRbCmhkM6r57tWop/vjWLWjaFH79VW3b2cHMmfD11+p7IXI6+TMQQgcODokfRibz56vSL+HhKRzg7q5qGQ4cCHfuZFmcQog0iIpSWeXIkdCsWbIfnzunxowfOKC2c+WCDRugb98sjlMIKyYJqRA6MRjU7boVK9RwUYCAAKhXD65eTeGAe/dg48bE+jBCCP3duAHlyqlSGinYtw/8/OD8ebXt5aWG7bRpk4UxCmEDJCEVQmedOqkJDfnzq+1Tp1RZqBMnnnigj4+6dd+gQVaHKIRITf780L07vPhish+tXKlu05tualSpAgcPwvPPZ3GMQtgASUiFsAL16qkPqjJl1PaNG1C/Pmze/MQDHR1VXdKPPlIDUYUQ+rl1S81GmjAhyRR5TVOFMTp3huhota9ZMzU/0cdHp1iFsHJmJ6S7d++mXbt2FC1aFIPBwNq1a595zK
5du/D19cXFxYXSpUsza9YsS2IVIlt77jk1xszPT20/fAjt2qn6+EnY26uM9anLPQkhMtWyZVC+PFy7lmR3bCz06weffpq4r3dvdUc/T56sDVEIW2J2QhoREUH16tX54Ycf0vT4ixcv0qZNG+rXr8/x48cZPnw4H3zwAatWrTI7WCGyu0KF4PffE5cNjItTs/GHDUusWYjBoOrEvPuu2pb6pEJkvdatVb0202oXqIvI9u3h8T6XL79UExYdHXWIUQgbYvYSMK1bt6a1qYhiGsyaNQsfHx8mT54MQMWKFQkKCmLixIl07NjR3JcXIttzdVUTnT79NLFe4ddfq9WdFi78r16haQWYadPUWqQ//fTUVWGEEBkkJETdpShUCN5/P2H3jRuqDOnx42rb0VElom++qVOcQtiYTF+T8MCBA7Ro0SLJvpYtWzJv3jyMRiOOKVw2RkdHE20aeAOE/1cHx2g0YkyxUKN4kqmdpL3MY03tNn48+PjYMWiQHfHxBlasgKtX41m1Ko4CBdRjDAULYvD0JD4mRrdihtbUZrbi8baS97W0s4Zzzf7dd+HuXeJ27ky4CPzzT2jf3oGrV9V2njwav/wSR6NGWsq1hbOYNbSbrZE2s0x62sugaZbf7zMYDKxZs4YOHTqk+phy5crRu3dvhg8fnrBv//79vPTSS9y4cQOvFIp9jxo1itGjRyfbv3TpUtzc3CwNVwibdORIYSZOrEV0tLp+LFr0IZ99dgAvr0dJHmcfHU2cqX6UsGpRUVF06dIFgOXLl+Miy/TYDJdbt3B+8ICw0qUB+OOPQnz99Qs8eqQ6VwoVesTnnx/E2/uBnmEKoYtHjx7RrVs3wsLC8PDwMOvYTO8hBZW4Ps6UAz+538Tf35/BgwcnbIeHh+Pt7U3jxo0pYOoaEk9lNBoJCAigefPmKfZCi5RZY7u1aQPt2ml06KAREmLgxo3cfPZZM1avjqNOnf/+lvbvx75TJ2K3bYNKlbI0PmtsM2sXERGR8H2TJk3ImzevfsHYED3PNcP27Wh168JjnSKLFhkYM8ae2Fj1WebrG8+aNY4UKVI/S2N7FvkbNZ+0mWXupGPhlkxPSIsUKULIE2twh4aG4uDgkGpy6ezsjHMKPT2Ojo5yYphJ2swy1tZutWurslBt26q6+LdvG2jRwoElS6BjR6BWLejTB8fnntNt9oS1tZk1e7ydpN3Ml+VtFhYGXbvCiBHwySdoGowapSYsmbRrB8uW2ZErl/VWU5RzzXzSZuZJT1tl+l+On58fAQEBSfZt27aNWrVqyX+yEGYoUULVMWzSRG1HRcEbb8CkSaC55VIzft3cVBmax8ZgCyHSKU8eVZNt0CBiYqBXr6TJ6IABsGaNWhJUCGEZsxPShw8fcuLECU78t4zMxYsXOXHiBFeuXAHU7faePXsmPL5v375cvnyZwYMHc+bMGebPn8+8efMYMmRIxvwGQuQgefOqYvm9eqltTYOPP1ZL3MfGohLRl16CL77QM0whsofbt1XR+7g4qFiRew8dadkSFi9WPzYY1AXh1Klq4r0QwnJmJ6RBQUHUrFmTmjVrAjB48GBq1qzJ559/DkBwcHBCcgpQqlQpNm3aRGBgIDVq1GDMmDFMnTpVSj4JYSEnJ1iwAB6f9zd9Orz6KkTEOqsiiB9/rF+AQmQXgYEq2wwJ4dIlda0XGKh+5OICv/6qFk2TimtCpJ/ZY0gbNWrE0ybmL1y4MNm+hg0bcuzYMXNfSgiRCoMBPv8cSpaEd95RvaO//QYNG8KGDa3xKgTcvw8nT0KDBjpHK4SNev11aNmSoLPuvPxy4uJoBQvChg1Qp46+4QmRnVjv6GshxDP17AlbtyYuSXj0qPqQPH0aNaa0e3c12FQIkXbjxsGMGQBsCHSnYcPEZLRcOTXBUJJRITKWJKRC2LgmTWDfPvDxUdtXrvx3a7HJl7Bz539LOwkh0kTT4M4duHuXH36ADh3g0X8lf+vVg/37oUwZXSMUIluShF
SIbKByZdVr8/zzajssDFq0d2XR/rIQEwP+/mqChhAiddHRYDAQP3ESH98dwcCBEB+vftSlCwQEgJTCFiJzSEIqRDbh5QW7dqn1tAGMRjUb//thN9F+/hmCgvQNUAhrdvYsPPcc0Tv3q3Jq3yfOVBo2DH7+WW42CJGZJCEVIhvJnRvWroX+/RP3Df7em/81/IeYJq3UDstXCxYi+/L25lGb13l5aCVWr1a77O1h9mxV+clOPi2FyFRZsnSoECLr2NvDtGlQujQMGaLyz7lLXLhwA36rOx7XsBCYMkVq1QgBarxoRAT/RPnQOmASFy6o3blzw8qV0Lq1vuEJkVPINZ8Q2ZDBAIMHwy+/JN5m3LEDvplXgPuOhfQNTghrMmAAD5t1wK+OlpCMFi0Ke/ZIMipEVpKEVIhsrGNHlYgWLKi2RwW/R8Wln3H0mAGuX9c3OCGswLqGk2h8cT5376k7BlWrqgmCNWroG5cQOY0kpEJkc35+6gP2uefUdkgIDKx3nLhSZeD33/UNTgg9xMejfTuR70eH0+F9L4JiawDQvDns3Qve3vqGJ0ROJAmpEDlAmTJw4ICqTwpwMKo6/WKnMesvWcVJ5Dyx/1zg0Wfj2T5qT8K+d96BjRvBw0PHwITIwSQhFSKHKFAAtm+Hzp1Bw4452ru8/4Ej3771F/E7d+kdnhBZ4kG4RruPylI8+l820RaAsWPhxx/B0VHn4ITIwSQhFSIHcXGBpUtVXUWTYgvH8s8bw4l8JOWgRPYWNmw8AaX+x5YtGvfJh5MTLFkCI0ZI0Qkh9CZln4TIYezsVF3FUqWgXz94N+5HnO9EU6GZgXXroJBMwhfZ0MmTMH9OcXLfiwcM5M2ravY2bKhzYEIIIBsnpHFxcRiNRr3D0I3RaMTBwYGoqCji4uL0DsdmpNRu9vb2OGbDe3n/+5+avNGpUy7uPczF3wfu8leJ7pT4ZSIl21bWOzwhMsy+H/+i9ceVePCgJwAlS8KmTVCxor5xCSESZbuEVNM0QkJCCAsLQ8vBK9JomkaRIkW4evUqBrkXlWaptZuzszMFCxbEI5vNeGjdWtVbbNsWom/EExMZS49ucXy1KXEClBC27Df/fbT5qj7V2cVe6vPCC7BhAxQurHdkQojHZbuENCwsjPv371OoUCFy5cqVY5Ox+Ph4Hj58SO7cubGTNe/S7Ml20zQNo9FIWFgY1/+r25ndktIaNVRZqLZtC9LiVACEQ4smsayYeJWXB5bSOzwhLKJp8NlnMO6rurRnNXupR/v2agy1m5ve0QkhnpStElJN0wgNDcXDw4OCpkrgOVR8fDwxMTG4uLhIQmqGlNrN1dUVd3d3rl27xu3bt7NdQgrq1v3evfD66xAQAMNiRvP8B/P47v55Bo90kwkfwqZER8PCxj+x90AJoBHr6MAHH8CkSWppXSGE9clWmUpcXBxxcXHZMmEQ+jIYDOTJk4fo6OhsOzbZw0PVYXz7bfiej+jNQoZ87ka/fhAbq3d0QqTNvXvQqkU8JQ8spT3rMBhg8mSYMkWSUSGsWbbqIY3971PTwSFb/VrCSpgmNsXFxWXLSU6g6jDOnQulS+dn5MgWat+sqXx4qiFfba6Ou7vOAQrxFBcvQqeWYQSdy8MB1mHn4szqZdChg96RCSGeJVv1kJrk1HGjInPllPPKYFB1GZcsgdyO0fRkEbn2baVBA7hxQ+/ohEjZ4cMwq9oMfjlXnTzcx6OQCzsDDZKMCmEjpCtRCJGi7t2heHFnXmm/m+AwVzgBLWvdYenWAlStqnd0QiRatw66doX8ka/wiDiKlM/Lpk1QurTekQkh0ipb9pAKITJGw4bw+wE3SpY0UIsj7A8uyUd1DhAQoHdkQihTp2j83mEahsgIrlOckw0Gsn+/JKNC2BpJSIUQT1WxoioL5ehbnVGMYvcjX9q0gfnz9Y5M5GRxcTBoEEwedJGxjKAZ2+nWDbZtg/z59Y5OCGEuixLSGTNmUKpUKVxcXPD19WXPnj
2pPjYwMBCDwZDs6++//7Y4aCFE1ipcGLbvduLf9h9jxImysWf4450pfPaZqvcoRFZ69Aje7PCQaVPiuEhpynKeqiPas3gxODvrHZ0QwhJmJ6QrVqxg0KBBjBgxguPHj1O/fn1at27NlStXnnrc2bNnCQ4OTvh67rnnLA5apM50ATB69OhMff5Ro0ZlyvOnVXx8PNWrV6dNmzYWHX/+/HkcHByYMWNGBkeWfbm5wapV8OGH0JrN9GEu3419RO/e9hiNcrNFZI37951o1Uzjo9+aMJ7h2NvD+B89GTsWpOSyELbL7D/fSZMm8c4779CnTx8qVqzI5MmT8fb2ZubMmU89ztPTkyJFiiR82UtBOJEOCxcu5OTJkxYnxmXLlqV79+6MGjWK8PDwjA0uG7O3VzUdS0wejB8HicSN28sCcOqzmHs3IvUOT2RzF7aeo8B7s/kzKJofGMA6t25s3Ah9+ugdmRAivcyaZR8TE8PRo0cZNmxYkv0tWrRg//79Tz22Zs2aREVFUalSJUaOHEnjxo1TfWx0dDTR0dEJ26aEwWg0PrUoudFoRNM04uPjiY+PT8uvlO2Yfm/tv/uopvbIjOfXq43j4uIYPXo0DRs2pFatWhbH8fHHH7No0SKmTJnCiBEjgKe3W3x8fMJSojn9gqpfPyhWzJnhb15iQ3Q7nMOMnKhwhQfbf8HnxSJ6h2f1Hn8fe9b7mlBOTdtN2Y9fpTwPWEZX+hVdy+p1GtWrG5HmezrT+SXnWdpJm1kmPe1lVkJ6+/Zt4uLiKFy4cJL9hQsXJiQkJMVjvLy8mDNnDr6+vkRHR7N48WKaNm1KYGAgDRo0SPGYCRMmpHjLeefOnbg9ZRFiBwcHihQpwsOHD4mJiTHjN8s+Hj16BJDw+z948CBTnj86Olq3nsXNmzdz5coVhgwZkq4YfHx8qFKlCnPmzKF///5JllhNqd1iYmKIjIxk9+7dCYsw5GSOjvBZt78otyAWA/BX1CHu1K/L0kGjyNswZy/d+yxRUVEJ3+/YsQMXFxcdo7F+D6ce4+Ud46hGHADrHM4xYcgqrl934fp1nYOzIQFSHsNs0mbmMeUIlrCoDumTBcI1TUu1aHj58uUpX758wrafnx9Xr15l4sSJqSak/v7+DB48OGE7PDwcb29vGjduTIECBVKNKyoqiqtXr5I7d+4c+wZvStidnJz4448/GDduHAcOHMDOzo7GjRszadIkSpYsmfD4hQsX8s477zBv3jx69+6d5LkCAwNp2rQpn3/+OV988UWS53d2dubEiRN88cUXHD16FCcnJ1q2bMnXX39N8eLFk8W1e/duJk6cyMGDB3nw4AE+Pj506tQJf3//JBcZj79mixYtGD16NIcPHyYsLIy4OPVh9Msvv2AwGOjevXuKy8RWrVqVv/76K9U2mjBhAkOHDgWgS5cujBw5kiNHjtC8eXM0TePBgwe4u7snO6ejoqJwdXWlQYMGOfb8SqZNG+q2bYxL5zdwi7uGm3aNTt8P4LjTUl4Y87Le0VmtiIiIhO+bNGlC3rx59QvGisVGx7G/wQjaH58EwCVgv3sjihxdRqWSqX8WiKSMRiMBAQE0b948264yl9GkzSxz584di481KyEtWLAg9vb2yXpDQ0NDk/WaPk2dOnVYsmRJqj93dnbGOYWpko6Ojk89MeLi4jAYDNjZ2SXp7cpJTL/30aNH+e6772jYsCHvvfcex48fZ926dfz555/8+eefCQmV6fEptZlp29Smj+87dOgQX331FW3btuWDDz7g2LFjLF++nH379nHkyJEk58OsWbPo168f+fLlo127dhQqVIgjR44wfvx4AgMD2blzJ05OTkme/8CBA0yYMIHGjRvzv//9jytXrmBnZ4emaezatYsKFSqQP5XaLl27dk3WgxkdHc3kyZOJjo6mQYMGCa9Tt25dQCXCLVu2TLhN//jv/Hh7GAyGZ56HOY3PK76smzyBKv6TqfDwKK5EUefrjuz762vqrR2CwS
5nrHBljsfPHzmfUnbv6kPO+Han6a31Cft2Vh3IvZGNeaFkAWkzC8i5Zj5pM/Okp63MSkidnJzw9fUlICCAV199NWF/QEAA7du3T/PzHD9+HC8vL3NeWphp06ZNCb2epsSqZ8+eLF68mLVr19KlS5d0Pf/WrVuZO3cu77zzTsK+L7/8ki+++ILhw4czb948AP766y8GDhxIjRo12L59e5Ik8quvvsLf359p06bx8ccfJ3n+gIAA5s2bx9tvv51k/5kzZ7h79y6tW7dONbaRI0cm2Y6KiqJDhw7ExMQwb968hCQUoFatWgDPHAMtns7R253iF3awv9Z71L2yHDs06m8Yyu6yf1IraDZu+aVHWaTdv1vOEdv+NerG/AlAHHbs7/YD9Rb2YdOmTTpHJ4TIDGbfsh88eDA9evSgVq1a+Pn5MWfOHK5cuULfvn0Bdbv9+vXrLFq0CIDJkydTsmRJKleuTExMDEuWLGHVqlWsWrUqY3+TZ6hVC1IZ5mo1ihSBoKCMea4GDRrw2muvJdn39ttvs3jxYo4cOZLuhLR8+fLJksVPPvmEH374gWXLljFz5kycnJyYPXs2sbGxTJ06NVmP5tChQ5k0aRLLli1LlpDWrFkz2fMDXLt2DSDNPfKPHj3ilVdeITAwkIULF9KjR48kP3d3d8fFxSXheYV5IiMjqV+/PmFhYRw9ehS/i0v5vXFFmu5WQzwaXFzE6eKnyb1lFSUalNA5WmELgkb9RvnRXXHnIQBhhjxc/vYXavWrh5+fH2FhYTRu3Fh6rYTIZsxOSDt37sydO3f48ssvCQ4OpkqVKmzatIkSJdSHTXBwcJKapDExMQwZMoTr16/j6upK5cqV2bhxo8X1Iy0VEkKOGvxes2bNZPtMYzvv37+f7ud/6aWXko2xdHV1xdfXly1btvDPP/9QpUoVDh48CMCWLVvYvn17sudxdHRMcZGEF198McXXNY1PyZcv3zNjjIiI4OWXX2bPnj0sXryYrl27pvi4/Pnzc/v27Wc+n0guPj6eo0ePJnxvsDPQdNfnHPy4AtUn9cKVKCpHHuVeo5oc/nI5L45soXPEwlrFGePZ2+JLGgYmTmi95Fweh43rqNa0PBEREUnONSFE9mLRpKZ+/frRr1+/FH+2cOHCJNtDhw5NmECipyI2UIkmI2PMkydPsn0ODuq/2zQ5KD08PT1T3G/quQwLCwPg7t27AIwbN86s50+tB9TV1RVQPXNP8+DBA9q0acPBgwdZvnw5r7/+eqqPjYyMfGr1BmG+Ot914kL9Cth3eo0Sxn/Jp92j1met2LFzDI22+mPnkDPHeIuUhf59l0svdafh3S0J+/YXfZ1qQfPJ7eWuY2RCiKxiUUJqizLqVnh2YxpfmlIZI1NSmZLQ0NAU99+8eRNITIhNs+DDw8Nxd0/7B0tqVRsKFSoEJCa6KQkPD6dVq1YEBQXxyy+/0KFDh1QfGx8fT1hYGJUrV05zbCJtSneoRvj5IIJqv0mtkI3YodFkx0iOFN1PqV0LKVixkN4hCitwbNo+Cg/qyovxVwE1XjSw5QQab/wEO3uZECdETiHdFDmc6db39RTGMxw/fjzV4/bt25dQRN4kMjKSo0eP4urqSrly5QCoXbs2QMKt+/SqXLkydnZ2nDt3LsWf379/n+bNm3Ps2DFWr1791GQU4Ny5c8THx1O1atUMiU8k5eGTF99r69ndYizxqOTihVub0KpUIeibHTpHJ/QUZ4xnR7PxVP+gAcX+S0bvGgpweuIWmm4ZKsmoEDmMJKQ53PPPP4/BYGD58uVJinWfO3eOKVOmpHrc2bNnmT9/fpJ93377Lbdu3aJr164JZZz69euHg4MDAwcO5OrVq8me5/79+09NfJ+UN29eqlWrRlBQULKE+O7duzRt2pSTJ0+yZs0aXn752XUwDx06BEDDhg3THIMwj8HejgZbR3BiwhbuGtTEtkLxoTz/aTN+rz2c6IeyEkpOE3oyhBNFWtLk9xHYo8aDns
zbgPhjJ6j2cXOdoxNC6CHH3LIXKStWrBidO3dm+fLl+Pr60qpVK0JDQ1mzZg2tWrVKtRpCixYt6NevHxs3bqRChQocO3aMrVu34u3tzfjx4xMeV6VKFWbMmMH7779P+fLladOmDWXKlCE8PJwLFy6wa9cuevfuzaxZs9Icc4cOHRg1ahRHjhxJMvmpa9euHDt2jMaNG3Po0KGEZNOkaNGi/O9//0uyLyAgAHt7+zQlryJ9nh/WgtA2pznapCe+dwKwQ6Pp4Qn86bkd17XLKNOijN4hiiywb8gaKk3qg6+mht3EY2Bvo894actn2DvLR5IQOZX89QvmzZtHoUKFWLlyJdOnT6d8+fLMmTOHokWLppqQ+vn5MWLECEaOHMmUKVNwcnKiS5cufPPNN8kmJL377rvUqFGDSZMmsXv3btavX0+ePHnw8fHho48+olevXmbF26dPH8aMGcOSJUsSEtL4+Hj27t0LqCVmd+7cmey4N954I0lC+ujRI9auXUu7du0oWrSoWTGIRAULFkzzUr2e1YpQMGQLe16diN9vw3EgjiqRR4hoWY09Xb6l3pK+GOzlxk12dO9SGKcaf0CDS4sS9oXYeRH87c80GNw4Tc9hzrkmhLAxmg0ICwvTAO327dtPfVxkZKT2119/aZGRkVkUmfWKi4vT7t27p8XFxekdSqbo2rWrVqBAAe3hw4cWP8e8efM0QNu1a1fCvqe1m5xfKYuJidHWrl2rxcTEmHXc2UUHtcuOpTUNEr6O5WusXdl7OZMitR4PHz7UAA3Q7t27p3c4me7guO1asF3RJP/Xh73aabf/umnW81h6ruV00m7mkzazzO3btzVACwsLM/tY6YoQNmncuHE8fPiQ6dOnW3R8bGws48eP55VXXqFBgwYZHJ1Ii3I9alPw2h/srtQ3YV/NezvxqFeV7V3nERerPeVoYQvCr4axs1I/ao9oRpH4G2ofHhx4byG1rq2jQMWUy8cJIXIeSUiFTSpVqhQ//fQTuXLlsuj4a9eu8eabbzJp0qQMjkyYw80zNw1Oz+TwmK0E2xcDIA/hNFvehz/z1efchuSLJgjrp8VrHBiyiuiS5Wl8ZmbC/j/yNyby8Cn8ZvXCYCez6IUQiWQMqbBZnTt3tvjYkiVLMmrUqIwLJoeKjIykVatW3LlzJ13LOb44sgXhPf/kQMuP8Pt7IQDVH+4j5pVq7GroT511/jjnccnAyEVmubTnKsEdB+B3a33Cvofk4mSXCfgt6W/xGOGMOteEENZJekiFEBaLj49n9+7dnD59Ot3LOXr45MXvzAJOTdzCNYeSADhhpOGuLwktWImDX25Dk7v4VisqLJrtzb+mUIMKSZLRIwVbc3fPX9RdNjBdE9Yy8lwTQlgfSUiFEFal6sctKXjzNIF+/hj/u4njHXuROl+05JBnO85t/EfnCMXjtHiNo6PWc7NQZZptH0YuHgEQaleYQx+vpNbNjfjU89E5SiGEtZOEVAhhdVzyu9Fo/3gu/HqcP939EvbXuf0bpV6uxM6ag7l38b5+AQoATv/yF8cLNsd3dHtKGP8F1NKf+6v3xe3SGWpPfEPGigoh0kQSUiGE1SrfsQqV7+3l8MDFhNirWrEOxNH4xPfElynLrvaTiLwbqXOUOc/lvVcILP02FTpV5fl7vyfsP+7RkAu/HKPuiZnk9s6nY4RCCFuTLRNSTQaaiUwg55U+DPZ2vDj1TfKE/MPuRp8TiZrcVEC7Q8P1H3Ov0HPs6DKbqAeyBGlmu/1XKIE1B1G0fhkaXVyQsOxniH1R9n6wkup3d/Lc69V1jlIIYYuyVULq4KDGm8XGxuociciOjEaV8Njb2+scSc7kWjAXDXaOJuzQWfaV7E486lZw0fjrNFnRlzt5y7Cj50KiH0pimtFC/rjJ7y8Ow7lyGRqdmIIj6j32viEv+16eQL5b56g35Q3s7OX2vBDCMtkqIbW3t8fe3p7w8HC9QxHZjKZphIWF4ezsLOVmnuDm5oazs3
OWvV6RF3146eISzv/6B4e92ifsLxZ/lSaL3+JW3rLseWMqEaERWRZTdnUx8DI7Kg8gT42SND3yNe48BOARruyuOwy7ixd4acMwnPO5ZUk8WX2uCSGyTraqQ2owGPD09CQ4OBhnZ2dy5cqFwZAzr9jj4+OJiYkhKioKO7tsdd2RqZ5sN03TMBqNhIWF8fDhQ4oVK6Z3iFYlV65c3L9/n02bNlm8SIGlynWsCh3XcnbxYSIHD6fGbTWWsXjcFYr/+iH3V40ioO6HVPqhH8VqFMrS2Gzd30uPETr8e+peXkYp4hL2x+DIwcp9KL/kMxrU8MrSmPQ814QQmS9bJaQAefLkITIyktu3b3Pr1i29w9GNpmlERkbi6uqaY5NyS6TWbs7OzhQrVgwPDw8doxMpKd/jReixnb/n7iVsxNfUDv0NgLzaPZrvG0V0zfHs9XmdvCMHUKVPHZC/hxRFhsVwZNgq8v08jaoPDlDhsZ89JBfHXuhLhTmDaVCjqG4xCiGyr2yXkBoMBry8vPD09EwY85cTGY1Gdu/eTYMGDeQWsxlSajd7e3tpQxtQoU896FOPv1ac4p7/N9S+uAwH4nAmhnpXlsL/lnL2wxoEd3ifauM6k79UHr1DtgrnN5/jwhc/US1oHg20kCQ/Czd48Efjj6j240AalC6gU4RCiJwg2yWkJqbxpDmVvb09sbGxuLi4SDJlBmk380RFRfHaa68RGhpKkyZNrKLNKnWuCp0Xc+vIGP75cDpVDv5IHi0MgPKRJyi/7D0il33IvmKvYOjVk1rDW+CUS/+4s9Lt8/c5M2oF+dcvoPKDQ5R94uf/OFfhxmsDqT21O/ULWsftcWs814QQGSfbJqRCiMwXFxfH5s2bE763JoVeKEmh/d8Sde9L9nyykrzLZlD10WEAXInipesrYfxK7k3Ix/7K3cjdvQNVBzTEOXf2THRu/3OXUxM24PrbL9S8HUB9YpL8PBZ7jvl0wPWTgVTp14ByVlbQ3prPNSFE+klCKoTI1lzyuVJ/bi+Y24t/lh0l9JuFVPxjOQW02wDk0+7R6M/p4D+dh/65OFK8HbEvd6DSh83xrJBf5+gtp8VrnNv4DzcWbiPvjtVUub+HxiRP5M45VSK49dtUndCdFysW0SFSIYSwsOzTjBkzKFWqFC4uLvj6+rJnz56nPn7Xrl34+vri4uJC6dKlmTVrlkXBCiFEepTr6ku949PIE3GDY6PWc8j7daJILCOUmwjqXVtOo1ldKFixIP+4VWfXi59w4PPN3Lr4UMfI00DTuLr/KoHv/syu0r256Viccq9UoNHqD6hxPxCHx5LRO3aF2Ov7AaeXHKNs5J80WPsx+SQZFULoyOwe0hUrVjBo0CBmzJjBSy+9xOzZs2ndujV//fUXPj4+yR5/8eJF2rRpw7vvvsuSJUvYt28f/fr1o1ChQnTs2DFDfgkhhDCHg6sjz3/RDr5oR/j1Bxz/bgva2rVUvbQBd+0BAHZolIs8SbkjJ+HIROLHGPjHqRIhPi9iePEFCrSoRfHWVfHwdMny+DUNbvxxi8u/neJR4GFy/3mQsrf24x1/C+9Ujrnu4MP55ztRuO9rlO9Zm3r2Ug5OCGE9zE5IJ02axDvvvEOfPn0AmDx5Mlu3bmXmzJlMmDAh2eNnzZqFj48PkydPBqBixYoEBQUxceJESUiFELrzKOaO36Q3YNIbxEUZOf3jbu4t2UiRP7ZSNvqvhMfZoVEu5jTlzp+G8wtgKcRj4LJ9SW55lOWBdyUMFcqTq4I3bmWLkqeCFwUreeKSy7LJlcZHRkKO3eDW8Ws8OHON6PNXcbp8jvw3/6JY+BmKaXd4WlXcaJw4XbARD+s0o0jPFjzXsRrFrGxcqBBCmJiVkMbExHD06FGGDRuWZH+LFi3Yv39/isccOHCAFi1aJNnXsmVL5s2bh9FoNGumZEREBC4uWd8bYYuMRiNRUVFERETIbFQzSLuZJyIiIsn32aHNSr5dh5Jv1wHGcPHMba
4s2UPs77vw/PcAPpFncPhv/XZFo2DcRQreuwj3AuBk0ueKwUAo+Yiyz02EUx6i7N0xOrjwwJDYO3mobEec4+1wjnlI7ui7uMU9wEWLwINw8gOpjWJ9ch2qCNw4n8eXh1XqkLdDQ8q9WYfy7onvl48iH6WjVfSXHc+1rCLva+aTNrPM43+n5jIrIb19+zZxcXEULlw4yf7ChQsTEhKS4jEhISEpPj42Npbbt2/j5ZV8tY/o6Giio6MTtsPCVMmWEiVKmBOuECILFS9eXO8QrJAG3IW4uxCZ8iNa3dmRQa/1CML2wL49sO9b+CSDntYKybkmhHXTNM3sYywaRPTkyj+apj11NaCUHp/SfpMJEyaQJ0+ehK+UxqYKIYQQQgjrc+fOHbOPMauHtGDBgtjb2yfrDQ0NDU3WC2pSpEiRFB/v4OBAgQIpr/zh7+/P4MGDE7bv379PiRIluHLlCnnyyOoqaREeHo63tzdXr16V5S7NIO1mPmkzy0i7mU/azDLSbuaTNrNMWFgYPj4+5M9vfsk8sxJSJycnfH19CQgI4NVXX03YHxAQQPv27VM8xs/Pjw0bNiTZt23bNmrVqpXquAxnZ2ecnZ2T7c+TJ4+cGGby8PCQNrOAtJv5pM0sI+1mPmkzy0i7mU/azDJ2dubfgDf7iMGDBzN37lzmz5/PmTNn+Oijj7hy5Qp9+/YFVO9mz549Ex7ft29fLl++zODBgzlz5gzz589n3rx5DBkyxOxghRBCCCFE9mN22afOnTtz584dvvzyS4KDg6lSpQqbNm1KmHAUHBzMlStXEh5fqlQpNm3axEcffcT06dMpWrQoU6dOlZJPQgghhBACsHDp0H79+tGvX78Uf7Zw4cJk+xo2bMixY8cseSlA3cL/4osvUryNL1ImbWYZaTfzSZtZRtrNfNJmlpF2M5+0mWXS024GzZK5+UIIIYQQQmQQWTtOCCGEEELoShJSIYQQQgihK0lIhRBCCCGEriQhFUIIIYQQurLZhDQ6OpoaNWpgMBg4ceKE3uFYvVdeeQUfHx9cXFzw8vKiR48e3LhxQ++wrNalS5d45513KFWqFK6urpQpU4YvvviCmJgYvUOzeuPGjaNu3bq4ubmRN29evcOxSjNmzKBUqVK4uLjg6+vLnj179A7Jqu3evZt27dpRtGhRDAYDa9eu1TskqzdhwgReeOEF3N3d8fT0pEOHDpw9e1bvsKzezJkzqVatWkJBfD8/PzZv3qx3WDZlwoQJGAwGBg0aZNZxNpuQDh06lKJFi+odhs1o3LgxK1eu5OzZs6xatYp///2X119/Xe+wrNbff/9NfHw8s2fP5vTp03z//ffMmjWL4cOH6x2a1YuJieGNN97g/fff1zsUq7RixQoGDRrEiBEjOH78OPXr16d169ZJ6jeLpCIiIqhevTo//PCD3qHYjF27dtG/f38OHjxIQEAAsbGxtGjRgoiICL1Ds2rFixfnq6++IigoiKCgIJo0aUL79u05ffq03qHZhCNHjjBnzhyqVatm/sGaDdq0aZNWoUIF7fTp0xqgHT9+XO+QbM66des0g8GgxcTE6B2Kzfjmm2+0UqVK6R2GzViwYIGWJ08evcOwOi+++KLWt2/fJPsqVKigDRs2TKeIbAugrVmzRu8wbE5oaKgGaLt27dI7FJuTL18+be7cuXqHYfUePHigPffcc1pAQIDWsGFD7cMPPzTreJvrIb158ybvvvsuixcvxs3NTe9wbNLdu3f5+eefqVu3Lo6OjnqHYzPCwsLInz+/3mEIGxYTE8PRo0dp0aJFkv0tWrRg//79OkUlcoKwsDAAeQ8zQ1xcHMuXLyciIgI/Pz+9w7F6/fv3p23btjRr1syi420qIdU0jd69e9O3b19q1aqldzg259NPPyVXrlwUKFCAK1eusG7dOr1Dshn//vsv06ZNo2/fvnqHImzY7du3iYuLo3Dhwkn2Fy5cmJCQEJ2iEtmdpmkMHjyYevXqUaVKFb3DsXqnTp
0id+7cODs707dvX9asWUOlSpX0DsuqLV++nGPHjjFhwgSLn8MqEtJRo0ZhMBie+hUUFMS0adMIDw/H399f75CtQlrbzeSTTz7h+PHjbNu2DXt7e3r27ImWwxbqMrfNAG7cuEGrVq1444036NOnj06R68uSdhOpMxgMSbY1TUu2T4iMMmDAAE6ePMmyZcv0DsUmlC9fnhMnTnDw4EHef/99evXqxV9//aV3WFbr6tWrfPjhhyxZsgQXFxeLn8cqlg69ffs2t2/ffupjSpYsSZcuXdiwYUOSN+64uDjs7e3p3r07P/30U2aHalXS2m4pnSDXrl3D29ub/fv356hbEea22Y0bN2jcuDG1a9dm4cKF2NlZxTVclrPkXFu4cCGDBg3i/v37mRyd7YiJicHNzY1ffvmFV199NWH/hx9+yIkTJ9i1a5eO0dkGg8HAmjVr6NChg96h2ISBAweydu1adu/eTalSpfQOxyY1a9aMMmXKMHv2bL1DsUpr167l1Vdfxd7ePmFfXFwcBoMBOzs7oqOjk/wsNQ6ZGWRaFSxYkIIFCz7zcVOnTmXs2LEJ2zdu3KBly5asWLGC2rVrZ2aIVimt7ZYS03VIdHR0RoZk9cxps+vXr9O4cWN8fX1ZsGBBjk1GIX3nmkjk5OSEr68vAQEBSRLSgIAA2rdvr2NkIrvRNI2BAweyZs0aAgMDJRlNB03TctxnpTmaNm3KqVOnkux76623qFChAp9++mmaklGwkoQ0rXx8fJJs586dG4AyZcpQvHhxPUKyCYcPH+bw4cPUq1ePfPnyceHCBT7//HPKlCmTo3pHzXHjxg0aNWqEj48PEydO5NatWwk/K1KkiI6RWb8rV65w9+5drly5QlxcXEKd4LJlyyb8zeZkgwcPpkePHtSqVQs/Pz/mzJnDlStXZHzyUzx8+JDz588nbF+8eJETJ06QP3/+ZJ8LQunfvz9Lly5l3bp1uLu7J4xRzpMnD66urjpHZ72GDx9O69at8fb25sGDByxfvpzAwEC2bNmid2hWy93dPdnYZNN8FbPGLGfonP8sdvHiRSn7lAYnT57UGjdurOXPn19zdnbWSpYsqfXt21e7du2a3qFZrQULFmhAil/i6Xr16pViu+3cuVPv0KzG9OnTtRIlSmhOTk7a888/L6V4nmHnzp0pnlO9evXSOzSrldr714IFC/QOzaq9/fbbCX+bhQoV0po2bapt27ZN77BsjiVln6xiDKkQQgghhMi5cu6gOCGEEEIIYRUkIRVCCCGEELqShFQIIYQQQuhKElIhhBBCCKErSUiFEEIIIYSuJCEVQgghhBC6koRUCCGEEELoShJSIYQQQgihK0lIhRBCCCGEriQhFUIIIYQQupKEVAghstD48eMxGAzJviZNmqR3aEIIoRtZy14IIbLQgwcPiIiISNj+8ssv2bRpE3v37qV48eI6RiaEEPpx0DsAIYTISdzd3XF3dwdg9OjRbNq0iV27dkkyKoTI0eSWvRBC6GD06NEsWLCAXbt2UaJECb3DEUIIXUlCKoQQWUySUSGESEoSUiGEyEKSjAohRHIyhlQIIbLI2LFj+eGHH/jtt99wdnYmJCQEgHz58uHs7KxzdEIIoR+ZZS+EEFlA0zTy5s1LeHh4sp8dPHiQ2rVr6xCVEEJYB0lIhRBCCCGErmQMqRBCCCGE0JUkpEIIIYQQQleSkAohhBBCCF1JQiqEEEIIIXQlCakQQgghhNCVJKRCCCGEEEJXkpAKIYQQQghdSUIqhBBCCCF0JQmpEEIIIYTQlSSkQgghhBBCV5KQCiGEEEIIXUlCKoQQQgghdPV/5WIvF6V5bCwAAAAASUVORK5CYII=\n",
|
||
"text/plain": [
|
||
"<Figure size 800x350 with 1 Axes>"
|
||
]
|
||
},
|
||
"metadata": {},
|
||
"output_type": "display_data"
|
||
}
|
||
],
|
||
"source": [
|
||
"# extra code – shows what the Huber loss looks like\n",
|
||
"\n",
|
||
"import matplotlib.pyplot as plt\n",
|
||
"\n",
|
||
"plt.figure(figsize=(8, 3.5))\n",
|
||
"z = np.linspace(-4, 4, 200)\n",
|
||
"z_center = np.linspace(-1, 1, 200)\n",
|
||
"plt.plot(z, huber_fn(0, z), \"b-\", linewidth=2, label=\"huber($z$)\")\n",
|
||
"plt.plot(z, z ** 2 / 2, \"r:\", linewidth=1)\n",
|
||
"plt.plot(z_center, z_center ** 2 / 2, \"r\", linewidth=2)\n",
|
||
"plt.plot([-1, -1], [0, huber_fn(0., -1.)], \"k--\")\n",
|
||
"plt.plot([1, 1], [0, huber_fn(0., 1.)], \"k--\")\n",
|
||
"plt.gca().axhline(y=0, color='k')\n",
|
||
"plt.gca().axvline(x=0, color='k')\n",
|
||
"plt.text(2.1, 3.5, r\"$\\frac{1}{2}z^2$\", color=\"r\", fontsize=15)\n",
|
||
"plt.text(3.0, 2.2, r\"$|z| - \\frac{1}{2}$\", color=\"b\", fontsize=15)\n",
|
||
"plt.axis([-4, 4, 0, 4])\n",
|
||
"plt.grid(True)\n",
|
||
"plt.xlabel(\"$z$\")\n",
|
||
"plt.legend(fontsize=14)\n",
|
||
"plt.title(\"Huber loss\", fontsize=14)\n",
|
||
"plt.show()"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"To test our custom loss function, let's create a basic Keras model and train it on the California housing dataset:"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 72,
|
||
"metadata": {
|
||
"tags": []
|
||
},
|
||
"outputs": [],
|
||
"source": [
|
||
"# extra code – loads, splits and scales the California housing dataset, then\n",
|
||
"# creates a simple Keras model\n",
|
||
"\n",
|
||
"from sklearn.datasets import fetch_california_housing\n",
|
||
"from sklearn.model_selection import train_test_split\n",
|
||
"from sklearn.preprocessing import StandardScaler\n",
|
||
"\n",
|
||
"housing = fetch_california_housing()\n",
|
||
"X_train_full, X_test, y_train_full, y_test = train_test_split(\n",
|
||
" housing.data, housing.target.reshape(-1, 1), random_state=42)\n",
|
||
"X_train, X_valid, y_train, y_valid = train_test_split(\n",
|
||
" X_train_full, y_train_full, random_state=42)\n",
|
||
"\n",
|
||
"scaler = StandardScaler()\n",
|
||
"X_train_scaled = scaler.fit_transform(X_train)\n",
|
||
"X_valid_scaled = scaler.transform(X_valid)\n",
|
||
"X_test_scaled = scaler.transform(X_test)\n",
|
||
"\n",
|
||
"input_shape = X_train.shape[1:]\n",
|
||
"\n",
|
||
"tf.keras.utils.set_random_seed(42)\n",
|
||
"model = tf.keras.Sequential([\n",
|
||
" tf.keras.layers.Dense(30, activation=\"relu\", kernel_initializer=\"he_normal\",\n",
|
||
" input_shape=input_shape),\n",
|
||
" tf.keras.layers.Dense(1),\n",
|
||
"])"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 73,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"model.compile(loss=huber_fn, optimizer=\"nadam\", metrics=[\"mae\"])"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 74,
|
||
"metadata": {
|
||
"scrolled": true
|
||
},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"Epoch 1/2\n",
|
||
"363/363 [==============================] - 1s 1ms/step - loss: 0.4858 - mae: 0.8357 - val_loss: 0.3479 - val_mae: 0.6527\n",
|
||
"Epoch 2/2\n",
|
||
"363/363 [==============================] - 0s 1ms/step - loss: 0.2415 - mae: 0.5419 - val_loss: 0.2630 - val_mae: 0.5473\n"
|
||
]
|
||
},
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<keras.src.callbacks.History at 0x19a5004c0>"
|
||
]
|
||
},
|
||
"execution_count": 74,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"model.fit(X_train_scaled, y_train, epochs=2,\n",
|
||
" validation_data=(X_valid_scaled, y_valid))"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"## Saving/Loading Models with Custom Objects"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 75,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"INFO:tensorflow:Assets written to: my_model_with_a_custom_loss/assets\n"
|
||
]
|
||
},
|
||
{
|
||
"name": "stderr",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"INFO:tensorflow:Assets written to: my_model_with_a_custom_loss/assets\n"
|
||
]
|
||
}
|
||
],
|
||
"source": [
|
||
"model.save(\"my_model_with_a_custom_loss\") # extra code – saving works fine"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 76,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"model = tf.keras.models.load_model(\"my_model_with_a_custom_loss\",\n",
|
||
" custom_objects={\"huber_fn\": huber_fn})"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 77,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"Epoch 1/2\n",
|
||
"363/363 [==============================] - 1s 1ms/step - loss: 0.2052 - mae: 0.4910 - val_loss: 0.2210 - val_mae: 0.4946\n",
|
||
"Epoch 2/2\n",
|
||
"363/363 [==============================] - 0s 1ms/step - loss: 0.1888 - mae: 0.4683 - val_loss: 0.2021 - val_mae: 0.4773\n"
|
||
]
|
||
},
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<keras.src.callbacks.History at 0x19a876dd0>"
|
||
]
|
||
},
|
||
"execution_count": 77,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"model.fit(X_train_scaled, y_train, epochs=2,\n",
|
||
" validation_data=(X_valid_scaled, y_valid))"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 78,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"def create_huber(threshold=1.0):\n",
|
||
" def huber_fn(y_true, y_pred):\n",
|
||
" error = y_true - y_pred\n",
|
||
" is_small_error = tf.abs(error) < threshold\n",
|
||
" squared_loss = tf.square(error) / 2\n",
|
||
" linear_loss = threshold * tf.abs(error) - threshold ** 2 / 2\n",
|
||
" return tf.where(is_small_error, squared_loss, linear_loss)\n",
|
||
" return huber_fn"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 79,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"model.compile(loss=create_huber(2.0), optimizer=\"nadam\", metrics=[\"mae\"])"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 80,
|
||
"metadata": {
|
||
"scrolled": true
|
||
},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"Epoch 1/2\n",
|
||
"363/363 [==============================] - 1s 1ms/step - loss: 0.2051 - mae: 0.4598 - val_loss: 0.2249 - val_mae: 0.4582\n",
|
||
"Epoch 2/2\n",
|
||
"363/363 [==============================] - 0s 1ms/step - loss: 0.1982 - mae: 0.4531 - val_loss: 0.2035 - val_mae: 0.4527\n"
|
||
]
|
||
},
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<keras.src.callbacks.History at 0x19abec4f0>"
|
||
]
|
||
},
|
||
"execution_count": 80,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"model.fit(X_train_scaled, y_train, epochs=2,\n",
|
||
" validation_data=(X_valid_scaled, y_valid))"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 81,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"INFO:tensorflow:Assets written to: my_model_with_a_custom_loss_threshold_2/assets\n"
|
||
]
|
||
},
|
||
{
|
||
"name": "stderr",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"INFO:tensorflow:Assets written to: my_model_with_a_custom_loss_threshold_2/assets\n"
|
||
]
|
||
}
|
||
],
|
||
"source": [
|
||
"model.save(\"my_model_with_a_custom_loss_threshold_2\")"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 82,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"model = tf.keras.models.load_model(\"my_model_with_a_custom_loss_threshold_2\",\n",
|
||
" custom_objects={\"huber_fn\": create_huber(2.0)})"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 83,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"Epoch 1/2\n",
|
||
"363/363 [==============================] - 1s 1ms/step - loss: 0.1935 - mae: 0.4465 - val_loss: 0.2020 - val_mae: 0.4410\n",
|
||
"Epoch 2/2\n",
|
||
"363/363 [==============================] - 0s 1ms/step - loss: 0.1899 - mae: 0.4422 - val_loss: 0.1867 - val_mae: 0.4399\n"
|
||
]
|
||
},
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<keras.src.callbacks.History at 0x19ae75c30>"
|
||
]
|
||
},
|
||
"execution_count": 83,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"model.fit(X_train_scaled, y_train, epochs=2,\n",
|
||
" validation_data=(X_valid_scaled, y_valid))"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 84,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"class HuberLoss(tf.keras.losses.Loss):\n",
|
||
" def __init__(self, threshold=1.0, **kwargs):\n",
|
||
" self.threshold = threshold\n",
|
||
" super().__init__(**kwargs)\n",
|
||
"\n",
|
||
" def call(self, y_true, y_pred):\n",
|
||
" error = y_true - y_pred\n",
|
||
" is_small_error = tf.abs(error) < self.threshold\n",
|
||
" squared_loss = tf.square(error) / 2\n",
|
||
" linear_loss = self.threshold * tf.abs(error) - self.threshold**2 / 2\n",
|
||
" return tf.where(is_small_error, squared_loss, linear_loss)\n",
|
||
"\n",
|
||
" def get_config(self):\n",
|
||
" base_config = super().get_config()\n",
|
||
" return {**base_config, \"threshold\": self.threshold}"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 85,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"# extra code – creates another basic Keras model\n",
|
||
"tf.keras.utils.set_random_seed(42)\n",
|
||
"model = tf.keras.Sequential([\n",
|
||
" tf.keras.layers.Dense(30, activation=\"relu\", kernel_initializer=\"he_normal\",\n",
|
||
" input_shape=input_shape),\n",
|
||
" tf.keras.layers.Dense(1),\n",
|
||
"])"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 86,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"model.compile(loss=HuberLoss(2.), optimizer=\"nadam\", metrics=[\"mae\"])"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 87,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"Epoch 1/2\n",
|
||
"363/363 [==============================] - 1s 1ms/step - loss: 0.6492 - mae: 0.8468 - val_loss: 0.5093 - val_mae: 0.6723\n",
|
||
"Epoch 2/2\n",
|
||
"363/363 [==============================] - 0s 1ms/step - loss: 0.2912 - mae: 0.5552 - val_loss: 0.3715 - val_mae: 0.5683\n"
|
||
]
|
||
},
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<keras.src.callbacks.History at 0x19b1356c0>"
|
||
]
|
||
},
|
||
"execution_count": 87,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"model.fit(X_train_scaled, y_train, epochs=2,\n",
|
||
" validation_data=(X_valid_scaled, y_valid))"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 88,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"INFO:tensorflow:Assets written to: my_model_with_a_custom_loss_class/assets\n"
|
||
]
|
||
},
|
||
{
|
||
"name": "stderr",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"INFO:tensorflow:Assets written to: my_model_with_a_custom_loss_class/assets\n"
|
||
]
|
||
}
|
||
],
|
||
"source": [
|
||
"model.save(\"my_model_with_a_custom_loss_class\") # extra code – saving works"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 89,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"model = tf.keras.models.load_model(\"my_model_with_a_custom_loss_class\",\n",
|
||
" custom_objects={\"HuberLoss\": HuberLoss})"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 90,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"Epoch 1/2\n",
|
||
"363/363 [==============================] - 1s 1ms/step - loss: 0.2416 - mae: 0.5034 - val_loss: 0.2922 - val_mae: 0.5057\n",
|
||
"Epoch 2/2\n",
|
||
"363/363 [==============================] - 0s 1ms/step - loss: 0.2173 - mae: 0.4774 - val_loss: 0.2503 - val_mae: 0.4843\n"
|
||
]
|
||
},
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<keras.src.callbacks.History at 0x19a781c60>"
|
||
]
|
||
},
|
||
"execution_count": 90,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"# extra code – shows that loading worked fine, the model can be used normally\n",
|
||
"model.fit(X_train_scaled, y_train, epochs=2,\n",
|
||
" validation_data=(X_valid_scaled, y_valid))"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 91,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"2.0"
|
||
]
|
||
},
|
||
"execution_count": 91,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"model.loss.threshold # extra code – the threshold was loaded correctly"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"## Other Custom Functions"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 92,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"def my_softplus(z):\n",
|
||
" return tf.math.log(1.0 + tf.exp(z))\n",
|
||
"\n",
|
||
"def my_glorot_initializer(shape, dtype=tf.float32):\n",
|
||
" stddev = tf.sqrt(2. / (shape[0] + shape[1]))\n",
|
||
" return tf.random.normal(shape, stddev=stddev, dtype=dtype)\n",
|
||
"\n",
|
||
"def my_l1_regularizer(weights):\n",
|
||
" return tf.reduce_sum(tf.abs(0.01 * weights))\n",
|
||
"\n",
|
||
"def my_positive_weights(weights): # return value is just tf.nn.relu(weights)\n",
|
||
" return tf.where(weights < 0., tf.zeros_like(weights), weights)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 93,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"layer = tf.keras.layers.Dense(1, activation=my_softplus,\n",
|
||
" kernel_initializer=my_glorot_initializer,\n",
|
||
" kernel_regularizer=my_l1_regularizer,\n",
|
||
" kernel_constraint=my_positive_weights)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 94,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"Epoch 1/2\n",
|
||
"363/363 [==============================] - 1s 1ms/step - loss: 1.4714 - mae: 0.8316 - val_loss: inf - val_mae: inf\n",
|
||
"Epoch 2/2\n",
|
||
"363/363 [==============================] - 0s 1ms/step - loss: 0.8094 - mae: 0.6172 - val_loss: 2.6153 - val_mae: 0.6058\n",
|
||
"INFO:tensorflow:Assets written to: my_model_with_many_custom_parts/assets\n"
|
||
]
|
||
},
|
||
{
|
||
"name": "stderr",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"INFO:tensorflow:Assets written to: my_model_with_many_custom_parts/assets\n"
|
||
]
|
||
},
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"Epoch 1/2\n",
|
||
"363/363 [==============================] - 1s 1ms/step - loss: 0.6333 - mae: 0.5617 - val_loss: 1.1687 - val_mae: 0.5468\n",
|
||
"Epoch 2/2\n",
|
||
"363/363 [==============================] - 0s 1ms/step - loss: 0.5570 - mae: 0.5303 - val_loss: 1.0440 - val_mae: 0.5250\n"
|
||
]
|
||
},
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<keras.src.callbacks.History at 0x19b868640>"
|
||
]
|
||
},
|
||
"execution_count": 94,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"# extra code – show that building, training, saving, loading, and training again\n",
|
||
"# works fine with a model containing many custom parts\n",
|
||
"\n",
|
||
"tf.keras.utils.set_random_seed(42)\n",
|
||
"model = tf.keras.Sequential([\n",
|
||
" tf.keras.layers.Dense(30, activation=\"relu\", kernel_initializer=\"he_normal\",\n",
|
||
" input_shape=input_shape),\n",
|
||
" tf.keras.layers.Dense(1, activation=my_softplus,\n",
|
||
" kernel_initializer=my_glorot_initializer,\n",
|
||
" kernel_regularizer=my_l1_regularizer,\n",
|
||
" kernel_constraint=my_positive_weights)\n",
|
||
"])\n",
|
||
"model.compile(loss=\"mse\", optimizer=\"nadam\", metrics=[\"mae\"])\n",
|
||
"model.fit(X_train_scaled, y_train, epochs=2,\n",
|
||
" validation_data=(X_valid_scaled, y_valid))\n",
|
||
"model.save(\"my_model_with_many_custom_parts\")\n",
|
||
"model = tf.keras.models.load_model(\n",
|
||
" \"my_model_with_many_custom_parts\",\n",
|
||
" custom_objects={\n",
|
||
" \"my_l1_regularizer\": my_l1_regularizer,\n",
|
||
" \"my_positive_weights\": my_positive_weights,\n",
|
||
" \"my_glorot_initializer\": my_glorot_initializer,\n",
|
||
" \"my_softplus\": my_softplus,\n",
|
||
" }\n",
|
||
")\n",
|
||
"model.fit(X_train_scaled, y_train, epochs=2,\n",
|
||
" validation_data=(X_valid_scaled, y_valid))"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 95,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"class MyL1Regularizer(tf.keras.regularizers.Regularizer):\n",
|
||
" def __init__(self, factor):\n",
|
||
" self.factor = factor\n",
|
||
"\n",
|
||
" def __call__(self, weights):\n",
|
||
" return tf.reduce_sum(tf.abs(self.factor * weights))\n",
|
||
"\n",
|
||
" def get_config(self):\n",
|
||
" return {\"factor\": self.factor}"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 96,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"Epoch 1/2\n",
|
||
"363/363 [==============================] - 1s 1ms/step - loss: 1.4714 - mae: 0.8316 - val_loss: inf - val_mae: inf\n",
|
||
"Epoch 2/2\n",
|
||
"363/363 [==============================] - 0s 998us/step - loss: 0.8094 - mae: 0.6172 - val_loss: 2.6153 - val_mae: 0.6058\n",
|
||
"INFO:tensorflow:Assets written to: my_model_with_many_custom_parts/assets\n"
|
||
]
|
||
},
|
||
{
|
||
"name": "stderr",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"INFO:tensorflow:Assets written to: my_model_with_many_custom_parts/assets\n"
|
||
]
|
||
},
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"Epoch 1/2\n",
|
||
"363/363 [==============================] - 1s 1ms/step - loss: 0.6333 - mae: 0.5617 - val_loss: 1.1687 - val_mae: 0.5468\n",
|
||
"Epoch 2/2\n",
|
||
"363/363 [==============================] - 0s 1ms/step - loss: 0.5570 - mae: 0.5303 - val_loss: 1.0440 - val_mae: 0.5250\n"
|
||
]
|
||
},
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<keras.src.callbacks.History at 0x19b8db610>"
|
||
]
|
||
},
|
||
"execution_count": 96,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"# extra code – again, show that everything works fine, this time using our\n",
|
||
"# custom regularizer class\n",
|
||
"\n",
|
||
"tf.keras.utils.set_random_seed(42)\n",
|
||
"model = tf.keras.Sequential([\n",
|
||
" tf.keras.layers.Dense(30, activation=\"relu\", kernel_initializer=\"he_normal\",\n",
|
||
" input_shape=input_shape),\n",
|
||
" tf.keras.layers.Dense(1, activation=my_softplus,\n",
|
||
" kernel_regularizer=MyL1Regularizer(0.01),\n",
|
||
" kernel_constraint=my_positive_weights,\n",
|
||
" kernel_initializer=my_glorot_initializer),\n",
|
||
"])\n",
|
||
"model.compile(loss=\"mse\", optimizer=\"nadam\", metrics=[\"mae\"])\n",
|
||
"model.fit(X_train_scaled, y_train, epochs=2,\n",
|
||
" validation_data=(X_valid_scaled, y_valid))\n",
|
||
"model.save(\"my_model_with_many_custom_parts\")\n",
|
||
"model = tf.keras.models.load_model(\n",
|
||
" \"my_model_with_many_custom_parts\",\n",
|
||
" custom_objects={\n",
|
||
" \"MyL1Regularizer\": MyL1Regularizer,\n",
|
||
" \"my_positive_weights\": my_positive_weights,\n",
|
||
" \"my_glorot_initializer\": my_glorot_initializer,\n",
|
||
" \"my_softplus\": my_softplus,\n",
|
||
" }\n",
|
||
")\n",
|
||
"model.fit(X_train_scaled, y_train, epochs=2,\n",
|
||
" validation_data=(X_valid_scaled, y_valid))"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"## Custom Metrics"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 97,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"# extra code – once again, let's create a basic Keras model\n",
"tf.keras.utils.set_random_seed(42)\n",
|
||
"model = tf.keras.Sequential([\n",
|
||
" tf.keras.layers.Dense(30, activation=\"relu\", kernel_initializer=\"he_normal\",\n",
|
||
" input_shape=input_shape),\n",
|
||
" tf.keras.layers.Dense(1),\n",
|
||
"])"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 98,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"model.compile(loss=\"mse\", optimizer=\"nadam\", metrics=[create_huber(2.0)])"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 99,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"Epoch 1/2\n",
|
||
"363/363 [==============================] - 1s 844us/step - loss: 1.7474 - huber_fn: 0.6846\n",
|
||
"Epoch 2/2\n",
|
||
"363/363 [==============================] - 0s 796us/step - loss: 0.7843 - huber_fn: 0.3136\n"
|
||
]
|
||
},
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<keras.src.callbacks.History at 0x19b4fcf10>"
|
||
]
|
||
},
|
||
"execution_count": 99,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"# extra code – train the model with our custom metric\n",
|
||
"model.fit(X_train_scaled, y_train, epochs=2)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"**Note**: if you use the same function as the loss and a metric, you may be surprised to see slightly different results. This is in part because the operations are not computed exactly in the same order, so there might be tiny floating point errors. More importantly, if you use sample weights or class weights, then the equations are a bit different:\n",
|
||
"* the `fit()` method keeps track of the mean of all batch losses seen so far since the start of the epoch. Each batch loss is the sum of the weighted instance losses divided by the _batch size_ (not the sum of weights, so the batch loss is _not_ the weighted mean of the losses).\n",
|
||
"* the metric since the start of the epoch is equal to the sum of weighted instance losses divided by sum of all weights seen so far. In other words, it is the weighted mean of all the instance losses. Not the same thing."
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"### Streaming metrics"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 100,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(), dtype=float32, numpy=0.8>"
|
||
]
|
||
},
|
||
"execution_count": 100,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"precision = tf.keras.metrics.Precision()\n",
|
||
"precision([0, 1, 1, 1, 0, 1, 0, 1], [1, 1, 0, 1, 0, 1, 0, 1])"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 101,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(), dtype=float32, numpy=0.5>"
|
||
]
|
||
},
|
||
"execution_count": 101,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"precision([0, 1, 0, 0, 1, 0, 1, 1], [1, 0, 1, 1, 0, 0, 0, 0])"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 102,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(), dtype=float32, numpy=0.5>"
|
||
]
|
||
},
|
||
"execution_count": 102,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"precision.result()"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 103,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"[<tf.Variable 'true_positives:0' shape=(1,) dtype=float32, numpy=array([4.], dtype=float32)>,\n",
|
||
" <tf.Variable 'false_positives:0' shape=(1,) dtype=float32, numpy=array([4.], dtype=float32)>]"
|
||
]
|
||
},
|
||
"execution_count": 103,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"precision.variables"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 104,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"precision.reset_states()"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"Creating a streaming metric:"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 105,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"class HuberMetric(tf.keras.metrics.Metric):\n",
|
||
" def __init__(self, threshold=1.0, **kwargs):\n",
|
||
" super().__init__(**kwargs) # handles base args (e.g., dtype)\n",
|
||
" self.threshold = threshold\n",
|
||
" self.huber_fn = create_huber(threshold)\n",
|
||
" self.total = self.add_weight(\"total\", initializer=\"zeros\")\n",
|
||
" self.count = self.add_weight(\"count\", initializer=\"zeros\")\n",
|
||
"\n",
|
||
" def update_state(self, y_true, y_pred, sample_weight=None):\n",
|
||
" sample_metrics = self.huber_fn(y_true, y_pred)\n",
|
||
" self.total.assign_add(tf.reduce_sum(sample_metrics))\n",
|
||
" self.count.assign_add(tf.cast(tf.size(y_true), tf.float32))\n",
|
||
"\n",
|
||
" def result(self):\n",
|
||
" return self.total / self.count\n",
|
||
"\n",
|
||
" def get_config(self):\n",
|
||
" base_config = super().get_config()\n",
|
||
" return {**base_config, \"threshold\": self.threshold}"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"**Extra material** – the rest of this section tests the `HuberMetric` class and shows another implementation subclassing `tf.keras.metrics.Mean`."
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 106,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(), dtype=float32, numpy=14.0>"
|
||
]
|
||
},
|
||
"execution_count": 106,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"m = HuberMetric(2.)\n",
|
||
"\n",
|
||
"# total = 2 * |10 - 2| - 2²/2 = 14\n",
|
||
"# count = 1\n",
|
||
"# result = 14 / 1 = 14\n",
|
||
"m(tf.constant([[2.]]), tf.constant([[10.]]))"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 107,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(), dtype=float32, numpy=7.0>"
|
||
]
|
||
},
|
||
"execution_count": 107,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"# total = total + (|1 - 0|² / 2) + (2 * |9.25 - 5| - 2² / 2) = 14 + 7 = 21\n",
|
||
"# count = count + 2 = 3\n",
|
||
"# result = total / count = 21 / 3 = 7\n",
|
||
"m(tf.constant([[0.], [5.]]), tf.constant([[1.], [9.25]]))"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 108,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(), dtype=float32, numpy=7.0>"
|
||
]
|
||
},
|
||
"execution_count": 108,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"m.result()"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 109,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"[<tf.Variable 'total:0' shape=() dtype=float32, numpy=21.0>,\n",
|
||
" <tf.Variable 'count:0' shape=() dtype=float32, numpy=3.0>]"
|
||
]
|
||
},
|
||
"execution_count": 109,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"m.variables"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 110,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"[<tf.Variable 'total:0' shape=() dtype=float32, numpy=0.0>,\n",
|
||
" <tf.Variable 'count:0' shape=() dtype=float32, numpy=0.0>]"
|
||
]
|
||
},
|
||
"execution_count": 110,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"m.reset_states()\n",
|
||
"m.variables"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"Let's check that the `HuberMetric` class works well:"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 111,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"tf.keras.utils.set_random_seed(42)\n",
|
||
"model = tf.keras.Sequential([\n",
|
||
" tf.keras.layers.Dense(30, activation=\"relu\", kernel_initializer=\"he_normal\",\n",
|
||
" input_shape=input_shape),\n",
|
||
" tf.keras.layers.Dense(1),\n",
|
||
"])"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 112,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"model.compile(loss=create_huber(2.0), optimizer=\"nadam\",\n",
|
||
" metrics=[HuberMetric(2.0)])"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 113,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"Epoch 1/2\n",
|
||
"363/363 [==============================] - 1s 886us/step - loss: 0.6492 - huber_metric_1: 0.6492\n",
|
||
"Epoch 2/2\n",
|
||
"363/363 [==============================] - 0s 838us/step - loss: 0.2912 - huber_metric_1: 0.2912\n"
|
||
]
|
||
},
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<keras.src.callbacks.History at 0x19c2d1300>"
|
||
]
|
||
},
|
||
"execution_count": 113,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"model.fit(X_train_scaled, y_train, epochs=2)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 114,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"INFO:tensorflow:Assets written to: my_model_with_a_custom_metric/assets\n"
|
||
]
|
||
},
|
||
{
|
||
"name": "stderr",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"INFO:tensorflow:Assets written to: my_model_with_a_custom_metric/assets\n"
|
||
]
|
||
}
|
||
],
|
||
"source": [
|
||
"model.save(\"my_model_with_a_custom_metric\")"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 115,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"model = tf.keras.models.load_model(\n",
|
||
" \"my_model_with_a_custom_metric\",\n",
|
||
" custom_objects={\n",
|
||
" \"huber_fn\": create_huber(2.0),\n",
|
||
" \"HuberMetric\": HuberMetric\n",
|
||
" }\n",
|
||
")"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 116,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"Epoch 1/2\n",
|
||
"363/363 [==============================] - 1s 916us/step - loss: 0.2416 - huber_metric_1: 0.2416\n",
|
||
"Epoch 2/2\n",
|
||
"363/363 [==============================] - 0s 859us/step - loss: 0.2173 - huber_metric_1: 0.2173\n"
|
||
]
|
||
},
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<keras.src.callbacks.History at 0x19b5f0130>"
|
||
]
|
||
},
|
||
"execution_count": 116,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"model.fit(X_train_scaled, y_train, epochs=2)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"`model.metrics` contains the model's loss followed by the model's metric(s), so the `HuberMetric` is `model.metrics[-1]`:"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 117,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"2.0"
|
||
]
|
||
},
|
||
"execution_count": 117,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"model.metrics[-1].threshold"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"Looks like it works fine! More simply, we could have created the class like this:"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 118,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"class HuberMetric(tf.keras.metrics.Mean):\n",
|
||
" def __init__(self, threshold=1.0, name='HuberMetric', dtype=None):\n",
|
||
" self.threshold = threshold\n",
|
||
" self.huber_fn = create_huber(threshold)\n",
|
||
" super().__init__(name=name, dtype=dtype)\n",
|
||
"\n",
|
||
" def update_state(self, y_true, y_pred, sample_weight=None):\n",
|
||
" metric = self.huber_fn(y_true, y_pred)\n",
|
||
" super(HuberMetric, self).update_state(metric, sample_weight)\n",
|
||
"\n",
|
||
" def get_config(self):\n",
|
||
" base_config = super().get_config()\n",
|
||
" return {**base_config, \"threshold\": self.threshold} "
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"This class handles shapes better, and it also supports sample weights."
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 119,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"tf.keras.utils.set_random_seed(42)\n",
|
||
"model = tf.keras.Sequential([\n",
|
||
" tf.keras.layers.Dense(30, activation=\"relu\", kernel_initializer=\"he_normal\",\n",
|
||
" input_shape=input_shape),\n",
|
||
" tf.keras.layers.Dense(1),\n",
|
||
"])"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 120,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"model.compile(loss=tf.keras.losses.Huber(2.0), optimizer=\"nadam\",\n",
|
||
" weighted_metrics=[HuberMetric(2.0)])"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 121,
|
||
"metadata": {
|
||
"scrolled": true
|
||
},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"Epoch 1/2\n",
|
||
"363/363 [==============================] - 1s 898us/step - loss: 0.3272 - HuberMetric: 0.6594\n",
|
||
"Epoch 2/2\n",
|
||
"363/363 [==============================] - 0s 892us/step - loss: 0.1449 - HuberMetric: 0.2919\n"
|
||
]
|
||
}
|
||
],
|
||
"source": [
|
||
"np.random.seed(42)\n",
|
||
"sample_weight = np.random.rand(len(y_train))\n",
|
||
"history = model.fit(X_train_scaled, y_train, epochs=2,\n",
|
||
" sample_weight=sample_weight)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 122,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"(0.3272010087966919, 0.3272010869771911)"
|
||
]
|
||
},
|
||
"execution_count": 122,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"(history.history[\"loss\"][0],\n",
|
||
" history.history[\"HuberMetric\"][0] * sample_weight.mean())"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 123,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"INFO:tensorflow:Assets written to: my_model_with_a_custom_metric_v2/assets\n"
|
||
]
|
||
},
|
||
{
|
||
"name": "stderr",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"INFO:tensorflow:Assets written to: my_model_with_a_custom_metric_v2/assets\n"
|
||
]
|
||
}
|
||
],
|
||
"source": [
|
||
"model.save(\"my_model_with_a_custom_metric_v2\")"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 124,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"model = tf.keras.models.load_model(\"my_model_with_a_custom_metric_v2\",\n",
|
||
" custom_objects={\"HuberMetric\": HuberMetric})"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 125,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"Epoch 1/2\n",
|
||
"363/363 [==============================] - 1s 970us/step - loss: 0.2442 - HuberMetric: 0.2442\n",
|
||
"Epoch 2/2\n",
|
||
"363/363 [==============================] - 0s 857us/step - loss: 0.2184 - HuberMetric: 0.2184\n"
|
||
]
|
||
},
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<keras.src.callbacks.History at 0x19c576e90>"
|
||
]
|
||
},
|
||
"execution_count": 125,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"model.fit(X_train_scaled, y_train, epochs=2)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 126,
|
||
"metadata": {
|
||
"scrolled": true
|
||
},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"2.0"
|
||
]
|
||
},
|
||
"execution_count": 126,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"model.metrics[-1].threshold"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"## Custom Layers"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 127,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"exponential_layer = tf.keras.layers.Lambda(lambda x: tf.exp(x))"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 128,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(3,), dtype=float32, numpy=array([0.36787945, 1. , 2.7182817 ], dtype=float32)>"
|
||
]
|
||
},
|
||
"execution_count": 128,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"# extra code – like all layers, it can be used as a function:\n",
|
||
"exponential_layer([-1., 0., 1.])"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"Adding an exponential layer at the output of a regression model can be useful if the values to predict are positive and with very different scales (e.g., 0.001, 10., 10000)."
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 129,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"Epoch 1/5\n",
|
||
"363/363 [==============================] - 1s 1ms/step - loss: 0.7784 - val_loss: 0.4393\n",
|
||
"Epoch 2/5\n",
|
||
"363/363 [==============================] - 0s 891us/step - loss: 0.5702 - val_loss: 0.4094\n",
|
||
"Epoch 3/5\n",
|
||
"363/363 [==============================] - 0s 1ms/step - loss: 0.4431 - val_loss: 0.3760\n",
|
||
"Epoch 4/5\n",
|
||
"363/363 [==============================] - 0s 921us/step - loss: 0.4984 - val_loss: 0.3785\n",
|
||
"Epoch 5/5\n",
|
||
"363/363 [==============================] - 0s 943us/step - loss: 0.3966 - val_loss: 0.3633\n",
|
||
"162/162 [==============================] - 0s 631us/step - loss: 0.3781\n"
|
||
]
|
||
},
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"0.3781099021434784"
|
||
]
|
||
},
|
||
"execution_count": 129,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"tf.keras.utils.set_random_seed(42)\n",
|
||
"model = tf.keras.Sequential([\n",
|
||
" tf.keras.layers.Dense(30, activation=\"relu\", input_shape=input_shape),\n",
|
||
" tf.keras.layers.Dense(1),\n",
|
||
" exponential_layer\n",
|
||
"])\n",
|
||
"model.compile(loss=\"mse\", optimizer=\"sgd\")\n",
|
||
"model.fit(X_train_scaled, y_train, epochs=5,\n",
|
||
" validation_data=(X_valid_scaled, y_valid))\n",
|
||
"model.evaluate(X_test_scaled, y_test)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"Alternatively, it's often preferable to replace the targets with the logarithm of the targets (and use no activation function in the output layer)."
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 130,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"class MyDense(tf.keras.layers.Layer):\n",
|
||
" def __init__(self, units, activation=None, **kwargs):\n",
|
||
" super().__init__(**kwargs)\n",
|
||
" self.units = units\n",
|
||
" self.activation = tf.keras.activations.get(activation)\n",
|
||
"\n",
|
||
" def build(self, batch_input_shape):\n",
|
||
" self.kernel = self.add_weight(\n",
|
||
" name=\"kernel\", shape=[batch_input_shape[-1], self.units],\n",
|
||
" initializer=\"he_normal\")\n",
|
||
" self.bias = self.add_weight(\n",
|
||
" name=\"bias\", shape=[self.units], initializer=\"zeros\")\n",
|
||
"\n",
|
||
" def call(self, X):\n",
|
||
" return self.activation(X @ self.kernel + self.bias)\n",
|
||
"\n",
|
||
" def get_config(self):\n",
|
||
" base_config = super().get_config()\n",
|
||
" return {**base_config, \"units\": self.units,\n",
|
||
" \"activation\": tf.keras.activations.serialize(self.activation)}"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 131,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"Epoch 1/2\n",
|
||
"363/363 [==============================] - 1s 1ms/step - loss: 3.1183 - val_loss: 6.9549\n",
|
||
"Epoch 2/2\n",
|
||
"363/363 [==============================] - 0s 1ms/step - loss: 0.8702 - val_loss: 3.2627\n",
|
||
"162/162 [==============================] - 0s 718us/step - loss: 0.7039\n",
|
||
"INFO:tensorflow:Assets written to: my_model_with_a_custom_layer/assets\n"
|
||
]
|
||
},
|
||
{
|
||
"name": "stderr",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"INFO:tensorflow:Assets written to: my_model_with_a_custom_layer/assets\n"
|
||
]
|
||
}
|
||
],
|
||
"source": [
|
||
"# extra code – shows that a custom layer can be used normally\n",
|
||
"tf.keras.utils.set_random_seed(42)\n",
|
||
"model = tf.keras.Sequential([\n",
|
||
" MyDense(30, activation=\"relu\", input_shape=input_shape),\n",
|
||
" MyDense(1)\n",
|
||
"])\n",
|
||
"model.compile(loss=\"mse\", optimizer=\"nadam\")\n",
|
||
"model.fit(X_train_scaled, y_train, epochs=2,\n",
|
||
" validation_data=(X_valid_scaled, y_valid))\n",
|
||
"model.evaluate(X_test_scaled, y_test)\n",
|
||
"model.save(\"my_model_with_a_custom_layer\")"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 132,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"Epoch 1/2\n",
|
||
"363/363 [==============================] - 1s 1ms/step - loss: 0.5945 - val_loss: 0.5318\n",
|
||
"Epoch 2/2\n",
|
||
"363/363 [==============================] - 0s 1ms/step - loss: 0.4712 - val_loss: 0.5751\n"
|
||
]
|
||
},
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<keras.src.callbacks.History at 0x19cbf39a0>"
|
||
]
|
||
},
|
||
"execution_count": 132,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"# extra code – shows how to load a model with a custom layer\n",
|
||
"model = tf.keras.models.load_model(\"my_model_with_a_custom_layer\",\n",
|
||
" custom_objects={\"MyDense\": MyDense})\n",
|
||
"model.fit(X_train_scaled, y_train, epochs=2,\n",
|
||
" validation_data=(X_valid_scaled, y_valid))"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 133,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"class MyMultiLayer(tf.keras.layers.Layer):\n",
|
||
" def call(self, X):\n",
|
||
" X1, X2 = X\n",
|
||
" print(\"X1.shape: \", X1.shape ,\" X2.shape: \", X2.shape) # extra code\n",
|
||
" return X1 + X2, X1 * X2, X1 / X2"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"Our custom layer can be called using the functional API like this:"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 134,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"X1.shape: (None, 2) X2.shape: (None, 2)\n"
|
||
]
|
||
},
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"(<KerasTensor: shape=(None, 2) dtype=float32 (created by layer 'my_multi_layer')>,\n",
|
||
" <KerasTensor: shape=(None, 2) dtype=float32 (created by layer 'my_multi_layer')>,\n",
|
||
" <KerasTensor: shape=(None, 2) dtype=float32 (created by layer 'my_multi_layer')>)"
|
||
]
|
||
},
|
||
"execution_count": 134,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"# extra code – tests MyMultiLayer with symbolic inputs\n",
|
||
"inputs1 = tf.keras.layers.Input(shape=[2])\n",
|
||
"inputs2 = tf.keras.layers.Input(shape=[2])\n",
|
||
"MyMultiLayer()((inputs1, inputs2))"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"Note that the `call()` method receives symbolic inputs, and it returns symbolic outputs. The shapes are only partially specified at this stage: we don't know the batch size, which is why the first dimension is `None`.\n",
|
||
"\n",
|
||
"We can also pass actual data to the custom layer:"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 135,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"X1.shape: (2, 2) X2.shape: (2, 2)\n"
|
||
]
|
||
},
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"(<tf.Tensor: shape=(2, 2), dtype=float32, numpy=\n",
|
||
" array([[ 9., 18.],\n",
|
||
" [ 6., 10.]], dtype=float32)>,\n",
|
||
" <tf.Tensor: shape=(2, 2), dtype=float32, numpy=\n",
|
||
" array([[18., 72.],\n",
|
||
" [ 8., 21.]], dtype=float32)>,\n",
|
||
" <tf.Tensor: shape=(2, 2), dtype=float32, numpy=\n",
|
||
" array([[0.5 , 0.5 ],\n",
|
||
" [0.5 , 2.3333333]], dtype=float32)>)"
|
||
]
|
||
},
|
||
"execution_count": 135,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"# extra code – tests MyMultiLayer with actual data \n",
|
||
"X1, X2 = np.array([[3., 6.], [2., 7.]]), np.array([[6., 12.], [4., 3.]]) \n",
|
||
"MyMultiLayer()((X1, X2))"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"Now let's create a layer with a different behavior during training and testing:"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 136,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"class MyGaussianNoise(tf.keras.layers.Layer):\n",
|
||
" def __init__(self, stddev, **kwargs):\n",
|
||
" super().__init__(**kwargs)\n",
|
||
" self.stddev = stddev\n",
|
||
"\n",
|
||
" def call(self, X, training=None):\n",
|
||
" if training:\n",
|
||
" noise = tf.random.normal(tf.shape(X), stddev=self.stddev)\n",
|
||
" return X + noise\n",
|
||
" else:\n",
|
||
" return X"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"Here's a simple model that uses this custom layer:"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 137,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"Epoch 1/2\n",
|
||
"363/363 [==============================] - 1s 1ms/step - loss: 2.2220 - val_loss: 25.1506\n",
|
||
"Epoch 2/2\n",
|
||
"363/363 [==============================] - 0s 1ms/step - loss: 1.4104 - val_loss: 17.0415\n",
|
||
"162/162 [==============================] - 0s 655us/step - loss: 1.1059\n"
|
||
]
|
||
},
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"1.1058681011199951"
|
||
]
|
||
},
|
||
"execution_count": 137,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"# extra code – tests MyGaussianNoise\n",
|
||
"tf.keras.utils.set_random_seed(42)\n",
|
||
"model = tf.keras.Sequential([\n",
|
||
" MyGaussianNoise(stddev=1.0, input_shape=input_shape),\n",
|
||
" tf.keras.layers.Dense(30, activation=\"relu\",\n",
|
||
" kernel_initializer=\"he_normal\"),\n",
|
||
" tf.keras.layers.Dense(1)\n",
|
||
"])\n",
|
||
"model.compile(loss=\"mse\", optimizer=\"nadam\")\n",
|
||
"model.fit(X_train_scaled, y_train, epochs=2,\n",
|
||
" validation_data=(X_valid_scaled, y_valid))\n",
|
||
"model.evaluate(X_test_scaled, y_test)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"## Custom Models"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 138,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"class ResidualBlock(tf.keras.layers.Layer):\n",
|
||
" def __init__(self, n_layers, n_neurons, **kwargs):\n",
|
||
" super().__init__(**kwargs)\n",
|
||
" self.hidden = [tf.keras.layers.Dense(n_neurons, activation=\"relu\",\n",
|
||
" kernel_initializer=\"he_normal\")\n",
|
||
" for _ in range(n_layers)]\n",
|
||
"\n",
|
||
" def call(self, inputs):\n",
|
||
" Z = inputs\n",
|
||
" for layer in self.hidden:\n",
|
||
" Z = layer(Z)\n",
|
||
" return inputs + Z"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 139,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"class ResidualRegressor(tf.keras.Model):\n",
|
||
" def __init__(self, output_dim, **kwargs):\n",
|
||
" super().__init__(**kwargs)\n",
|
||
" self.hidden1 = tf.keras.layers.Dense(30, activation=\"relu\",\n",
|
||
" kernel_initializer=\"he_normal\")\n",
|
||
" self.block1 = ResidualBlock(2, 30)\n",
|
||
" self.block2 = ResidualBlock(2, 30)\n",
|
||
" self.out = tf.keras.layers.Dense(output_dim)\n",
|
||
"\n",
|
||
" def call(self, inputs):\n",
|
||
" Z = self.hidden1(inputs)\n",
|
||
" for _ in range(1 + 3):\n",
|
||
" Z = self.block1(Z)\n",
|
||
" Z = self.block2(Z)\n",
|
||
" return self.out(Z)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 140,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"Epoch 1/2\n",
|
||
"363/363 [==============================] - 2s 1ms/step - loss: 32.7847\n",
|
||
"Epoch 2/2\n",
|
||
"363/363 [==============================] - 0s 1ms/step - loss: 1.3612\n",
|
||
"162/162 [==============================] - 0s 713us/step - loss: 1.1603\n",
|
||
"INFO:tensorflow:Assets written to: my_custom_model/assets\n"
|
||
]
|
||
},
|
||
{
|
||
"name": "stderr",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"INFO:tensorflow:Assets written to: my_custom_model/assets\n"
|
||
]
|
||
}
|
||
],
|
||
"source": [
|
||
"# extra code – shows that the model can be used normally\n",
|
||
"tf.keras.utils.set_random_seed(42)\n",
|
||
"model = ResidualRegressor(1)\n",
|
||
"model.compile(loss=\"mse\", optimizer=\"nadam\")\n",
|
||
"history = model.fit(X_train_scaled, y_train, epochs=2)\n",
|
||
"score = model.evaluate(X_test_scaled, y_test)\n",
|
||
"model.save(\"my_custom_model\")"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 141,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"Epoch 1/2\n",
|
||
"363/363 [==============================] - 2s 1ms/step - loss: 1.3451\n",
|
||
"Epoch 2/2\n",
|
||
"363/363 [==============================] - 0s 1ms/step - loss: 0.7928\n",
|
||
"1/1 [==============================] - 0s 76ms/step\n"
|
||
]
|
||
},
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"array([[1.1431919],\n",
|
||
" [1.0584592],\n",
|
||
" [4.71127 ]], dtype=float32)"
|
||
]
|
||
},
|
||
"execution_count": 141,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"# extra code – the model can be loaded and you can continue training or use it\n",
|
||
"# to make predictions\n",
|
||
"model = tf.keras.models.load_model(\"my_custom_model\")\n",
|
||
"history = model.fit(X_train_scaled, y_train, epochs=2)\n",
|
||
"model.predict(X_test_scaled[:3])"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"We could have defined the model using the sequential API instead:"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 142,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"tf.keras.utils.set_random_seed(42)\n",
|
||
"block1 = ResidualBlock(2, 30)\n",
|
||
"model = tf.keras.Sequential([\n",
|
||
" tf.keras.layers.Dense(30, activation=\"relu\",\n",
|
||
" kernel_initializer=\"he_normal\"),\n",
|
||
" block1, block1, block1, block1,\n",
|
||
" ResidualBlock(2, 30),\n",
|
||
" tf.keras.layers.Dense(1)\n",
|
||
"])"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"## Losses and Metrics Based on Model Internals"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 143,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"class ReconstructingRegressor(tf.keras.Model):\n",
|
||
" def __init__(self, output_dim, **kwargs):\n",
|
||
" super().__init__(**kwargs)\n",
|
||
" self.hidden = [tf.keras.layers.Dense(30, activation=\"relu\",\n",
|
||
" kernel_initializer=\"he_normal\")\n",
|
||
" for _ in range(5)]\n",
|
||
" self.out = tf.keras.layers.Dense(output_dim)\n",
|
||
" self.reconstruction_mean = tf.keras.metrics.Mean(\n",
|
||
" name=\"reconstruction_error\")\n",
|
||
"\n",
|
||
" def build(self, batch_input_shape):\n",
|
||
" n_inputs = batch_input_shape[-1]\n",
|
||
" self.reconstruct = tf.keras.layers.Dense(n_inputs)\n",
|
||
"\n",
|
||
" def call(self, inputs, training=None):\n",
|
||
" Z = inputs\n",
|
||
" for layer in self.hidden:\n",
|
||
" Z = layer(Z)\n",
|
||
" reconstruction = self.reconstruct(Z)\n",
|
||
" recon_loss = tf.reduce_mean(tf.square(reconstruction - inputs))\n",
|
||
" self.add_loss(0.05 * recon_loss)\n",
|
||
" if training:\n",
|
||
" result = self.reconstruction_mean(recon_loss)\n",
|
||
" self.add_metric(result)\n",
|
||
" return self.out(Z)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 144,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"Epoch 1/5\n",
|
||
"363/363 [==============================] - 2s 1ms/step - loss: 0.8198 - reconstruction_error: 1.0892\n",
|
||
"Epoch 2/5\n",
|
||
"363/363 [==============================] - 0s 1ms/step - loss: 0.4778 - reconstruction_error: 0.5583\n",
|
||
"Epoch 3/5\n",
|
||
"363/363 [==============================] - 0s 1ms/step - loss: 0.4419 - reconstruction_error: 0.4227\n",
|
||
"Epoch 4/5\n",
|
||
"363/363 [==============================] - 0s 1ms/step - loss: 0.3852 - reconstruction_error: 0.3587\n",
|
||
"Epoch 5/5\n",
|
||
"363/363 [==============================] - 0s 1ms/step - loss: 0.3714 - reconstruction_error: 0.3245\n",
|
||
"162/162 [==============================] - 0s 658us/step\n"
|
||
]
|
||
}
|
||
],
|
||
"source": [
|
||
"# extra code\n",
|
||
"tf.keras.utils.set_random_seed(42)\n",
|
||
"model = ReconstructingRegressor(1)\n",
|
||
"model.compile(loss=\"mse\", optimizer=\"nadam\")\n",
|
||
"history = model.fit(X_train_scaled, y_train, epochs=5)\n",
|
||
"y_pred = model.predict(X_test_scaled)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"## Computing Gradients Using Autodiff"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 145,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"def f(w1, w2):\n",
|
||
" return 3 * w1 ** 2 + 2 * w1 * w2"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 146,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"36.000003007075065"
|
||
]
|
||
},
|
||
"execution_count": 146,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"w1, w2 = 5, 3\n",
|
||
"eps = 1e-6\n",
|
||
"(f(w1 + eps, w2) - f(w1, w2)) / eps"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 147,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"10.000000003174137"
|
||
]
|
||
},
|
||
"execution_count": 147,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"(f(w1, w2 + eps) - f(w1, w2)) / eps"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 148,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"w1, w2 = tf.Variable(5.), tf.Variable(3.)\n",
|
||
"with tf.GradientTape() as tape:\n",
|
||
" z = f(w1, w2)\n",
|
||
"\n",
|
||
"gradients = tape.gradient(z, [w1, w2])"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 149,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"[<tf.Tensor: shape=(), dtype=float32, numpy=36.0>,\n",
|
||
" <tf.Tensor: shape=(), dtype=float32, numpy=10.0>]"
|
||
]
|
||
},
|
||
"execution_count": 149,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"gradients"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 150,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"A non-persistent GradientTape can only be used to compute one set of gradients (or jacobians)\n"
|
||
]
|
||
}
|
||
],
|
||
"source": [
|
||
"with tf.GradientTape() as tape:\n",
|
||
" z = f(w1, w2)\n",
|
||
"\n",
|
||
"dz_dw1 = tape.gradient(z, w1) # returns tensor 36.0\n",
|
||
"try:\n",
|
||
" dz_dw2 = tape.gradient(z, w2) # raises a RuntimeError!\n",
|
||
"except RuntimeError as ex:\n",
|
||
" print(ex)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 151,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"with tf.GradientTape(persistent=True) as tape:\n",
|
||
" z = f(w1, w2)\n",
|
||
"\n",
|
||
"dz_dw1 = tape.gradient(z, w1) # returns tensor 36.0\n",
|
||
"dz_dw2 = tape.gradient(z, w2) # returns tensor 10.0, works fine now!\n",
|
||
"del tape"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 152,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"(<tf.Tensor: shape=(), dtype=float32, numpy=36.0>,\n",
|
||
" <tf.Tensor: shape=(), dtype=float32, numpy=10.0>)"
|
||
]
|
||
},
|
||
"execution_count": 152,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"dz_dw1, dz_dw2"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 153,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"c1, c2 = tf.constant(5.), tf.constant(3.)\n",
|
||
"with tf.GradientTape() as tape:\n",
|
||
" z = f(c1, c2)\n",
|
||
"\n",
|
||
"gradients = tape.gradient(z, [c1, c2])"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 154,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"[None, None]"
|
||
]
|
||
},
|
||
"execution_count": 154,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"gradients"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 155,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"with tf.GradientTape() as tape:\n",
|
||
" tape.watch(c1)\n",
|
||
" tape.watch(c2)\n",
|
||
" z = f(c1, c2)\n",
|
||
"\n",
|
||
"gradients = tape.gradient(z, [c1, c2])"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 156,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"[<tf.Tensor: shape=(), dtype=float32, numpy=36.0>,\n",
|
||
" <tf.Tensor: shape=(), dtype=float32, numpy=10.0>]"
|
||
]
|
||
},
|
||
"execution_count": 156,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"gradients"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 157,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"[<tf.Tensor: shape=(), dtype=float32, numpy=136.0>,\n",
|
||
" <tf.Tensor: shape=(), dtype=float32, numpy=30.0>]"
|
||
]
|
||
},
|
||
"execution_count": 157,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"# extra code – if given a vector, tape.gradient() will compute the gradient of\n",
|
||
"# the vector's sum.\n",
|
||
"with tf.GradientTape() as tape:\n",
|
||
" z1 = f(w1, w2 + 2.)\n",
|
||
" z2 = f(w1, w2 + 5.)\n",
|
||
" z3 = f(w1, w2 + 7.)\n",
|
||
"\n",
|
||
"tape.gradient([z1, z2, z3], [w1, w2])"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 158,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"[<tf.Tensor: shape=(), dtype=float32, numpy=136.0>,\n",
|
||
" <tf.Tensor: shape=(), dtype=float32, numpy=30.0>]"
|
||
]
|
||
},
|
||
"execution_count": 158,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"# extra code – shows that we get the same result as the previous cell\n",
|
||
"with tf.GradientTape() as tape:\n",
|
||
" z1 = f(w1, w2 + 2.)\n",
|
||
" z2 = f(w1, w2 + 5.)\n",
|
||
" z3 = f(w1, w2 + 7.)\n",
|
||
" z = z1 + z2 + z3\n",
|
||
"\n",
|
||
"tape.gradient(z, [w1, w2])"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 159,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"# extra code – shows how to compute the jacobians and the hessians\n",
|
||
"with tf.GradientTape(persistent=True) as hessian_tape:\n",
|
||
" with tf.GradientTape() as jacobian_tape:\n",
|
||
" z = f(w1, w2)\n",
|
||
" jacobians = jacobian_tape.gradient(z, [w1, w2])\n",
|
||
"hessians = [hessian_tape.gradient(jacobian, [w1, w2])\n",
|
||
" for jacobian in jacobians]\n",
|
||
"del hessian_tape"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 160,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"[<tf.Tensor: shape=(), dtype=float32, numpy=36.0>,\n",
|
||
" <tf.Tensor: shape=(), dtype=float32, numpy=10.0>]"
|
||
]
|
||
},
|
||
"execution_count": 160,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"jacobians"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 161,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"[[<tf.Tensor: shape=(), dtype=float32, numpy=6.0>,\n",
|
||
" <tf.Tensor: shape=(), dtype=float32, numpy=2.0>],\n",
|
||
" [<tf.Tensor: shape=(), dtype=float32, numpy=2.0>, None]]"
|
||
]
|
||
},
|
||
"execution_count": 161,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"hessians"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 162,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"def f(w1, w2):\n",
|
||
" return 3 * w1 ** 2 + tf.stop_gradient(2 * w1 * w2)\n",
|
||
"\n",
|
||
"with tf.GradientTape() as tape:\n",
|
||
" z = f(w1, w2) # same result as without stop_gradient()\n",
|
||
"\n",
|
||
"gradients = tape.gradient(z, [w1, w2])"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 163,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"[<tf.Tensor: shape=(), dtype=float32, numpy=30.0>, None]"
|
||
]
|
||
},
|
||
"execution_count": 163,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"gradients"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 164,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"[<tf.Tensor: shape=(), dtype=float32, numpy=inf>]"
|
||
]
|
||
},
|
||
"execution_count": 164,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"x = tf.Variable(1e-50)\n",
|
||
"with tf.GradientTape() as tape:\n",
|
||
" z = tf.sqrt(x)\n",
|
||
"\n",
|
||
"tape.gradient(z, [x])"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 165,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(), dtype=float32, numpy=30.0>"
|
||
]
|
||
},
|
||
"execution_count": 165,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"tf.math.log(tf.exp(tf.constant(30., dtype=tf.float32)) + 1.)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 166,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"[<tf.Tensor: shape=(1,), dtype=float32, numpy=array([nan], dtype=float32)>]"
|
||
]
|
||
},
|
||
"execution_count": 166,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"x = tf.Variable([1.0e30])\n",
|
||
"with tf.GradientTape() as tape:\n",
|
||
" z = my_softplus(x)\n",
|
||
"\n",
|
||
"tape.gradient(z, [x])"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 167,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"def my_softplus(z):\n",
|
||
" return tf.math.log(1 + tf.exp(-tf.abs(z))) + tf.maximum(0., z)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"Here is the proof that this equation is equal to log(1 + exp(_z_)):\n",
|
||
"* softplus(_z_) = log(1 + exp(_z_))\n",
|
||
"* softplus(_z_) = log(1 + exp(_z_)) - log(exp(_z_)) + log(exp(_z_)) ; **just adding and subtracting the same value**\n",
|
||
"* softplus(_z_) = log\\[(1 + exp(_z_)) / exp(_z_)\\] + log(exp(_z_)) ; **since log(_a_) - log(_b_) = log(_a_ / _b_)**\n",
|
||
"* softplus(_z_) = log\\[(1 + exp(_z_)) / exp(_z_)\\] + _z_ ; **since log(exp(_z_)) = _z_**\n",
|
||
"* softplus(_z_) = log\\[1 / exp(_z_) + exp(_z_) / exp(_z_)\\] + _z_ ; **since (1 + _a_) / _b_ = 1 / _b_ + _a_ / _b_**\n",
|
||
"* softplus(_z_) = log\\[exp(–_z_) + 1\\] + _z_ ; **since 1 / exp(_z_) = exp(–_z_), and exp(_z_) / exp(_z_) = 1**\n",
|
||
"* softplus(_z_) = softplus(–_z_) + _z_ ; **we recognize the definition at the top, but with –_z_**\n",
|
||
"* softplus(_z_) = softplus(–|_z_|) + max(0, _z_) ; **if you consider both cases, _z_ < 0 or _z_ ≥ 0, you will see that this works**"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 168,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"@tf.custom_gradient\n",
|
||
"def my_softplus(z):\n",
|
||
" def my_softplus_gradients(grads): # grads = backprop'ed from upper layers\n",
|
||
" return grads * (1 - 1 / (1 + tf.exp(z))) # stable grads of softplus\n",
|
||
"\n",
|
||
" result = tf.math.log(1 + tf.exp(-tf.abs(z))) + tf.maximum(0., z)\n",
|
||
" return result, my_softplus_gradients"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 169,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"(<tf.Tensor: shape=(1,), dtype=float32, numpy=array([1000.], dtype=float32)>,\n",
|
||
" [<tf.Tensor: shape=(1,), dtype=float32, numpy=array([1.], dtype=float32)>])"
|
||
]
|
||
},
|
||
"execution_count": 169,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"# extra code – shows that the function is now stable, as well as its gradients\n",
|
||
"x = tf.Variable([1000.])\n",
|
||
"with tf.GradientTape() as tape:\n",
|
||
" z = my_softplus(x)\n",
|
||
"\n",
|
||
"z, tape.gradient(z, [x])"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"## Custom Training Loops"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 170,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"tf.keras.utils.set_random_seed(42) # extra code – to ensure reproducibility\n",
|
||
"l2_reg = tf.keras.regularizers.l2(0.05)\n",
|
||
"model = tf.keras.models.Sequential([\n",
|
||
" tf.keras.layers.Dense(30, activation=\"relu\", kernel_initializer=\"he_normal\",\n",
|
||
" kernel_regularizer=l2_reg),\n",
|
||
" tf.keras.layers.Dense(1, kernel_regularizer=l2_reg)\n",
|
||
"])"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 171,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"def random_batch(X, y, batch_size=32):\n",
|
||
" idx = np.random.randint(len(X), size=batch_size)\n",
|
||
" return X[idx], y[idx]"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 172,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"def print_status_bar(step, total, loss, metrics=None):\n",
|
||
" metrics = \" - \".join([f\"{m.name}: {m.result():.4f}\"\n",
|
||
" for m in [loss] + (metrics or [])])\n",
|
||
" end = \"\" if step < total else \"\\n\"\n",
|
||
" print(f\"\\r{step}/{total} - \" + metrics, end=end)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 173,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"tf.keras.utils.set_random_seed(42)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 174,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"n_epochs = 5\n",
|
||
"batch_size = 32\n",
|
||
"n_steps = len(X_train) // batch_size\n",
|
||
"optimizer = tf.keras.optimizers.SGD(learning_rate=0.01)\n",
|
||
"loss_fn = tf.keras.losses.mean_squared_error\n",
|
||
"mean_loss = tf.keras.metrics.Mean()\n",
|
||
"metrics = [tf.keras.metrics.MeanAbsoluteError()]"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 175,
|
||
"metadata": {
|
||
"scrolled": true
|
||
},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"Epoch 1/5\n",
|
||
"362/362 - mean: 3.5419 - mean_absolute_error: 0.6640\n",
|
||
"Epoch 2/5\n",
|
||
"362/362 - mean: 1.8693 - mean_absolute_error: 0.5431\n",
|
||
"Epoch 3/5\n",
|
||
"362/362 - mean: 1.1428 - mean_absolute_error: 0.5030\n",
|
||
"Epoch 4/5\n",
|
||
"362/362 - mean: 0.8501 - mean_absolute_error: 0.4977\n",
|
||
"Epoch 5/5\n",
|
||
"362/362 - mean: 0.7280 - mean_absolute_error: 0.5014\n"
|
||
]
|
||
}
|
||
],
|
||
"source": [
|
||
"for epoch in range(1, n_epochs + 1):\n",
|
||
" print(f\"Epoch {epoch}/{n_epochs}\")\n",
|
||
" for step in range(1, n_steps + 1):\n",
|
||
" X_batch, y_batch = random_batch(X_train_scaled, y_train)\n",
|
||
" with tf.GradientTape() as tape:\n",
|
||
" y_pred = model(X_batch, training=True)\n",
|
||
" main_loss = tf.reduce_mean(loss_fn(y_batch, y_pred))\n",
|
||
" loss = tf.add_n([main_loss] + model.losses)\n",
|
||
"\n",
|
||
" gradients = tape.gradient(loss, model.trainable_variables)\n",
|
||
" optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n",
|
||
"\n",
|
||
" # extra code – if your model has variable constraints\n",
|
||
" for variable in model.variables:\n",
|
||
" if variable.constraint is not None:\n",
|
||
" variable.assign(variable.constraint(variable))\n",
|
||
"\n",
|
||
" mean_loss(loss)\n",
|
||
" for metric in metrics:\n",
|
||
" metric(y_batch, y_pred)\n",
|
||
"\n",
|
||
" print_status_bar(step, n_steps, mean_loss, metrics)\n",
|
||
"\n",
|
||
" for metric in [mean_loss] + metrics:\n",
|
||
" metric.reset_states()"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 176,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"application/vnd.jupyter.widget-view+json": {
|
||
"model_id": "28534c4a7baf4b78a8a9f1db10024cfd",
|
||
"version_major": 2,
|
||
"version_minor": 0
|
||
},
|
||
"text/plain": [
|
||
"All epochs: 0%| | 0/5 [00:00<?, ?it/s]"
|
||
]
|
||
},
|
||
"metadata": {},
|
||
"output_type": "display_data"
|
||
},
|
||
{
|
||
"data": {
|
||
"application/vnd.jupyter.widget-view+json": {
|
||
"model_id": "cd7c0a89c62f476db08f755e6e4f1178",
|
||
"version_major": 2,
|
||
"version_minor": 0
|
||
},
|
||
"text/plain": [
|
||
"Epoch 1/5: 0%| | 0/362 [00:00<?, ?it/s]"
|
||
]
|
||
},
|
||
"metadata": {},
|
||
"output_type": "display_data"
|
||
},
|
||
{
|
||
"data": {
|
||
"application/vnd.jupyter.widget-view+json": {
|
||
"model_id": "5866293693b1455584e6a2e28811692a",
|
||
"version_major": 2,
|
||
"version_minor": 0
|
||
},
|
||
"text/plain": [
|
||
"Epoch 2/5: 0%| | 0/362 [00:00<?, ?it/s]"
|
||
]
|
||
},
|
||
"metadata": {},
|
||
"output_type": "display_data"
|
||
},
|
||
{
|
||
"data": {
|
||
"application/vnd.jupyter.widget-view+json": {
|
||
"model_id": "84cf94014b644e07b649063016221d3f",
|
||
"version_major": 2,
|
||
"version_minor": 0
|
||
},
|
||
"text/plain": [
|
||
"Epoch 3/5: 0%| | 0/362 [00:00<?, ?it/s]"
|
||
]
|
||
},
|
||
"metadata": {},
|
||
"output_type": "display_data"
|
||
},
|
||
{
|
||
"data": {
|
||
"application/vnd.jupyter.widget-view+json": {
|
||
"model_id": "21e3803f4d4249049efc0b725c9bd23f",
|
||
"version_major": 2,
|
||
"version_minor": 0
|
||
},
|
||
"text/plain": [
|
||
"Epoch 4/5: 0%| | 0/362 [00:00<?, ?it/s]"
|
||
]
|
||
},
|
||
"metadata": {},
|
||
"output_type": "display_data"
|
||
},
|
||
{
|
||
"data": {
|
||
"application/vnd.jupyter.widget-view+json": {
|
||
"model_id": "c8c0aa7115374ed8891175bafc6f7d0d",
|
||
"version_major": 2,
|
||
"version_minor": 0
|
||
},
|
||
"text/plain": [
|
||
"Epoch 5/5: 0%| | 0/362 [00:00<?, ?it/s]"
|
||
]
|
||
},
|
||
"metadata": {},
|
||
"output_type": "display_data"
|
||
}
|
||
],
|
||
"source": [
|
||
"# extra code – shows how to use the tqdm package to display nice progress bars\n",
|
||
"\n",
|
||
"from tqdm.notebook import trange\n",
|
||
"from collections import OrderedDict\n",
|
||
"with trange(1, n_epochs + 1, desc=\"All epochs\") as epochs:\n",
|
||
" for epoch in epochs:\n",
|
||
" with trange(1, n_steps + 1, desc=f\"Epoch {epoch}/{n_epochs}\") as steps:\n",
|
||
" for step in steps:\n",
|
||
" X_batch, y_batch = random_batch(X_train_scaled, y_train)\n",
|
||
" with tf.GradientTape() as tape:\n",
|
||
" y_pred = model(X_batch)\n",
|
||
" main_loss = tf.reduce_mean(loss_fn(y_batch, y_pred))\n",
|
||
" loss = tf.add_n([main_loss] + model.losses)\n",
|
||
"\n",
|
||
" gradients = tape.gradient(loss, model.trainable_variables)\n",
|
||
" optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n",
|
||
"\n",
|
||
" for variable in model.variables:\n",
|
||
" if variable.constraint is not None:\n",
|
||
" variable.assign(variable.constraint(variable))\n",
|
||
"\n",
|
||
" status = OrderedDict()\n",
|
||
" mean_loss(loss)\n",
|
||
" status[\"loss\"] = mean_loss.result().numpy()\n",
|
||
" for metric in metrics:\n",
|
||
" metric(y_batch, y_pred)\n",
|
||
" status[metric.name] = metric.result().numpy()\n",
|
||
"\n",
|
||
" steps.set_postfix(status)\n",
|
||
"\n",
|
||
" for metric in [mean_loss] + metrics:\n",
|
||
" metric.reset_states()"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"## TensorFlow Functions"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 177,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"def cube(x):\n",
|
||
" return x ** 3"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 178,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"8"
|
||
]
|
||
},
|
||
"execution_count": 178,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"cube(2)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 179,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(), dtype=float32, numpy=8.0>"
|
||
]
|
||
},
|
||
"execution_count": 179,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"cube(tf.constant(2.0))"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 180,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tensorflow.python.eager.polymorphic_function.polymorphic_function.Function at 0x19db349d0>"
|
||
]
|
||
},
|
||
"execution_count": 180,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"tf_cube = tf.function(cube)\n",
|
||
"tf_cube"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 181,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(), dtype=int32, numpy=8>"
|
||
]
|
||
},
|
||
"execution_count": 181,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"tf_cube(2)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 182,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(), dtype=float32, numpy=8.0>"
|
||
]
|
||
},
|
||
"execution_count": 182,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"tf_cube(tf.constant(2.0))"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 183,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"@tf.function\n",
|
||
"def tf_cube(x):\n",
|
||
" return x ** 3"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"**Note:** the rest of the code in this section is in appendix D."
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"### TF Functions and Concrete Functions"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 184,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<ConcreteFunction tf_cube(x) at 0x19F90F400>"
|
||
]
|
||
},
|
||
"execution_count": 184,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"concrete_function = tf_cube.get_concrete_function(tf.constant(2.0))\n",
|
||
"concrete_function"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 185,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(), dtype=float32, numpy=8.0>"
|
||
]
|
||
},
|
||
"execution_count": 185,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"concrete_function(tf.constant(2.0))"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 186,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"True"
|
||
]
|
||
},
|
||
"execution_count": 186,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"concrete_function is tf_cube.get_concrete_function(tf.constant(2.0))"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"### Exploring Function Definitions and Graphs"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 187,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"PyGraph<6956689888>"
|
||
]
|
||
},
|
||
"execution_count": 187,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"concrete_function.graph"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 188,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"[<tf.Operation 'x' type=Placeholder>,\n",
|
||
" <tf.Operation 'pow/y' type=Const>,\n",
|
||
" <tf.Operation 'pow' type=Pow>,\n",
|
||
" <tf.Operation 'Identity' type=Identity>]"
|
||
]
|
||
},
|
||
"execution_count": 188,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"ops = concrete_function.graph.get_operations()\n",
|
||
"ops"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 189,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"[<tf.Tensor 'x:0' shape=() dtype=float32>,\n",
|
||
" <tf.Tensor 'pow/y:0' shape=() dtype=float32>]"
|
||
]
|
||
},
|
||
"execution_count": 189,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"pow_op = ops[2]\n",
|
||
"list(pow_op.inputs)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 190,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"[<tf.Tensor 'pow:0' shape=() dtype=float32>]"
|
||
]
|
||
},
|
||
"execution_count": 190,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"pow_op.outputs"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 191,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Operation 'x' type=Placeholder>"
|
||
]
|
||
},
|
||
"execution_count": 191,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"concrete_function.graph.get_operation_by_name('x')"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 192,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor 'Identity:0' shape=() dtype=float32>"
|
||
]
|
||
},
|
||
"execution_count": 192,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"concrete_function.graph.get_tensor_by_name('Identity:0')"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 193,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"name: \"__inference_tf_cube_592407\"\n",
|
||
"input_arg {\n",
|
||
" name: \"x\"\n",
|
||
" type: DT_FLOAT\n",
|
||
"}\n",
|
||
"output_arg {\n",
|
||
" name: \"identity\"\n",
|
||
" type: DT_FLOAT\n",
|
||
"}"
|
||
]
|
||
},
|
||
"execution_count": 193,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"concrete_function.function_def.signature"
|
||
]
|
||
},
|
||
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### How TF Functions Trace Python Functions to Extract Their Computation Graphs"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 194,
   "metadata": {},
   "outputs": [],
   "source": [
    "@tf.function\n",
    "def tf_cube(x):\n",
    "    print(f\"x = {x}\")\n",
    "    return x ** 3"
   ]
  },
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 195,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"x = Tensor(\"x:0\", shape=(), dtype=float32)\n"
|
||
]
|
||
}
|
||
],
|
||
"source": [
|
||
"result = tf_cube(tf.constant(2.0))"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 196,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(), dtype=float32, numpy=8.0>"
|
||
]
|
||
},
|
||
"execution_count": 196,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"result"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 197,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"x = 2\n"
|
||
]
|
||
}
|
||
],
|
||
"source": [
|
||
"result = tf_cube(2)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 198,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"x = 3\n"
|
||
]
|
||
}
|
||
],
|
||
"source": [
|
||
"result = tf_cube(3)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 199,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"x = Tensor(\"x:0\", shape=(1, 2), dtype=float32)\n"
|
||
]
|
||
}
|
||
],
|
||
"source": [
|
||
"result = tf_cube(tf.constant([[1., 2.]])) # New shape: trace!"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 200,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"x = Tensor(\"x:0\", shape=(2, 2), dtype=float32)\n",
|
||
"WARNING:tensorflow:5 out of the last 5 calls to <function tf_cube at 0x19f910c10> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has reduce_retracing=True option that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/guide/function#controlling_retracing and https://www.tensorflow.org/api_docs/python/tf/function for more details.\n"
|
||
]
|
||
},
|
||
{
|
||
"name": "stderr",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"WARNING:tensorflow:5 out of the last 5 calls to <function tf_cube at 0x19f910c10> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has reduce_retracing=True option that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/guide/function#controlling_retracing and https://www.tensorflow.org/api_docs/python/tf/function for more details.\n"
|
||
]
|
||
}
|
||
],
|
||
"source": [
|
||
"result = tf_cube(tf.constant([[3., 4.], [5., 6.]])) # New shape: trace!"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 201,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"result = tf_cube(tf.constant([[7., 8.], [9., 10.]])) # Same shape: no trace"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"It is also possible to specify a particular input signature:"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 202,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"@tf.function(input_signature=[tf.TensorSpec([None, 28, 28], tf.float32)])\n",
|
||
"def shrink(images):\n",
|
||
" print(\"Tracing\", images) # extra code to show when tracing happens\n",
|
||
" return images[:, ::2, ::2] # drop half the rows and columns"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 203,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"tf.keras.utils.set_random_seed(42)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 204,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"Tracing Tensor(\"images:0\", shape=(None, 28, 28), dtype=float32)\n"
|
||
]
|
||
}
|
||
],
|
||
"source": [
|
||
"img_batch_1 = tf.random.uniform(shape=[100, 28, 28])\n",
|
||
"img_batch_2 = tf.random.uniform(shape=[50, 28, 28])\n",
|
||
"preprocessed_images = shrink(img_batch_1) # Works fine, traces the function\n",
|
||
"preprocessed_images = shrink(img_batch_2) # Works fine, same concrete function"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 205,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"Binding inputs to tf.function `shrink` failed due to `Can not cast TensorSpec(shape=(2, 2, 2), dtype=tf.float32, name=None) to TensorSpec(shape=(None, 28, 28), dtype=tf.float32, name=None)`. Received args: (<tf.Tensor: shape=(2, 2, 2), dtype=float32, numpy=\n",
|
||
"array([[[0.7413678 , 0.62854624],\n",
|
||
" [0.01738465, 0.3431449 ]],\n",
|
||
"\n",
|
||
" [[0.51063764, 0.3777541 ],\n",
|
||
" [0.07321596, 0.02137029]]], dtype=float32)>,) and kwargs: {} for signature: (images: TensorSpec(shape=(None, 28, 28), dtype=tf.float32, name=None)).\n"
|
||
]
|
||
}
|
||
],
|
||
"source": [
|
||
"img_batch_3 = tf.random.uniform(shape=[2, 2, 2])\n",
|
||
"try:\n",
|
||
" preprocessed_images = shrink(img_batch_3) # TypeError! Incompatible inputs\n",
|
||
"except TypeError as ex:\n",
|
||
" print(ex)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"### Using Autograph To Capture Control Flow"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"A \"static\" `for` loop using `range()`:"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 206,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"@tf.function\n",
|
||
"def add_10(x):\n",
|
||
" for i in range(10):\n",
|
||
" x += 1\n",
|
||
" return x"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 207,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(), dtype=int32, numpy=15>"
|
||
]
|
||
},
|
||
"execution_count": 207,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"add_10(tf.constant(5))"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 208,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"[<tf.Operation 'x' type=Placeholder>,\n",
|
||
" <tf.Operation 'add/y' type=Const>,\n",
|
||
" <tf.Operation 'add' type=AddV2>,\n",
|
||
" <tf.Operation 'add_1/y' type=Const>,\n",
|
||
" <tf.Operation 'add_1' type=AddV2>,\n",
|
||
" <tf.Operation 'add_2/y' type=Const>,\n",
|
||
" <tf.Operation 'add_2' type=AddV2>,\n",
|
||
" <tf.Operation 'add_3/y' type=Const>,\n",
|
||
" <tf.Operation 'add_3' type=AddV2>,\n",
|
||
" <tf.Operation 'add_4/y' type=Const>,\n",
|
||
" <tf.Operation 'add_4' type=AddV2>,\n",
|
||
" <tf.Operation 'add_5/y' type=Const>,\n",
|
||
" <tf.Operation 'add_5' type=AddV2>,\n",
|
||
" <tf.Operation 'add_6/y' type=Const>,\n",
|
||
" <tf.Operation 'add_6' type=AddV2>,\n",
|
||
" <tf.Operation 'add_7/y' type=Const>,\n",
|
||
" <tf.Operation 'add_7' type=AddV2>,\n",
|
||
" <tf.Operation 'add_8/y' type=Const>,\n",
|
||
" <tf.Operation 'add_8' type=AddV2>,\n",
|
||
" <tf.Operation 'add_9/y' type=Const>,\n",
|
||
" <tf.Operation 'add_9' type=AddV2>,\n",
|
||
" <tf.Operation 'Identity' type=Identity>]"
|
||
]
|
||
},
|
||
"execution_count": 208,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"add_10.get_concrete_function(tf.constant(5)).graph.get_operations()"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"A \"dynamic\" loop using `tf.while_loop()`:"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 209,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"# extra code – shows how to use tf.while_loop (usually @tf.function is simpler)\n",
|
||
"@tf.function\n",
|
||
"def add_10(x):\n",
|
||
" condition = lambda i, x: tf.less(i, 10)\n",
|
||
" body = lambda i, x: (tf.add(i, 1), tf.add(x, 1))\n",
|
||
" final_i, final_x = tf.while_loop(condition, body, [tf.constant(0), x])\n",
|
||
" return final_x"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 210,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(), dtype=int32, numpy=15>"
|
||
]
|
||
},
|
||
"execution_count": 210,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"add_10(tf.constant(5))"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 211,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"[<tf.Operation 'x' type=Placeholder>,\n",
|
||
" <tf.Operation 'Const' type=Const>,\n",
|
||
" <tf.Operation 'while/maximum_iterations' type=Const>,\n",
|
||
" <tf.Operation 'while/loop_counter' type=Const>,\n",
|
||
" <tf.Operation 'while' type=StatelessWhile>,\n",
|
||
" <tf.Operation 'Identity' type=Identity>]"
|
||
]
|
||
},
|
||
"execution_count": 211,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"add_10.get_concrete_function(tf.constant(5)).graph.get_operations()"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"A \"dynamic\" `for` loop using `tf.range()` (captured by autograph):"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 212,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"@tf.function\n",
|
||
"def add_10(x):\n",
|
||
" for i in tf.range(10):\n",
|
||
" x = x + 1\n",
|
||
" return x"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 213,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"[<tf.Operation 'x' type=Placeholder>,\n",
|
||
" <tf.Operation 'range/start' type=Const>,\n",
|
||
" <tf.Operation 'range/limit' type=Const>,\n",
|
||
" <tf.Operation 'range/delta' type=Const>,\n",
|
||
" <tf.Operation 'range' type=Range>,\n",
|
||
" <tf.Operation 'sub' type=Sub>,\n",
|
||
" <tf.Operation 'floordiv' type=FloorDiv>,\n",
|
||
" <tf.Operation 'mod' type=FloorMod>,\n",
|
||
" <tf.Operation 'zeros_like' type=Const>,\n",
|
||
" <tf.Operation 'NotEqual' type=NotEqual>,\n",
|
||
" <tf.Operation 'Cast' type=Cast>,\n",
|
||
" <tf.Operation 'add' type=AddV2>,\n",
|
||
" <tf.Operation 'zeros_like_1' type=Const>,\n",
|
||
" <tf.Operation 'Maximum' type=Maximum>,\n",
|
||
" <tf.Operation 'while/maximum_iterations' type=Const>,\n",
|
||
" <tf.Operation 'while/loop_counter' type=Const>,\n",
|
||
" <tf.Operation 'while' type=StatelessWhile>,\n",
|
||
" <tf.Operation 'Identity' type=Identity>]"
|
||
]
|
||
},
|
||
"execution_count": 213,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"add_10.get_concrete_function(tf.constant(0)).graph.get_operations()"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"### Handling Variables and Other Resources in TF Functions"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 214,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(), dtype=int32, numpy=2>"
|
||
]
|
||
},
|
||
"execution_count": 214,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"counter = tf.Variable(0)\n",
|
||
"\n",
|
||
"@tf.function\n",
|
||
"def increment(counter, c=1):\n",
|
||
" return counter.assign_add(c)\n",
|
||
"\n",
|
||
"increment(counter) # counter is now equal to 1\n",
|
||
"increment(counter) # counter is now equal to 2"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 215,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"name: \"counter\"\n",
|
||
"type: DT_RESOURCE"
|
||
]
|
||
},
|
||
"execution_count": 215,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"function_def = increment.get_concrete_function(counter).function_def\n",
|
||
"function_def.signature.input_arg[0]"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 216,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"counter = tf.Variable(0)\n",
|
||
"\n",
|
||
"@tf.function\n",
|
||
"def increment(c=1):\n",
|
||
" return counter.assign_add(c)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 217,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(), dtype=int32, numpy=2>"
|
||
]
|
||
},
|
||
"execution_count": 217,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"increment()\n",
|
||
"increment()"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 218,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"name: \"assignaddvariableop_resource\"\n",
|
||
"type: DT_RESOURCE"
|
||
]
|
||
},
|
||
"execution_count": 218,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"function_def = increment.get_concrete_function().function_def\n",
|
||
"function_def.signature.input_arg[0]"
|
||
]
|
||
},
|
||
  {
   "cell_type": "code",
   "execution_count": 219,
   "metadata": {},
   "outputs": [],
   "source": [
    "class Counter:\n",
    "    def __init__(self):\n",
    "        self.counter = tf.Variable(0)\n",
    "\n",
    "    @tf.function\n",
    "    def increment(self, c=1):\n",
    "        return self.counter.assign_add(c)"
   ]
  },
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 220,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(), dtype=int32, numpy=2>"
|
||
]
|
||
},
|
||
"execution_count": 220,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"c = Counter()\n",
|
||
"c.increment()\n",
|
||
"c.increment()"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 221,
|
||
"metadata": {
|
||
"scrolled": true
|
||
},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"def tf__add(x):\n",
|
||
" with ag__.FunctionScope('add_10', 'fscope', ag__.ConversionOptions(recursive=True, user_requested=True, optional_features=(), internal_convert_user_code=True)) as fscope:\n",
|
||
" do_return = False\n",
|
||
" retval_ = ag__.UndefinedReturnValue()\n",
|
||
"\n",
|
||
" def get_state():\n",
|
||
" return (x,)\n",
|
||
"\n",
|
||
" def set_state(vars_):\n",
|
||
" nonlocal x\n",
|
||
" (x,) = vars_\n",
|
||
"\n",
|
||
" def loop_body(itr):\n",
|
||
" nonlocal x\n",
|
||
" i = itr\n",
|
||
" x = ag__.ld(x)\n",
|
||
" x += 1\n",
|
||
" i = ag__.Undefined('i')\n",
|
||
" ag__.for_stmt(ag__.converted_call(ag__.ld(tf).range, (10,), None, fscope), None, loop_body, get_state, set_state, ('x',), {'iterate_names': 'i'})\n",
|
||
" try:\n",
|
||
" do_return = True\n",
|
||
" retval_ = ag__.ld(x)\n",
|
||
" except:\n",
|
||
" do_return = False\n",
|
||
" raise\n",
|
||
" return fscope.ret(retval_, do_return)\n",
|
||
"\n"
|
||
]
|
||
}
|
||
],
|
||
"source": [
|
||
"@tf.function\n",
|
||
"def add_10(x):\n",
|
||
" for i in tf.range(10):\n",
|
||
" x += 1\n",
|
||
" return x\n",
|
||
"\n",
|
||
"print(tf.autograph.to_code(add_10.python_function))"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 222,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"# extra code – shows how to display the autograph code with syntax highlighting\n",
|
||
"def display_tf_code(func):\n",
|
||
" from IPython.display import display, Markdown\n",
|
||
" if hasattr(func, \"python_function\"):\n",
|
||
" func = func.python_function\n",
|
||
" code = tf.autograph.to_code(func)\n",
|
||
" display(Markdown(f'```python\\n{code}\\n```'))"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 223,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/markdown": [
|
||
"```python\n",
|
||
"def tf__add(x):\n",
|
||
" with ag__.FunctionScope('add_10', 'fscope', ag__.ConversionOptions(recursive=True, user_requested=True, optional_features=(), internal_convert_user_code=True)) as fscope:\n",
|
||
" do_return = False\n",
|
||
" retval_ = ag__.UndefinedReturnValue()\n",
|
||
"\n",
|
||
" def get_state():\n",
|
||
" return (x,)\n",
|
||
"\n",
|
||
" def set_state(vars_):\n",
|
||
" nonlocal x\n",
|
||
" (x,) = vars_\n",
|
||
"\n",
|
||
" def loop_body(itr):\n",
|
||
" nonlocal x\n",
|
||
" i = itr\n",
|
||
" x = ag__.ld(x)\n",
|
||
" x += 1\n",
|
||
" i = ag__.Undefined('i')\n",
|
||
" ag__.for_stmt(ag__.converted_call(ag__.ld(tf).range, (10,), None, fscope), None, loop_body, get_state, set_state, ('x',), {'iterate_names': 'i'})\n",
|
||
" try:\n",
|
||
" do_return = True\n",
|
||
" retval_ = ag__.ld(x)\n",
|
||
" except:\n",
|
||
" do_return = False\n",
|
||
" raise\n",
|
||
" return fscope.ret(retval_, do_return)\n",
|
||
"\n",
|
||
"```"
|
||
],
|
||
"text/plain": [
|
||
"<IPython.core.display.Markdown object>"
|
||
]
|
||
},
|
||
"metadata": {},
|
||
"output_type": "display_data"
|
||
}
|
||
],
|
||
"source": [
|
||
"display_tf_code(add_10)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"## Using TF Functions with tf.keras (or Not)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"By default, tf.keras will automatically convert your custom code into TF Functions, no need to use\n",
|
||
"`tf.function()`:"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 224,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"# Custom loss function\n",
|
||
"def my_mse(y_true, y_pred):\n",
|
||
" print(\"Tracing loss my_mse()\")\n",
|
||
" return tf.reduce_mean(tf.square(y_pred - y_true))"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 225,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"# Custom metric function\n",
|
||
"def my_mae(y_true, y_pred):\n",
|
||
" print(\"Tracing metric my_mae()\")\n",
|
||
" return tf.reduce_mean(tf.abs(y_pred - y_true))"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 226,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"# Custom layer\n",
|
||
"class MyDense(tf.keras.layers.Layer):\n",
|
||
" def __init__(self, units, activation=None, **kwargs):\n",
|
||
" super().__init__(**kwargs)\n",
|
||
" self.units = units\n",
|
||
" self.activation = tf.keras.activations.get(activation)\n",
|
||
"\n",
|
||
" def build(self, input_shape):\n",
|
||
" self.kernel = self.add_weight(name='kernel', \n",
|
||
" shape=(input_shape[1], self.units),\n",
|
||
" initializer='uniform',\n",
|
||
" trainable=True)\n",
|
||
" self.biases = self.add_weight(name='bias', \n",
|
||
" shape=(self.units,),\n",
|
||
" initializer='zeros',\n",
|
||
" trainable=True)\n",
|
||
"\n",
|
||
" def call(self, X):\n",
|
||
" print(\"Tracing MyDense.call()\")\n",
|
||
" return self.activation(X @ self.kernel + self.biases)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 227,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"tf.keras.utils.set_random_seed(42)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 228,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"# Custom model\n",
|
||
"class MyModel(tf.keras.Model):\n",
|
||
" def __init__(self, **kwargs):\n",
|
||
" super().__init__(**kwargs)\n",
|
||
" self.hidden1 = MyDense(30, activation=\"relu\")\n",
|
||
" self.hidden2 = MyDense(30, activation=\"relu\")\n",
|
||
" self.output_ = MyDense(1)\n",
|
||
"\n",
|
||
" def call(self, input):\n",
|
||
" print(\"Tracing MyModel.call()\")\n",
|
||
" hidden1 = self.hidden1(input)\n",
|
||
" hidden2 = self.hidden2(hidden1)\n",
|
||
" concat = tf.keras.layers.concatenate([input, hidden2])\n",
|
||
" output = self.output_(concat)\n",
|
||
" return output\n",
|
||
"\n",
|
||
"model = MyModel()"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 229,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"model.compile(loss=my_mse, optimizer=\"nadam\", metrics=[my_mae])"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 230,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"Epoch 1/2\n",
|
||
"Tracing MyModel.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing loss my_mse()\n",
|
||
"Tracing metric my_mae()\n",
|
||
"Tracing MyModel.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing loss my_mse()\n",
|
||
"Tracing metric my_mae()\n",
|
||
"315/363 [=========================>....] - ETA: 0s - loss: 1.5746 - my_mae: 0.8719Tracing MyModel.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing loss my_mse()\n",
|
||
"Tracing metric my_mae()\n",
|
||
"363/363 [==============================] - 1s 1ms/step - loss: 1.4303 - my_mae: 0.8219 - val_loss: 0.4932 - val_my_mae: 0.4764\n",
|
||
"Epoch 2/2\n",
|
||
"363/363 [==============================] - 0s 1ms/step - loss: 0.4386 - my_mae: 0.4760 - val_loss: 1.0322 - val_my_mae: 0.4793\n",
|
||
"162/162 [==============================] - 0s 704us/step - loss: 0.4204 - my_mae: 0.4711\n"
|
||
]
|
||
},
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"[0.4203692376613617, 0.4711270332336426]"
|
||
]
|
||
},
|
||
"execution_count": 230,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"model.fit(X_train_scaled, y_train, epochs=2,\n",
|
||
" validation_data=(X_valid_scaled, y_valid))\n",
|
||
"model.evaluate(X_test_scaled, y_test)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"You can turn this off by creating the model with `dynamic=True` (or calling `super().__init__(dynamic=True, **kwargs)` in the model's constructor):"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 231,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"tf.keras.utils.set_random_seed(42)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 232,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"model = MyModel(dynamic=True)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 233,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"model.compile(loss=my_mse, optimizer=\"nadam\", metrics=[my_mae])"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"Now the custom code will be called at each iteration. Let's fit, validate and evaluate with tiny datasets to avoid getting too much output:"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 234,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"Tracing MyModel.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing loss my_mse()\n",
|
||
"Tracing metric my_mae()\n",
|
||
"Tracing MyModel.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing loss my_mse()\n",
|
||
"Tracing metric my_mae()\n",
|
||
"Tracing MyModel.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing loss my_mse()\n",
|
||
"Tracing metric my_mae()\n",
|
||
"Tracing MyModel.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing loss my_mse()\n",
|
||
"Tracing metric my_mae()\n",
|
||
"Tracing MyModel.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing loss my_mse()\n",
|
||
"Tracing metric my_mae()\n",
|
||
"Tracing MyModel.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing loss my_mse()\n",
|
||
"Tracing metric my_mae()\n"
|
||
]
|
||
},
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"[5.545090198516846, 2.0603599548339844]"
|
||
]
|
||
},
|
||
"execution_count": 234,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"model.fit(X_train_scaled[:64], y_train[:64], epochs=1,\n",
|
||
" validation_data=(X_valid_scaled[:64], y_valid[:64]), verbose=0)\n",
|
||
"model.evaluate(X_test_scaled[:64], y_test[:64], verbose=0)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"Alternatively, you can compile a model with `run_eagerly=True`:"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 235,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"tf.keras.utils.set_random_seed(42)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 236,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"model = MyModel()"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 237,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"model.compile(loss=my_mse, optimizer=\"nadam\", metrics=[my_mae], run_eagerly=True)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 238,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"Tracing MyModel.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing loss my_mse()\n",
|
||
"Tracing metric my_mae()\n",
|
||
"Tracing MyModel.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing loss my_mse()\n",
|
||
"Tracing metric my_mae()\n",
|
||
"Tracing MyModel.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing loss my_mse()\n",
|
||
"Tracing metric my_mae()\n",
|
||
"Tracing MyModel.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing loss my_mse()\n",
|
||
"Tracing metric my_mae()\n",
|
||
"Tracing MyModel.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing loss my_mse()\n",
|
||
"Tracing metric my_mae()\n",
|
||
"Tracing MyModel.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing MyDense.call()\n",
|
||
"Tracing loss my_mse()\n",
|
||
"Tracing metric my_mae()\n"
|
||
]
|
||
},
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"[5.545090198516846, 2.0603599548339844]"
|
||
]
|
||
},
|
||
"execution_count": 238,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"model.fit(X_train_scaled[:64], y_train[:64], epochs=1,\n",
|
||
" validation_data=(X_valid_scaled[:64], y_valid[:64]), verbose=0)\n",
|
||
"model.evaluate(X_test_scaled[:64], y_test[:64], verbose=0)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"## Extra Material – Custom Optimizers"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"Defining custom optimizers is not very common, but in case you are one of the happy few who gets to write one, here is an example:"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 239,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"class MyMomentumOptimizer(tf.keras.optimizers.Optimizer):\n",
|
||
" def __init__(self, learning_rate=0.001, momentum=0.9, name=\"MyMomentumOptimizer\", **kwargs):\n",
|
||
" \"\"\"Gradient descent with momentum optimizer.\"\"\"\n",
|
||
" super().__init__(name, **kwargs)\n",
|
||
" self._learning_rate = self._build_learning_rate(learning_rate)\n",
|
||
" self.momentum = momentum\n",
|
||
"\n",
|
||
" def build(self, var_list):\n",
|
||
" \"\"\"Initialize optimizer variables.\n",
|
||
"\n",
|
||
" Args:\n",
|
||
" var_list: list of model variables to build SGD variables on.\n",
|
||
" \"\"\"\n",
|
||
" super().build(var_list)\n",
|
||
" if getattr(self, \"_built\", False):\n",
|
||
" return\n",
|
||
" self.momentums = []\n",
|
||
" for var in var_list:\n",
|
||
" self.momentums.append(\n",
|
||
" self.add_variable_from_reference(\n",
|
||
" model_variable=var, variable_name=\"m\"\n",
|
||
" )\n",
|
||
" )\n",
|
||
" self._built = True\n",
|
||
"\n",
|
||
" def update_step(self, gradient, variable):\n",
|
||
" \"\"\"Update step given gradient and the associated model variable.\"\"\"\n",
|
||
" lr = tf.cast(self.learning_rate, variable.dtype)\n",
|
||
" m = None\n",
|
||
" var_key = self._var_key(variable)\n",
|
||
" momentum = tf.cast(self.momentum, variable.dtype)\n",
|
||
" m = self.momentums[self._index_dict[var_key]]\n",
|
||
" if m is None:\n",
|
||
" variable.assign_add(-gradient * lr)\n",
|
||
" else:\n",
|
||
" m.assign(-gradient * lr + m * momentum)\n",
|
||
" variable.assign_add(m)\n",
|
||
" \n",
|
||
" def get_config(self):\n",
|
||
" base_config = super().get_config()\n",
|
||
" print(\"Config!\")\n",
|
||
" return {\n",
|
||
" **base_config,\n",
|
||
" \"learning_rate\": self._serialize_hyperparameter(self._learning_rate),\n",
|
||
" \"momentum\": self.momentum,\n",
|
||
" }"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 240,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"Epoch 1/5\n",
|
||
"363/363 [==============================] - 0s 660us/step - loss: 1.1844\n",
|
||
"Epoch 2/5\n",
|
||
"363/363 [==============================] - 0s 625us/step - loss: 0.5635\n",
|
||
"Epoch 3/5\n",
|
||
"363/363 [==============================] - 0s 609us/step - loss: 0.9703\n",
|
||
"Epoch 4/5\n",
|
||
"363/363 [==============================] - 0s 627us/step - loss: 0.5678\n",
|
||
"Epoch 5/5\n",
|
||
"363/363 [==============================] - 0s 640us/step - loss: 0.6350\n"
|
||
]
|
||
},
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<keras.src.callbacks.History at 0x19c821210>"
|
||
]
|
||
},
|
||
"execution_count": 240,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"optimizer = MyMomentumOptimizer()\n",
|
||
"\n",
|
||
"tf.keras.utils.set_random_seed(42)\n",
|
||
"model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=[8])])\n",
|
||
"model.compile(loss=\"mse\", optimizer=optimizer)\n",
|
||
"model.fit(X_train_scaled, y_train, epochs=5)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"Let's compare that to Keras's built-in momentum optimizer:"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 241,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"name": "stdout",
|
||
"output_type": "stream",
|
||
"text": [
|
||
"Epoch 1/5\n",
|
||
"363/363 [==============================] - 0s 645us/step - loss: 1.1844\n",
|
||
"Epoch 2/5\n",
|
||
"363/363 [==============================] - 0s 721us/step - loss: 0.5635\n",
|
||
"Epoch 3/5\n",
|
||
"363/363 [==============================] - 0s 612us/step - loss: 0.9703\n",
|
||
"Epoch 4/5\n",
|
||
"363/363 [==============================] - 0s 625us/step - loss: 0.5678\n",
|
||
"Epoch 5/5\n",
|
||
"363/363 [==============================] - 0s 626us/step - loss: 0.6350\n"
|
||
]
|
||
},
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<keras.src.callbacks.History at 0x19ea8da20>"
|
||
]
|
||
},
|
||
"execution_count": 241,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"optimizer = tf.keras.optimizers.SGD(learning_rate=0.001, momentum=0.9)\n",
|
||
"\n",
|
||
"tf.keras.utils.set_random_seed(42)\n",
|
||
"model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=[8])])\n",
|
||
"model.compile(loss=\"mse\", optimizer=optimizer)\n",
|
||
"model.fit(X_train_scaled, y_train, epochs=5)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"Yep, we get the exact same model! 👍"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"# Exercises"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"## 1. to 11."
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"1. TensorFlow is an open-source library for numerical computation, particularly well suited and fine-tuned for large-scale Machine Learning. Its core is similar to NumPy, but it also features GPU support, support for distributed computing, computation graph analysis and optimization capabilities (with a portable graph format that allows you to train a TensorFlow model in one environment and run it in another), an optimization API based on reverse-mode autodiff, and several powerful APIs such as tf.keras, tf.data, tf.image, tf.signal, and more. Other popular Deep Learning libraries include PyTorch, MXNet, Microsoft Cognitive Toolkit, Theano, Caffe2, and Chainer.\n",
|
||
"2. Although TensorFlow offers most of the functionalities provided by NumPy, it is not a drop-in replacement, for a few reasons. First, the names of the functions are not always the same (for example, `tf.reduce_sum()` versus `np.sum()`). Second, some functions do not behave in exactly the same way (for example, `tf.transpose()` creates a transposed copy of a tensor, while NumPy's `T` attribute creates a transposed view, without actually copying any data). Lastly, NumPy arrays are mutable, while TensorFlow tensors are not (but you can use a `tf.Variable` if you need a mutable object).\n",
|
||
"3. Both `tf.range(10)` and `tf.constant(np.arange(10))` return a one-dimensional tensor containing the integers 0 to 9. However, the former uses 32-bit integers while the latter uses 64-bit integers. Indeed, TensorFlow defaults to 32 bits, while NumPy defaults to 64 bits.\n",
|
||
"4. Beyond regular tensors, TensorFlow offers several other data structures, including sparse tensors, tensor arrays, ragged tensors, queues, string tensors, and sets. The last two are actually represented as regular tensors, but TensorFlow provides special functions to manipulate them (in `tf.strings` and `tf.sets`).\n",
|
||
"5. When you want to define a custom loss function, in general you can just implement it as a regular Python function. However, if your custom loss function must support some hyperparameters (or any other state), then you should subclass the `keras.losses.Loss` class and implement the `__init__()` and `call()` methods. If you want the loss function's hyperparameters to be saved along with the model, then you must also implement the `get_config()` method.\n",
|
||
"6. Much like custom loss functions, most metrics can be defined as regular Python functions. But if you want your custom metric to support some hyperparameters (or any other state), then you should subclass the `keras.metrics.Metric` class. Moreover, if computing the metric over a whole epoch is not equivalent to computing the mean metric over all batches in that epoch (e.g., as for the precision and recall metrics), then you should subclass the `keras.metrics.Metric` class and implement the `__init__()`, `update_state()`, and `result()` methods to keep track of a running metric during each epoch. You should also implement the `reset_states()` method unless all it needs to do is reset all variables to 0.0. If you want the state to be saved along with the model, then you should implement the `get_config()` method as well.\n",
|
||
"7. You should distinguish the internal components of your model (i.e., layers or reusable blocks of layers) from the model itself (i.e., the object you will train). The former should subclass the `keras.layers.Layer` class, while the latter should subclass the `keras.models.Model` class.\n",
|
||
"8. Writing your own custom training loop is fairly advanced, so you should only do it if you really need to. Keras provides several tools to customize training without having to write a custom training loop: callbacks, custom regularizers, custom constraints, custom losses, and so on. You should use these instead of writing a custom training loop whenever possible: writing a custom training loop is more error-prone, and it will be harder to reuse the custom code you write. However, in some cases writing a custom training loop is necessary—for example, if you want to use different optimizers for different parts of your neural network, like in the [Wide & Deep paper](https://homl.info/widedeep). A custom training loop can also be useful when debugging, or when trying to understand exactly how training works.\n",
|
||
"9. Custom Keras components should be convertible to TF Functions, which means they should stick to TF operations as much as possible and respect all the rules listed in Chapter 12 (in the _TF Function Rules_ section). If you absolutely need to include arbitrary Python code in a custom component, you can either wrap it in a `tf.py_function()` operation (but this will reduce performance and limit your model's portability) or set `dynamic=True` when creating the custom layer or model (or set `run_eagerly=True` when calling the model's `compile()` method).\n",
|
||
"10. Please refer to Chapter 12 for the list of rules to respect when creating a TF Function (in the _TF Function Rules_ section).\n",
|
||
"11. Creating a dynamic Keras model can be useful for debugging, as it will not compile any custom component to a TF Function, and you can use any Python debugger to debug your code. It can also be useful if you want to include arbitrary Python code in your model (or in your training code), including calls to external libraries. To make a model dynamic, you must set `dynamic=True` when creating it. Alternatively, you can set `run_eagerly=True` when calling the model's `compile()` method. Making a model dynamic prevents Keras from using any of TensorFlow's graph features, so it will slow down training and inference, and you will not have the possibility to export the computation graph, which will limit your model's portability."
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"## 12. Implement a custom layer that performs _Layer Normalization_\n",
|
||
"_We will use this type of layer in Chapter 15 when using Recurrent Neural Networks._"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"### a.\n",
|
||
"_Exercise: The `build()` method should define two trainable weights *α* and *β*, both of shape `input_shape[-1:]` and data type `tf.float32`. *α* should be initialized with 1s, and *β* with 0s._"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"Solution: see below."
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"### b.\n",
|
||
"_Exercise: The `call()` method should compute the mean_ μ _and standard deviation_ σ _of each instance's features. For this, you can use `tf.nn.moments(inputs, axes=-1, keepdims=True)`, which returns the mean μ and the variance σ<sup>2</sup> of all instances (compute the square root of the variance to get the standard deviation). Then the function should compute and return *α*⊗(*X* - μ)/(σ + ε) + *β*, where ⊗ represents itemwise multiplication (`*`) and ε is a smoothing term (small constant to avoid division by zero, e.g., 0.001)._"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 242,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"class LayerNormalization(tf.keras.layers.Layer):\n",
|
||
" def __init__(self, eps=0.001, **kwargs):\n",
|
||
" super().__init__(**kwargs)\n",
|
||
" self.eps = eps\n",
|
||
"\n",
|
||
" def build(self, batch_input_shape):\n",
|
||
" self.alpha = self.add_weight(\n",
|
||
" name=\"alpha\", shape=batch_input_shape[-1:],\n",
|
||
" initializer=\"ones\")\n",
|
||
" self.beta = self.add_weight(\n",
|
||
" name=\"beta\", shape=batch_input_shape[-1:],\n",
|
||
" initializer=\"zeros\")\n",
|
||
"\n",
|
||
" def call(self, X):\n",
|
||
" mean, variance = tf.nn.moments(X, axes=-1, keepdims=True)\n",
|
||
" return self.alpha * (X - mean) / (tf.sqrt(variance + self.eps)) + self.beta\n",
|
||
"\n",
|
||
" def get_config(self):\n",
|
||
" base_config = super().get_config()\n",
|
||
" return {**base_config, \"eps\": self.eps}"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"Note that making _ε_ a hyperparameter (`eps`) was not compulsory. Also note that it's preferable to compute `tf.sqrt(variance + self.eps)` rather than `tf.sqrt(variance) + self.eps`. Indeed, the derivative of sqrt(z) is undefined when z=0, so training will bomb whenever the variance vector has at least one component equal to 0. Adding _ε_ within the square root guarantees that this will never happen."
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"### c.\n",
|
||
"_Exercise: Ensure that your custom layer produces the same (or very nearly the same) output as the `tf.keras.layers.LayerNormalization` layer._"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
    "Let's create one instance of each class, apply them to some data (e.g., the training set), and ensure that the difference is negligible."
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 243,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(), dtype=float32, numpy=3.9782837e-08>"
|
||
]
|
||
},
|
||
"execution_count": 243,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"X = X_train.astype(np.float32)\n",
|
||
"\n",
|
||
"custom_layer_norm = LayerNormalization()\n",
|
||
"keras_layer_norm = tf.keras.layers.LayerNormalization()\n",
|
||
"\n",
|
||
"tf.reduce_mean(tf.keras.losses.mean_absolute_error(\n",
|
||
" keras_layer_norm(X), custom_layer_norm(X)))"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"Yep, that's close enough. To be extra sure, let's make alpha and beta completely random and compare again:"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 244,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"text/plain": [
|
||
"<tf.Tensor: shape=(), dtype=float32, numpy=1.764704e-08>"
|
||
]
|
||
},
|
||
"execution_count": 244,
|
||
"metadata": {},
|
||
"output_type": "execute_result"
|
||
}
|
||
],
|
||
"source": [
|
||
"tf.keras.utils.set_random_seed(42)\n",
|
||
"random_alpha = np.random.rand(X.shape[-1])\n",
|
||
"random_beta = np.random.rand(X.shape[-1])\n",
|
||
"\n",
|
||
"custom_layer_norm.set_weights([random_alpha, random_beta])\n",
|
||
"keras_layer_norm.set_weights([random_alpha, random_beta])\n",
|
||
"\n",
|
||
"tf.reduce_mean(tf.keras.losses.mean_absolute_error(\n",
|
||
" keras_layer_norm(X), custom_layer_norm(X)))"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
    "Still a negligible difference! Our custom layer works fine."
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"## 13. Train a model using a custom training loop to tackle the Fashion MNIST dataset\n",
|
||
"_The Fashion MNIST dataset was introduced in Chapter 10._"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"### a.\n",
|
||
"_Exercise: Display the epoch, iteration, mean training loss, and mean accuracy over each epoch (updated at each iteration), as well as the validation loss and accuracy at the end of each epoch._"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 245,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"(X_train_full, y_train_full), (X_test, y_test) = tf.keras.datasets.fashion_mnist.load_data()\n",
|
||
"X_train_full = X_train_full.astype(np.float32) / 255.\n",
|
||
"X_valid, X_train = X_train_full[:5000], X_train_full[5000:]\n",
|
||
"y_valid, y_train = y_train_full[:5000], y_train_full[5000:]\n",
|
||
"X_test = X_test.astype(np.float32) / 255."
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 246,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"tf.keras.utils.set_random_seed(42)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 247,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"model = tf.keras.Sequential([\n",
|
||
" tf.keras.layers.Flatten(input_shape=[28, 28]),\n",
|
||
" tf.keras.layers.Dense(100, activation=\"relu\"),\n",
|
||
" tf.keras.layers.Dense(10, activation=\"softmax\"),\n",
|
||
"])"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 248,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"n_epochs = 5\n",
|
||
"batch_size = 32\n",
|
||
"n_steps = len(X_train) // batch_size\n",
|
||
"optimizer = tf.keras.optimizers.Nadam(learning_rate=0.01)\n",
|
||
"loss_fn = tf.keras.losses.sparse_categorical_crossentropy\n",
|
||
"mean_loss = tf.keras.metrics.Mean()\n",
|
||
"metrics = [tf.keras.metrics.SparseCategoricalAccuracy()]"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 249,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"application/vnd.jupyter.widget-view+json": {
|
||
"model_id": "a0c8a6efecb44efdbaf6f6f2107a37e6",
|
||
"version_major": 2,
|
||
"version_minor": 0
|
||
},
|
||
"text/plain": [
|
||
"All epochs: 0%| | 0/5 [00:00<?, ?it/s]"
|
||
]
|
||
},
|
||
"metadata": {},
|
||
"output_type": "display_data"
|
||
},
|
||
{
|
||
"data": {
|
||
"application/vnd.jupyter.widget-view+json": {
|
||
"model_id": "ba37766cb41848b4ae0f544c8ddf238f",
|
||
"version_major": 2,
|
||
"version_minor": 0
|
||
},
|
||
"text/plain": [
|
||
"Epoch 1/5: 0%| | 0/1718 [00:00<?, ?it/s]"
|
||
]
|
||
},
|
||
"metadata": {},
|
||
"output_type": "display_data"
|
||
},
|
||
{
|
||
"data": {
|
||
"application/vnd.jupyter.widget-view+json": {
|
||
"model_id": "dc1d7d5c3f2148b1bb06e974bba09f52",
|
||
"version_major": 2,
|
||
"version_minor": 0
|
||
},
|
||
"text/plain": [
|
||
"Epoch 2/5: 0%| | 0/1718 [00:00<?, ?it/s]"
|
||
]
|
||
},
|
||
"metadata": {},
|
||
"output_type": "display_data"
|
||
},
|
||
{
|
||
"data": {
|
||
"application/vnd.jupyter.widget-view+json": {
|
||
"model_id": "a9fccf049df546079656b4fa4d53cf8a",
|
||
"version_major": 2,
|
||
"version_minor": 0
|
||
},
|
||
"text/plain": [
|
||
"Epoch 3/5: 0%| | 0/1718 [00:00<?, ?it/s]"
|
||
]
|
||
},
|
||
"metadata": {},
|
||
"output_type": "display_data"
|
||
},
|
||
{
|
||
"data": {
|
||
"application/vnd.jupyter.widget-view+json": {
|
||
"model_id": "e63ee530efcf46af907e7ee80bea8be0",
|
||
"version_major": 2,
|
||
"version_minor": 0
|
||
},
|
||
"text/plain": [
|
||
"Epoch 4/5: 0%| | 0/1718 [00:00<?, ?it/s]"
|
||
]
|
||
},
|
||
"metadata": {},
|
||
"output_type": "display_data"
|
||
},
|
||
{
|
||
"data": {
|
||
"application/vnd.jupyter.widget-view+json": {
|
||
"model_id": "a9bbff8ceb73461398293a4f5f1cade8",
|
||
"version_major": 2,
|
||
"version_minor": 0
|
||
},
|
||
"text/plain": [
|
||
"Epoch 5/5: 0%| | 0/1718 [00:00<?, ?it/s]"
|
||
]
|
||
},
|
||
"metadata": {},
|
||
"output_type": "display_data"
|
||
}
|
||
],
|
||
"source": [
|
||
"with trange(1, n_epochs + 1, desc=\"All epochs\") as epochs:\n",
|
||
" for epoch in epochs:\n",
|
||
" with trange(1, n_steps + 1, desc=f\"Epoch {epoch}/{n_epochs}\") as steps:\n",
|
||
" for step in steps:\n",
|
||
" X_batch, y_batch = random_batch(X_train, y_train)\n",
|
||
" with tf.GradientTape() as tape:\n",
|
||
" y_pred = model(X_batch)\n",
|
||
" main_loss = tf.reduce_mean(loss_fn(y_batch, y_pred))\n",
|
||
" loss = tf.add_n([main_loss] + model.losses)\n",
|
||
" gradients = tape.gradient(loss, model.trainable_variables)\n",
|
||
" optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n",
|
||
" for variable in model.variables:\n",
|
||
" if variable.constraint is not None:\n",
|
||
" variable.assign(variable.constraint(variable)) \n",
|
||
" status = OrderedDict()\n",
|
||
" mean_loss(loss)\n",
|
||
" status[\"loss\"] = mean_loss.result().numpy()\n",
|
||
" for metric in metrics:\n",
|
||
" metric(y_batch, y_pred)\n",
|
||
" status[metric.name] = metric.result().numpy()\n",
|
||
" steps.set_postfix(status)\n",
|
||
" y_pred = model(X_valid)\n",
|
||
" status[\"val_loss\"] = np.mean(loss_fn(y_valid, y_pred))\n",
|
||
" status[\"val_accuracy\"] = np.mean(tf.keras.metrics.sparse_categorical_accuracy(\n",
|
||
" tf.constant(y_valid, dtype=np.float32), y_pred))\n",
|
||
" steps.set_postfix(status)\n",
|
||
" for metric in [mean_loss] + metrics:\n",
|
||
" metric.reset_states()\n"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {},
|
||
"source": [
|
||
"### b.\n",
|
||
"_Exercise: Try using a different optimizer with a different learning rate for the upper layers and the lower layers._"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 250,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"tf.keras.utils.set_random_seed(42)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 251,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"lower_layers = tf.keras.Sequential([\n",
|
||
" tf.keras.layers.Flatten(input_shape=[28, 28]),\n",
|
||
" tf.keras.layers.Dense(100, activation=\"relu\"),\n",
|
||
"])\n",
|
||
"upper_layers = tf.keras.Sequential([\n",
|
||
" tf.keras.layers.Dense(10, activation=\"softmax\"),\n",
|
||
"])\n",
|
||
"model = tf.keras.Sequential([\n",
|
||
" lower_layers, upper_layers\n",
|
||
"])"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 252,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"lower_optimizer = tf.keras.optimizers.SGD(learning_rate=1e-4)\n",
|
||
"upper_optimizer = tf.keras.optimizers.Nadam(learning_rate=1e-3)"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 253,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": [
|
||
"n_epochs = 5\n",
|
||
"batch_size = 32\n",
|
||
"n_steps = len(X_train) // batch_size\n",
|
||
"loss_fn = tf.keras.losses.sparse_categorical_crossentropy\n",
|
||
"mean_loss = tf.keras.metrics.Mean()\n",
|
||
"metrics = [tf.keras.metrics.SparseCategoricalAccuracy()]"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": 254,
|
||
"metadata": {},
|
||
"outputs": [
|
||
{
|
||
"data": {
|
||
"application/vnd.jupyter.widget-view+json": {
|
||
"model_id": "5bdc4d309e3e4f03a27150634a0b89c3",
|
||
"version_major": 2,
|
||
"version_minor": 0
|
||
},
|
||
"text/plain": [
|
||
"All epochs: 0%| | 0/5 [00:00<?, ?it/s]"
|
||
]
|
||
},
|
||
"metadata": {},
|
||
"output_type": "display_data"
|
||
},
|
||
{
|
||
"data": {
|
||
"application/vnd.jupyter.widget-view+json": {
|
||
"model_id": "b816337dd6ba4177a8bcdd41639a8930",
|
||
"version_major": 2,
|
||
"version_minor": 0
|
||
},
|
||
"text/plain": [
|
||
"Epoch 1/5: 0%| | 0/1718 [00:00<?, ?it/s]"
|
||
]
|
||
},
|
||
"metadata": {},
|
||
"output_type": "display_data"
|
||
},
|
||
{
|
||
"data": {
|
||
"application/vnd.jupyter.widget-view+json": {
|
||
"model_id": "b4cba66f77474d2b9f9de9a207eadf6c",
|
||
"version_major": 2,
|
||
"version_minor": 0
|
||
},
|
||
"text/plain": [
|
||
"Epoch 2/5: 0%| | 0/1718 [00:00<?, ?it/s]"
|
||
]
|
||
},
|
||
"metadata": {},
|
||
"output_type": "display_data"
|
||
},
|
||
{
|
||
"data": {
|
||
"application/vnd.jupyter.widget-view+json": {
|
||
"model_id": "5649fae110bf4f90bce00b39838e05bf",
|
||
"version_major": 2,
|
||
"version_minor": 0
|
||
},
|
||
"text/plain": [
|
||
"Epoch 3/5: 0%| | 0/1718 [00:00<?, ?it/s]"
|
||
]
|
||
},
|
||
"metadata": {},
|
||
"output_type": "display_data"
|
||
},
|
||
{
|
||
"data": {
|
||
"application/vnd.jupyter.widget-view+json": {
|
||
"model_id": "7cd99923c6cc43e78faf87b13be2df7b",
|
||
"version_major": 2,
|
||
"version_minor": 0
|
||
},
|
||
"text/plain": [
|
||
"Epoch 4/5: 0%| | 0/1718 [00:00<?, ?it/s]"
|
||
]
|
||
},
|
||
"metadata": {},
|
||
"output_type": "display_data"
|
||
},
|
||
{
|
||
"data": {
|
||
"application/vnd.jupyter.widget-view+json": {
|
||
"model_id": "39ad913b024f4a2bb31477cfb2d61fbf",
|
||
"version_major": 2,
|
||
"version_minor": 0
|
||
},
|
||
"text/plain": [
|
||
"Epoch 5/5: 0%| | 0/1718 [00:00<?, ?it/s]"
|
||
]
|
||
},
|
||
"metadata": {},
|
||
"output_type": "display_data"
|
||
}
|
||
],
|
||
"source": [
|
||
"with trange(1, n_epochs + 1, desc=\"All epochs\") as epochs:\n",
|
||
" for epoch in epochs:\n",
|
||
" with trange(1, n_steps + 1, desc=f\"Epoch {epoch}/{n_epochs}\") as steps:\n",
|
||
" for step in steps:\n",
|
||
" X_batch, y_batch = random_batch(X_train, y_train)\n",
|
||
" with tf.GradientTape(persistent=True) as tape:\n",
|
||
" y_pred = model(X_batch)\n",
|
||
" main_loss = tf.reduce_mean(loss_fn(y_batch, y_pred))\n",
|
||
" loss = tf.add_n([main_loss] + model.losses)\n",
|
||
" for layers, optimizer in ((lower_layers, lower_optimizer),\n",
|
||
" (upper_layers, upper_optimizer)):\n",
|
||
" gradients = tape.gradient(loss, layers.trainable_variables)\n",
|
||
" optimizer.apply_gradients(zip(gradients, layers.trainable_variables))\n",
|
||
" del tape\n",
|
||
" for variable in model.variables:\n",
|
||
" if variable.constraint is not None:\n",
|
||
" variable.assign(variable.constraint(variable)) \n",
|
||
" status = OrderedDict()\n",
|
||
" mean_loss(loss)\n",
|
||
" status[\"loss\"] = mean_loss.result().numpy()\n",
|
||
" for metric in metrics:\n",
|
||
" metric(y_batch, y_pred)\n",
|
||
" status[metric.name] = metric.result().numpy()\n",
|
||
" steps.set_postfix(status)\n",
|
||
" y_pred = model(X_valid)\n",
|
||
" status[\"val_loss\"] = np.mean(loss_fn(y_valid, y_pred))\n",
|
||
" status[\"val_accuracy\"] = np.mean(tf.keras.metrics.sparse_categorical_accuracy(\n",
|
||
" tf.constant(y_valid, dtype=np.float32), y_pred))\n",
|
||
" steps.set_postfix(status)\n",
|
||
" for metric in [mean_loss] + metrics:\n",
|
||
" metric.reset_states()"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"execution_count": null,
|
||
"metadata": {},
|
||
"outputs": [],
|
||
"source": []
|
||
}
|
||
],
|
||
"metadata": {
|
||
"kernelspec": {
|
||
"display_name": "Python 3",
|
||
"language": "python",
|
||
"name": "python3"
|
||
},
|
||
"language_info": {
|
||
"codemirror_mode": {
|
||
"name": "ipython",
|
||
"version": 3
|
||
},
|
||
"file_extension": ".py",
|
||
"mimetype": "text/x-python",
|
||
"name": "python",
|
||
"nbconvert_exporter": "python",
|
||
"pygments_lexer": "ipython3",
|
||
"version": "3.10.6"
|
||
}
|
||
},
|
||
"nbformat": 4,
|
||
"nbformat_minor": 4
|
||
}
|