Update and fix list of equations in book_equations.ipynb

main
Aurélien Geron 2018-04-26 14:29:47 +02:00
parent 50d5c8b426
commit 2f4fa014a4
1 changed file with 99 additions and 103 deletions


@ -120,54 +120,54 @@
"**Equation 4-2: Linear Regression model prediction (vectorized form)**\n", "**Equation 4-2: Linear Regression model prediction (vectorized form)**\n",
"\n", "\n",
"$\n", "$\n",
"\\hat{y} = h_{\\mathbf{\\theta}}(\\mathbf{x}) = \\mathbf{\\theta}^T \\cdot \\mathbf{x}\n", "\\hat{y} = h_{\\boldsymbol{\\theta}}(\\mathbf{x}) = \\boldsymbol{\\theta} \\cdot \\mathbf{x}\n",
"$\n", "$\n",
"\n", "\n",
"\n", "\n",
"**Equation 4-3: MSE cost function for a Linear Regression model**\n", "**Equation 4-3: MSE cost function for a Linear Regression model**\n",
"\n", "\n",
"$\n", "$\n",
"\\text{MSE}(\\mathbf{X}, h_{\\mathbf{\\theta}}) = \\dfrac{1}{m} \\sum\\limits_{i=1}^{m}{(\\mathbf{\\theta}^T \\cdot \\mathbf{x}^{(i)} - y^{(i)})^2}\n", "\\text{MSE}(\\mathbf{X}, h_{\\boldsymbol{\\theta}}) = \\dfrac{1}{m} \\sum\\limits_{i=1}^{m}{(\\boldsymbol{\\theta}^T \\mathbf{x}^{(i)} - y^{(i)})^2}\n",
"$\n", "$\n",
"\n", "\n",
"\n", "\n",
"**Equation 4-4: Normal Equation**\n", "**Equation 4-4: Normal Equation**\n",
"\n", "\n",
"$\n", "$\n",
"\\hat{\\mathbf{\\theta}} = (\\mathbf{X}^T \\cdot \\mathbf{X})^{-1} \\cdot \\mathbf{X}^T \\cdot \\mathbf{y}\n", "\\hat{\\boldsymbol{\\theta}} = (\\mathbf{X}^T \\mathbf{X})^{-1} \\mathbf{X}^T \\mathbf{y}\n",
"$\n", "$\n",
"\n", "\n",
"\n", "\n",
"** Partial derivatives notation (page 114):**\n", "** Partial derivatives notation (page 114):**\n",
"\n", "\n",
"$\\frac{\\partial}{\\partial \\theta_j} \\text{MSE}(\\mathbf{\\theta})$\n", "$\\frac{\\partial}{\\partial \\theta_j} \\text{MSE}(\\boldsymbol{\\theta})$\n",
"\n", "\n",
"\n", "\n",
"**Equation 4-5: Partial derivatives of the cost function**\n", "**Equation 4-5: Partial derivatives of the cost function**\n",
"\n", "\n",
"$\n", "$\n",
"\\dfrac{\\partial}{\\partial \\theta_j} \\text{MSE}(\\mathbf{\\theta}) = \\dfrac{2}{m}\\sum\\limits_{i=1}^{m}(\\mathbf{\\theta}^T \\cdot \\mathbf{x}^{(i)} - y^{(i)})\\, x_j^{(i)}\n", "\\dfrac{\\partial}{\\partial \\theta_j} \\text{MSE}(\\boldsymbol{\\theta}) = \\dfrac{2}{m}\\sum\\limits_{i=1}^{m}(\\boldsymbol{\\theta}^T \\mathbf{x}^{(i)} - y^{(i)})\\, x_j^{(i)}\n",
"$\n", "$\n",
"\n", "\n",
"\n", "\n",
"**Equation 4-6: Gradient vector of the cost function**\n", "**Equation 4-6: Gradient vector of the cost function**\n",
"\n", "\n",
"$\n", "$\n",
"\\nabla_{\\mathbf{\\theta}}\\, \\text{MSE}(\\mathbf{\\theta}) =\n", "\\nabla_{\\boldsymbol{\\theta}}\\, \\text{MSE}(\\boldsymbol{\\theta}) =\n",
"\\begin{pmatrix}\n", "\\begin{pmatrix}\n",
" \\frac{\\partial}{\\partial \\theta_0} \\text{MSE}(\\mathbf{\\theta}) \\\\\n", " \\frac{\\partial}{\\partial \\theta_0} \\text{MSE}(\\boldsymbol{\\theta}) \\\\\n",
" \\frac{\\partial}{\\partial \\theta_1} \\text{MSE}(\\mathbf{\\theta}) \\\\\n", " \\frac{\\partial}{\\partial \\theta_1} \\text{MSE}(\\boldsymbol{\\theta}) \\\\\n",
" \\vdots \\\\\n", " \\vdots \\\\\n",
" \\frac{\\partial}{\\partial \\theta_n} \\text{MSE}(\\mathbf{\\theta})\n", " \\frac{\\partial}{\\partial \\theta_n} \\text{MSE}(\\boldsymbol{\\theta})\n",
"\\end{pmatrix}\n", "\\end{pmatrix}\n",
" = \\dfrac{2}{m} \\mathbf{X}^T \\cdot (\\mathbf{X} \\cdot \\mathbf{\\theta} - \\mathbf{y})\n", " = \\dfrac{2}{m} \\mathbf{X}^T (\\mathbf{X} \\boldsymbol{\\theta} - \\mathbf{y})\n",
"$\n", "$\n",
"\n", "\n",
"\n", "\n",
"**Equation 4-7: Gradient Descent step**\n", "**Equation 4-7: Gradient Descent step**\n",
"\n", "\n",
"$\n", "$\n",
"\\mathbf{\\theta}^{(\\text{next step})} = \\mathbf{\\theta} - \\eta \\nabla_{\\mathbf{\\theta}}\\, \\text{MSE}(\\mathbf{\\theta})\n", "\\boldsymbol{\\theta}^{(\\text{next step})} = \\boldsymbol{\\theta} - \\eta \\nabla_{\\boldsymbol{\\theta}}\\, \\text{MSE}(\\boldsymbol{\\theta})\n",
"$\n", "$\n",
"\n", "\n",
"\n", "\n",
@ -183,34 +183,34 @@
"$ \\dfrac{(n+d)!}{d!\\,n!} $\n", "$ \\dfrac{(n+d)!}{d!\\,n!} $\n",
"\n", "\n",
"\n", "\n",
"$ \\alpha \\sum_{i=1}^{n}{\\theta_i^2}$\n", "$ \\alpha \\sum_{i=1}^{n}{{\\theta_i}^2}$\n",
"\n", "\n",
"\n", "\n",
"**Equation 4-8: Ridge Regression cost function**\n", "**Equation 4-8: Ridge Regression cost function**\n",
"\n", "\n",
"$\n", "$\n",
"J(\\mathbf{\\theta}) = \\text{MSE}(\\mathbf{\\theta}) + \\alpha \\dfrac{1}{2}\\sum\\limits_{i=1}^{n}\\theta_i^2\n", "J(\\boldsymbol{\\theta}) = \\text{MSE}(\\boldsymbol{\\theta}) + \\alpha \\dfrac{1}{2}\\sum\\limits_{i=1}^{n}{\\theta_i}^2\n",
"$\n", "$\n",
"\n", "\n",
"\n", "\n",
"**Equation 4-9: Ridge Regression closed-form solution**\n", "**Equation 4-9: Ridge Regression closed-form solution**\n",
"\n", "\n",
"$\n", "$\n",
"\\hat{\\mathbf{\\theta}} = (\\mathbf{X}^T \\cdot \\mathbf{X} + \\alpha \\mathbf{A})^{-1} \\cdot \\mathbf{X}^T \\cdot \\mathbf{y}\n", "\\hat{\\boldsymbol{\\theta}} = (\\mathbf{X}^T \\mathbf{X} + \\alpha \\mathbf{A})^{-1} \\mathbf{X}^T \\mathbf{y}\n",
"$\n", "$\n",
"\n", "\n",
"\n", "\n",
"**Equation 4-10: Lasso Regression cost function**\n", "**Equation 4-10: Lasso Regression cost function**\n",
"\n", "\n",
"$\n", "$\n",
"J(\\mathbf{\\theta}) = \\text{MSE}(\\mathbf{\\theta}) + \\alpha \\sum\\limits_{i=1}^{n}\\left| \\theta_i \\right|\n", "J(\\boldsymbol{\\theta}) = \\text{MSE}(\\boldsymbol{\\theta}) + \\alpha \\sum\\limits_{i=1}^{n}\\left| \\theta_i \\right|\n",
"$\n", "$\n",
"\n", "\n",
"\n", "\n",
"**Equation 4-11: Lasso Regression subgradient vector**\n", "**Equation 4-11: Lasso Regression subgradient vector**\n",
"\n", "\n",
"$\n", "$\n",
"g(\\mathbf{\\theta}, J) = \\nabla_{\\mathbf{\\theta}}\\, \\text{MSE}(\\mathbf{\\theta}) + \\alpha\n", "g(\\boldsymbol{\\theta}, J) = \\nabla_{\\boldsymbol{\\theta}}\\, \\text{MSE}(\\boldsymbol{\\theta}) + \\alpha\n",
"\\begin{pmatrix}\n", "\\begin{pmatrix}\n",
" \\operatorname{sign}(\\theta_1) \\\\\n", " \\operatorname{sign}(\\theta_1) \\\\\n",
" \\operatorname{sign}(\\theta_2) \\\\\n", " \\operatorname{sign}(\\theta_2) \\\\\n",
@ -228,14 +228,14 @@
"**Equation 4-12: Elastic Net cost function**\n", "**Equation 4-12: Elastic Net cost function**\n",
"\n", "\n",
"$\n", "$\n",
"J(\\mathbf{\\theta}) = \\text{MSE}(\\mathbf{\\theta}) + r \\alpha \\sum\\limits_{i=1}^{n}\\left| \\theta_i \\right| + \\dfrac{1 - r}{2} \\alpha \\sum\\limits_{i=1}^{n}{\\theta_i^2}\n", "J(\\boldsymbol{\\theta}) = \\text{MSE}(\\boldsymbol{\\theta}) + r \\alpha \\sum\\limits_{i=1}^{n}\\left| \\theta_i \\right| + \\dfrac{1 - r}{2} \\alpha \\sum\\limits_{i=1}^{n}{{\\theta_i}^2}\n",
"$\n", "$\n",
"\n", "\n",
"\n", "\n",
"**Equation 4-13: Logistic Regression model estimated probability (vectorized form)**\n", "**Equation 4-13: Logistic Regression model estimated probability (vectorized form)**\n",
"\n", "\n",
"$\n", "$\n",
"\\hat{p} = h_{\\mathbf{\\theta}}(\\mathbf{x}) = \\sigma(\\mathbf{\\theta}^T \\cdot \\mathbf{x})\n", "\\hat{p} = h_{\\boldsymbol{\\theta}}(\\mathbf{x}) = \\sigma(\\boldsymbol{\\theta}^T \\mathbf{x})\n",
"$\n", "$\n",
"\n", "\n",
"\n", "\n",
@ -260,7 +260,7 @@
"**Equation 4-16: Cost function of a single training instance**\n", "**Equation 4-16: Cost function of a single training instance**\n",
"\n", "\n",
"$\n", "$\n",
"c(\\mathbf{\\theta}) =\n", "c(\\boldsymbol{\\theta}) =\n",
"\\begin{cases}\n", "\\begin{cases}\n",
" -\\log(\\hat{p}) & \\text{if } y = 1, \\\\\n", " -\\log(\\hat{p}) & \\text{if } y = 1, \\\\\n",
" -\\log(1 - \\hat{p}) & \\text{if } y = 0.\n", " -\\log(1 - \\hat{p}) & \\text{if } y = 0.\n",
@ -271,21 +271,21 @@
"**Equation 4-17: Logistic Regression cost function (log loss)**\n", "**Equation 4-17: Logistic Regression cost function (log loss)**\n",
"\n", "\n",
"$\n", "$\n",
"J(\\mathbf{\\theta}) = -\\dfrac{1}{m} \\sum\\limits_{i=1}^{m}{\\left[ y^{(i)} log\\left(\\hat{p}^{(i)}\\right) + (1 - y^{(i)}) log\\left(1 - \\hat{p}^{(i)}\\right)\\right]}\n", "J(\\boldsymbol{\\theta}) = -\\dfrac{1}{m} \\sum\\limits_{i=1}^{m}{\\left[ y^{(i)} log\\left(\\hat{p}^{(i)}\\right) + (1 - y^{(i)}) log\\left(1 - \\hat{p}^{(i)}\\right)\\right]}\n",
"$\n", "$\n",
"\n", "\n",
"\n", "\n",
"**Equation 4-18: Logistic cost function partial derivatives**\n", "**Equation 4-18: Logistic cost function partial derivatives**\n",
"\n", "\n",
"$\n", "$\n",
"\\dfrac{\\partial}{\\partial \\theta_j} \\text{J}(\\mathbf{\\theta}) = \\dfrac{1}{m}\\sum\\limits_{i=1}^{m}\\left(\\mathbf{\\sigma(\\theta}^T \\cdot \\mathbf{x}^{(i)}) - y^{(i)}\\right)\\, x_j^{(i)}\n", "\\dfrac{\\partial}{\\partial \\theta_j} \\text{J}(\\boldsymbol{\\theta}) = \\dfrac{1}{m}\\sum\\limits_{i=1}^{m}\\left(\\mathbf{\\sigma(\\boldsymbol{\\theta}}^T \\mathbf{x}^{(i)}) - y^{(i)}\\right)\\, x_j^{(i)}\n",
"$\n", "$\n",
"\n", "\n",
"\n", "\n",
"**Equation 4-19: Softmax score for class k**\n", "**Equation 4-19: Softmax score for class k**\n",
"\n", "\n",
"$\n", "$\n",
"s_k(\\mathbf{x}) = ({\\mathbf{\\theta}^{(k)}})^T \\cdot \\mathbf{x}\n", "s_k(\\mathbf{x}) = ({\\boldsymbol{\\theta}^{(k)}})^T \\mathbf{x}\n",
"$\n", "$\n",
"\n", "\n",
"\n", "\n",
@ -299,24 +299,24 @@
"**Equation 4-21: Softmax Regression classifier prediction**\n", "**Equation 4-21: Softmax Regression classifier prediction**\n",
"\n", "\n",
"$\n", "$\n",
"\\hat{y} = \\underset{k}{\\operatorname{argmax}} \\, \\sigma\\left(\\mathbf{s}(\\mathbf{x})\\right)_k = \\underset{k}{\\operatorname{argmax}} \\, s_k(\\mathbf{x}) = \\underset{k}{\\operatorname{argmax}} \\, \\left( ({\\mathbf{\\theta}^{(k)}})^T \\cdot \\mathbf{x} \\right)\n", "\\hat{y} = \\underset{k}{\\operatorname{argmax}} \\, \\sigma\\left(\\mathbf{s}(\\mathbf{x})\\right)_k = \\underset{k}{\\operatorname{argmax}} \\, s_k(\\mathbf{x}) = \\underset{k}{\\operatorname{argmax}} \\, \\left( ({\\boldsymbol{\\theta}^{(k)}})^T \\mathbf{x} \\right)\n",
"$\n", "$\n",
"\n", "\n",
"\n", "\n",
"**Equation 4-22: Cross entropy cost function**\n", "**Equation 4-22: Cross entropy cost function**\n",
"\n", "\n",
"$\n", "$\n",
"J(\\mathbf{\\Theta}) = - \\dfrac{1}{m}\\sum\\limits_{i=1}^{m}\\sum\\limits_{k=1}^{K}{y_k^{(i)}\\log\\left(\\hat{p}_k^{(i)}\\right)}\n", "J(\\boldsymbol{\\Theta}) = - \\dfrac{1}{m}\\sum\\limits_{i=1}^{m}\\sum\\limits_{k=1}^{K}{y_k^{(i)}\\log\\left(\\hat{p}_k^{(i)}\\right)}\n",
"$\n", "$\n",
"\n", "\n",
"**Cross entropy between two discrete probability distributions $p$ and $q$ (page 141):**\n", "**Cross entropy between two discrete probability distributions $p$ and $q$ (page 141):**\n",
"$ H(p, q) = -\\sum\\limits_{x}p(x) \\log q(x) $\n", "$ H(p, q) = -\\sum\\limits_{x}p(x) \\log q(x) $\n",
"\n", "\n",
"\n", "\n",
"**Equation 4-23: Cross entropy gradient vector for class k**\n", "**Equation 4-23: Cross entropy gradient vector for class _k_**\n",
"\n", "\n",
"$\n", "$\n",
"\\nabla_{\\mathbf{\\theta}^{(k)}} \\, J(\\mathbf{\\Theta}) = \\dfrac{1}{m} \\sum\\limits_{i=1}^{m}{ \\left ( \\hat{p}^{(i)}_k - y_k^{(i)} \\right ) \\mathbf{x}^{(i)}}\n", "\\nabla_{\\boldsymbol{\\theta}^{(k)}} \\, J(\\boldsymbol{\\Theta}) = \\dfrac{1}{m} \\sum\\limits_{i=1}^{m}{ \\left ( \\hat{p}^{(i)}_k - y_k^{(i)} \\right ) \\mathbf{x}^{(i)}}\n",
"$\n" "$\n"
] ]
}, },
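Equations 4-19 through 4-23 fit together in a few lines of NumPy; the random training data and hyperparameters below are placeholders:

```python
import numpy as np

def softmax(logits):
    exps = np.exp(logits - logits.max(axis=1, keepdims=True))  # numerical stability
    return exps / exps.sum(axis=1, keepdims=True)

np.random.seed(42)
m, n, K = 100, 2, 3
X_b = np.c_[np.ones((m, 1)), np.random.randn(m, n)]
y = np.random.randint(K, size=m)
Y_onehot = np.eye(K)[y]                    # targets y_k^(i)
Theta = np.random.randn(n + 1, K)          # one column per theta^(k)

eta = 0.1
for _ in range(1000):
    P_hat = softmax(X_b @ Theta)                  # softmax scores, Equation 4-19
    gradients = X_b.T @ (P_hat - Y_onehot) / m    # Equation 4-23, all classes at once
    Theta = Theta - eta * gradients

loss = -np.mean(np.sum(Y_onehot * np.log(P_hat + 1e-12), axis=1))  # Equation 4-22
```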
@ -328,7 +328,7 @@
"**Equation 5-1: Gaussian RBF**\n", "**Equation 5-1: Gaussian RBF**\n",
"\n", "\n",
"$\n", "$\n",
"{\\displaystyle \\phi_{\\gamma}(\\mathbf{x}, \\mathbf{\\ell})} = {\\displaystyle \\exp({\\displaystyle -\\gamma \\left\\| \\mathbf{x} - \\mathbf{\\ell} \\right\\|^2})}\n", "{\\displaystyle \\phi_{\\gamma}(\\mathbf{x}, \\boldsymbol{\\ell})} = {\\displaystyle \\exp({\\displaystyle -\\gamma \\left\\| \\mathbf{x} - \\boldsymbol{\\ell} \\right\\|^2})}\n",
"$\n", "$\n",
"\n", "\n",
"\n", "\n",
@ -336,8 +336,8 @@
"\n", "\n",
"$\n", "$\n",
"\\hat{y} = \\begin{cases}\n", "\\hat{y} = \\begin{cases}\n",
" 0 & \\text{if } \\mathbf{w}^T \\cdot \\mathbf{x} + b < 0, \\\\\n", " 0 & \\text{if } \\mathbf{w}^T \\mathbf{x} + b < 0, \\\\\n",
" 1 & \\text{if } \\mathbf{w}^T \\cdot \\mathbf{x} + b \\geq 0\n", " 1 & \\text{if } \\mathbf{w}^T \\mathbf{x} + b \\geq 0\n",
"\\end{cases}\n", "\\end{cases}\n",
"$\n", "$\n",
"\n", "\n",
@ -346,8 +346,8 @@
"\n", "\n",
"$\n", "$\n",
"\\begin{split}\n", "\\begin{split}\n",
"&\\underset{\\mathbf{w}, b}{\\operatorname{minimize}}\\quad{\\frac{1}{2}\\mathbf{w}^T \\cdot \\mathbf{w}} \\\\\n", "&\\underset{\\mathbf{w}, b}{\\operatorname{minimize}}\\quad{\\frac{1}{2}\\mathbf{w}^T \\mathbf{w}} \\\\\n",
"&\\text{subject to} \\quad t^{(i)}(\\mathbf{w}^T \\cdot \\mathbf{x}^{(i)} + b) \\ge 1 \\quad \\text{for } i = 1, 2, \\dots, m\n", "&\\text{subject to} \\quad t^{(i)}(\\mathbf{w}^T \\mathbf{x}^{(i)} + b) \\ge 1 \\quad \\text{for } i = 1, 2, \\dots, m\n",
"\\end{split}\n", "\\end{split}\n",
"$\n", "$\n",
"\n", "\n",
@ -356,8 +356,8 @@
"\n", "\n",
"$\n", "$\n",
"\\begin{split}\n", "\\begin{split}\n",
"&\\underset{\\mathbf{w}, b, \\mathbf{\\zeta}}{\\operatorname{minimize}}\\quad{\\dfrac{1}{2}\\mathbf{w}^T \\cdot \\mathbf{w} + C \\sum\\limits_{i=1}^m{\\zeta^{(i)}}}\\\\\n", "&\\underset{\\mathbf{w}, b, \\mathbf{\\zeta}}{\\operatorname{minimize}}\\quad{\\dfrac{1}{2}\\mathbf{w}^T \\mathbf{w} + C \\sum\\limits_{i=1}^m{\\zeta^{(i)}}}\\\\\n",
"&\\text{subject to} \\quad t^{(i)}(\\mathbf{w}^T \\cdot \\mathbf{x}^{(i)} + b) \\ge 1 - \\zeta^{(i)} \\quad \\text{and} \\quad \\zeta^{(i)} \\ge 0 \\quad \\text{for } i = 1, 2, \\dots, m\n", "&\\text{subject to} \\quad t^{(i)}(\\mathbf{w}^T \\mathbf{x}^{(i)} + b) \\ge 1 - \\zeta^{(i)} \\quad \\text{and} \\quad \\zeta^{(i)} \\ge 0 \\quad \\text{for } i = 1, 2, \\dots, m\n",
"\\end{split}\n", "\\end{split}\n",
"$\n", "$\n",
"\n", "\n",
@ -366,8 +366,8 @@
"\n", "\n",
"$\n", "$\n",
"\\begin{split}\n", "\\begin{split}\n",
"\\underset{\\mathbf{p}}{\\text{Minimize}} \\quad & \\dfrac{1}{2} \\mathbf{p}^T \\cdot \\mathbf{H} \\cdot \\mathbf{p} \\quad + \\quad \\mathbf{f}^T \\cdot \\mathbf{p} \\\\\n", "\\underset{\\mathbf{p}}{\\text{Minimize}} \\quad & \\dfrac{1}{2} \\mathbf{p}^T \\mathbf{H} \\mathbf{p} \\quad + \\quad \\mathbf{f}^T \\mathbf{p} \\\\\n",
"\\text{subject to} \\quad & \\mathbf{A} \\cdot \\mathbf{p} \\le \\mathbf{b} \\\\\n", "\\text{subject to} \\quad & \\mathbf{A} \\mathbf{p} \\le \\mathbf{b} \\\\\n",
"\\text{where } &\n", "\\text{where } &\n",
"\\begin{cases}\n", "\\begin{cases}\n",
" \\mathbf{p} & \\text{ is an }n_p\\text{-dimensional vector (} n_p = \\text{number of parameters),}\\\\\n", " \\mathbf{p} & \\text{ is an }n_p\\text{-dimensional vector (} n_p = \\text{number of parameters),}\\\\\n",
@ -387,7 +387,7 @@
"\\underset{\\mathbf{\\alpha}}{\\operatorname{minimize}}\n", "\\underset{\\mathbf{\\alpha}}{\\operatorname{minimize}}\n",
"\\dfrac{1}{2}\\sum\\limits_{i=1}^{m}{\n", "\\dfrac{1}{2}\\sum\\limits_{i=1}^{m}{\n",
" \\sum\\limits_{j=1}^{m}{\n", " \\sum\\limits_{j=1}^{m}{\n",
" \\alpha^{(i)} \\alpha^{(j)} t^{(i)} t^{(j)} {\\mathbf{x}^{(i)}}^T \\cdot \\mathbf{x}^{(j)}\n", " \\alpha^{(i)} \\alpha^{(j)} t^{(i)} t^{(j)} {\\mathbf{x}^{(i)}}^T \\mathbf{x}^{(j)}\n",
" }\n", " }\n",
"} \\quad - \\quad \\sum\\limits_{i=1}^{m}{\\alpha^{(i)}}\\\\\n", "} \\quad - \\quad \\sum\\limits_{i=1}^{m}{\\alpha^{(i)}}\\\\\n",
"\\text{subject to}\\quad \\alpha^{(i)} \\ge 0 \\quad \\text{for }i = 1, 2, \\dots, m\n", "\\text{subject to}\\quad \\alpha^{(i)} \\ge 0 \\quad \\text{for }i = 1, 2, \\dots, m\n",
@ -400,7 +400,7 @@
"$\n", "$\n",
"\\begin{split}\n", "\\begin{split}\n",
"&\\hat{\\mathbf{w}} = \\sum_{i=1}^{m}{\\hat{\\alpha}}^{(i)}t^{(i)}\\mathbf{x}^{(i)}\\\\\n", "&\\hat{\\mathbf{w}} = \\sum_{i=1}^{m}{\\hat{\\alpha}}^{(i)}t^{(i)}\\mathbf{x}^{(i)}\\\\\n",
"&\\hat{b} = \\dfrac{1}{n_s}\\sum\\limits_{\\scriptstyle i=1 \\atop {\\scriptstyle {\\hat{\\alpha}}^{(i)} > 0}}^{m}{\\left(1 - t^{(i)}({\\hat{\\mathbf{w}}}^T \\cdot \\mathbf{x}^{(i)})\\right)}\n", "&\\hat{b} = \\dfrac{1}{n_s}\\sum\\limits_{\\scriptstyle i=1 \\atop {\\scriptstyle {\\hat{\\alpha}}^{(i)} > 0}}^{m}{\\left(t^{(i)} - ({\\hat{\\mathbf{w}}}^T \\mathbf{x}^{(i)})\\right)}\n",
"\\end{split}\n", "\\end{split}\n",
"$\n", "$\n",
"\n", "\n",
@ -423,11 +423,11 @@
"\n", "\n",
"$\n", "$\n",
"\\begin{split}\n", "\\begin{split}\n",
"\\phi(\\mathbf{a})^T \\cdot \\phi(\\mathbf{b}) & \\quad = \\begin{pmatrix}\n", "\\phi(\\mathbf{a})^T \\phi(\\mathbf{b}) & \\quad = \\begin{pmatrix}\n",
" {a_1}^2 \\\\\n", " {a_1}^2 \\\\\n",
" \\sqrt{2} \\, a_1 a_2 \\\\\n", " \\sqrt{2} \\, a_1 a_2 \\\\\n",
" {a_2}^2\n", " {a_2}^2\n",
" \\end{pmatrix}^T \\cdot \\begin{pmatrix}\n", " \\end{pmatrix}^T \\begin{pmatrix}\n",
" {b_1}^2 \\\\\n", " {b_1}^2 \\\\\n",
" \\sqrt{2} \\, b_1 b_2 \\\\\n", " \\sqrt{2} \\, b_1 b_2 \\\\\n",
" {b_2}^2\n", " {b_2}^2\n",
@ -435,25 +435,25 @@
" & \\quad = \\left( a_1 b_1 + a_2 b_2 \\right)^2 = \\left( \\begin{pmatrix}\n", " & \\quad = \\left( a_1 b_1 + a_2 b_2 \\right)^2 = \\left( \\begin{pmatrix}\n",
" a_1 \\\\\n", " a_1 \\\\\n",
" a_2\n", " a_2\n",
"\\end{pmatrix}^T \\cdot \\begin{pmatrix}\n", "\\end{pmatrix}^T \\begin{pmatrix}\n",
" b_1 \\\\\n", " b_1 \\\\\n",
" b_2\n", " b_2\n",
" \\end{pmatrix} \\right)^2 = (\\mathbf{a}^T \\cdot \\mathbf{b})^2\n", " \\end{pmatrix} \\right)^2 = (\\mathbf{a}^T \\mathbf{b})^2\n",
"\\end{split}\n", "\\end{split}\n",
"$\n", "$\n",
"\n", "\n",
"**In the text about the kernel trick (page 162):**\n", "**In the text about the kernel trick (page 162):**\n",
"[...], then you can replace this dot product of transformed vectors simply by $ ({\\mathbf{x}^{(i)}}^T \\cdot \\mathbf{x}^{(j)})^2 $\n", "[...], then you can replace this dot product of transformed vectors simply by $ ({\\mathbf{x}^{(i)}}^T \\mathbf{x}^{(j)})^2 $\n",
"\n", "\n",
"\n", "\n",
"**Equation 5-10: Common kernels**\n", "**Equation 5-10: Common kernels**\n",
"\n", "\n",
"$\n", "$\n",
"\\begin{split}\n", "\\begin{split}\n",
"\\text{Linear:} & \\quad K(\\mathbf{a}, \\mathbf{b}) = \\mathbf{a}^T \\cdot \\mathbf{b} \\\\\n", "\\text{Linear:} & \\quad K(\\mathbf{a}, \\mathbf{b}) = \\mathbf{a}^T \\mathbf{b} \\\\\n",
"\\text{Polynomial:} & \\quad K(\\mathbf{a}, \\mathbf{b}) = \\left(\\gamma \\mathbf{a}^T \\cdot \\mathbf{b} + r \\right)^d \\\\\n", "\\text{Polynomial:} & \\quad K(\\mathbf{a}, \\mathbf{b}) = \\left(\\gamma \\mathbf{a}^T \\mathbf{b} + r \\right)^d \\\\\n",
"\\text{Gaussian RBF:} & \\quad K(\\mathbf{a}, \\mathbf{b}) = \\exp({\\displaystyle -\\gamma \\left\\| \\mathbf{a} - \\mathbf{b} \\right\\|^2}) \\\\\n", "\\text{Gaussian RBF:} & \\quad K(\\mathbf{a}, \\mathbf{b}) = \\exp({\\displaystyle -\\gamma \\left\\| \\mathbf{a} - \\mathbf{b} \\right\\|^2}) \\\\\n",
"\\text{Sigmoid:} & \\quad K(\\mathbf{a}, \\mathbf{b}) = \\tanh\\left(\\gamma \\mathbf{a}^T \\cdot \\mathbf{b} + r\\right)\n", "\\text{Sigmoid:} & \\quad K(\\mathbf{a}, \\mathbf{b}) = \\tanh\\left(\\gamma \\mathbf{a}^T \\mathbf{b} + r\\right)\n",
"\\end{split}\n", "\\end{split}\n",
"$\n", "$\n",
"\n", "\n",
@ -461,8 +461,8 @@
"\n", "\n",
"$\n", "$\n",
"\\begin{split}\n", "\\begin{split}\n",
"h_{\\hat{\\mathbf{w}}, \\hat{b}}\\left(\\phi(\\mathbf{x}^{(n)})\\right) & = \\,\\hat{\\mathbf{w}}^T \\cdot \\phi(\\mathbf{x}^{(n)}) + \\hat{b} = \\left(\\sum_{i=1}^{m}{\\hat{\\alpha}}^{(i)}t^{(i)}\\phi(\\mathbf{x}^{(i)})\\right)^T \\cdot \\phi(\\mathbf{x}^{(n)}) + \\hat{b}\\\\\n", "h_{\\hat{\\mathbf{w}}, \\hat{b}}\\left(\\phi(\\mathbf{x}^{(n)})\\right) & = \\,\\hat{\\mathbf{w}}^T \\phi(\\mathbf{x}^{(n)}) + \\hat{b} = \\left(\\sum_{i=1}^{m}{\\hat{\\alpha}}^{(i)}t^{(i)}\\phi(\\mathbf{x}^{(i)})\\right)^T \\phi(\\mathbf{x}^{(n)}) + \\hat{b}\\\\\n",
" & = \\, \\sum_{i=1}^{m}{\\hat{\\alpha}}^{(i)}t^{(i)}\\left(\\phi(\\mathbf{x}^{(i)})^T \\cdot \\phi(\\mathbf{x}^{(n)})\\right) + \\hat{b}\\\\\n", " & = \\, \\sum_{i=1}^{m}{\\hat{\\alpha}}^{(i)}t^{(i)}\\left(\\phi(\\mathbf{x}^{(i)})^T \\phi(\\mathbf{x}^{(n)})\\right) + \\hat{b}\\\\\n",
" & = \\sum\\limits_{\\scriptstyle i=1 \\atop {\\scriptstyle {\\hat{\\alpha}}^{(i)} > 0}}^{m}{\\hat{\\alpha}}^{(i)}t^{(i)} K(\\mathbf{x}^{(i)}, \\mathbf{x}^{(n)}) + \\hat{b}\n", " & = \\sum\\limits_{\\scriptstyle i=1 \\atop {\\scriptstyle {\\hat{\\alpha}}^{(i)} > 0}}^{m}{\\hat{\\alpha}}^{(i)}t^{(i)} K(\\mathbf{x}^{(i)}, \\mathbf{x}^{(n)}) + \\hat{b}\n",
"\\end{split}\n", "\\end{split}\n",
"$\n", "$\n",
@ -472,10 +472,10 @@
"\n", "\n",
"$\n", "$\n",
"\\begin{split}\n", "\\begin{split}\n",
"\\hat{b} & = \\dfrac{1}{n_s}\\sum\\limits_{\\scriptstyle i=1 \\atop {\\scriptstyle {\\hat{\\alpha}}^{(i)} > 0}}^{m}{\\left(1 - t^{(i)}{\\hat{\\mathbf{w}}}^T \\cdot \\phi(\\mathbf{x}^{(i)})\\right)} = \\dfrac{1}{n_s}\\sum\\limits_{\\scriptstyle i=1 \\atop {\\scriptstyle {\\hat{\\alpha}}^{(i)} > 0}}^{m}{\\left(1 - t^{(i)}{\n", "\\hat{b} & = \\dfrac{1}{n_s}\\sum\\limits_{\\scriptstyle i=1 \\atop {\\scriptstyle {\\hat{\\alpha}}^{(i)} > 0}}^{m}{\\left(t^{(i)} - {\\hat{\\mathbf{w}}}^T \\phi(\\mathbf{x}^{(i)})\\right)} = \\dfrac{1}{n_s}\\sum\\limits_{\\scriptstyle i=1 \\atop {\\scriptstyle {\\hat{\\alpha}}^{(i)} > 0}}^{m}{\\left(t^{(i)} - {\n",
" \\left(\\sum_{j=1}^{m}{\\hat{\\alpha}}^{(j)}t^{(j)}\\phi(\\mathbf{x}^{(j)})\\right)\n", " \\left(\\sum_{j=1}^{m}{\\hat{\\alpha}}^{(j)}t^{(j)}\\phi(\\mathbf{x}^{(j)})\\right)\n",
" }^T \\cdot \\phi(\\mathbf{x}^{(i)})\\right)}\\\\\n", " }^T \\phi(\\mathbf{x}^{(i)})\\right)}\\\\\n",
" & = \\dfrac{1}{n_s}\\sum\\limits_{\\scriptstyle i=1 \\atop {\\scriptstyle {\\hat{\\alpha}}^{(i)} > 0}}^{m}{\\left(1 - t^{(i)}\n", " & = \\dfrac{1}{n_s}\\sum\\limits_{\\scriptstyle i=1 \\atop {\\scriptstyle {\\hat{\\alpha}}^{(i)} > 0}}^{m}{\\left(t^{(i)} -\n",
"\\sum\\limits_{\\scriptstyle j=1 \\atop {\\scriptstyle {\\hat{\\alpha}}^{(j)} > 0}}^{m}{\n", "\\sum\\limits_{\\scriptstyle j=1 \\atop {\\scriptstyle {\\hat{\\alpha}}^{(j)} > 0}}^{m}{\n",
" {\\hat{\\alpha}}^{(j)} t^{(j)} K(\\mathbf{x}^{(i)},\\mathbf{x}^{(j)})\n", " {\\hat{\\alpha}}^{(j)} t^{(j)} K(\\mathbf{x}^{(i)},\\mathbf{x}^{(j)})\n",
"}\n", "}\n",
@ -487,7 +487,7 @@
"**Equation 5-13: Linear SVM classifier cost function**\n", "**Equation 5-13: Linear SVM classifier cost function**\n",
"\n", "\n",
"$\n", "$\n",
"J(\\mathbf{w}, b) = \\dfrac{1}{2} \\mathbf{w}^T \\cdot \\mathbf{w} \\quad + \\quad C {\\displaystyle \\sum\\limits_{i=1}^{m}max\\left(0, 1 - t^{(i)}(\\mathbf{w}^T \\cdot \\mathbf{x}^{(i)} + b) \\right)}\n", "J(\\mathbf{w}, b) = \\dfrac{1}{2} \\mathbf{w}^T \\mathbf{w} \\quad + \\quad C {\\displaystyle \\sum\\limits_{i=1}^{m}max\\left(0, t^{(i)} - (\\mathbf{w}^T \\mathbf{x}^{(i)} + b) \\right)}\n",
"$\n", "$\n",
"\n", "\n",
"\n" "\n"
@ -519,13 +519,13 @@
"\n", "\n",
"**Entropy computation example (page 173):**\n", "**Entropy computation example (page 173):**\n",
"\n", "\n",
"$ -\\frac{49}{54}\\log(\\frac{49}{54}) - \\frac{5}{54}\\log(\\frac{5}{54}) $\n", "$ -\\frac{49}{54}\\log_2(\\frac{49}{54}) - \\frac{5}{54}\\log_2(\\frac{5}{54}) $\n",
"\n", "\n",
"\n", "\n",
"**Equation 6-3: Entropy**\n", "**Equation 6-3: Entropy**\n",
"\n", "\n",
"$\n", "$\n",
"H_i = -\\sum\\limits_{k=1 \\atop p_{i,k} \\ne 0}^{n}{{p_{i,k}}\\log(p_{i,k})}\n", "H_i = -\\sum\\limits_{k=1 \\atop p_{i,k} \\ne 0}^{n}{{p_{i,k}}\\log_2(p_{i,k})}\n",
"$\n", "$\n",
"\n", "\n",
"\n", "\n",
@ -547,7 +547,7 @@
"source": [ "source": [
"# Chapter 7\n", "# Chapter 7\n",
"\n", "\n",
"**Equation 7-1: Weighted error rate of the j^th^ predictor**\n", "**Equation 7-1: Weighted error rate of the $j^\\text{th}$ predictor**\n",
"\n", "\n",
"$\n", "$\n",
"r_j = \\dfrac{\\displaystyle \\sum\\limits_{\\textstyle {i=1 \\atop \\hat{y}_j^{(i)} \\ne y^{(i)}}}^{m}{w^{(i)}}}{\\displaystyle \\sum\\limits_{i=1}^{m}{w^{(i)}}} \\quad\n", "r_j = \\dfrac{\\displaystyle \\sum\\limits_{\\textstyle {i=1 \\atop \\hat{y}_j^{(i)} \\ne y^{(i)}}}^{m}{w^{(i)}}}{\\displaystyle \\sum\\limits_{i=1}^{m}{w^{(i)}}} \\quad\n",
@ -611,14 +611,14 @@
"**Equation 8-2: Projecting the training set down to _d_ dimensions**\n", "**Equation 8-2: Projecting the training set down to _d_ dimensions**\n",
"\n", "\n",
"$\n", "$\n",
"\\mathbf{X}_{d\\text{-proj}} = \\mathbf{X} \\cdot \\mathbf{W}_d\n", "\\mathbf{X}_{d\\text{-proj}} = \\mathbf{X} \\mathbf{W}_d\n",
"$\n", "$\n",
"\n", "\n",
"\n", "\n",
"**Equation 8-3: PCA inverse transformation, back to the original number of dimensions**\n", "**Equation 8-3: PCA inverse transformation, back to the original number of dimensions**\n",
"\n", "\n",
"$\n", "$\n",
"\\mathbf{X}_{\\text{recovered}} = \\mathbf{X}_{d\\text{-proj}} \\cdot {\\mathbf{W}_d}^T\n", "\\mathbf{X}_{\\text{recovered}} = \\mathbf{X}_{d\\text{-proj}} {\\mathbf{W}_d}^T\n",
"$\n", "$\n",
"\n", "\n",
"\n", "\n",
@ -659,7 +659,7 @@
"**Equation 9-1: Rectified linear unit**\n", "**Equation 9-1: Rectified linear unit**\n",
"\n", "\n",
"$\n", "$\n",
"h_{\\mathbf{w}, b}(\\mathbf{X}) = \\max(\\mathbf{X} \\cdot \\mathbf{w} + b, 0)\n", "h_{\\mathbf{w}, b}(\\mathbf{X}) = \\max(\\mathbf{X} \\mathbf{w} + b, 0)\n",
"$" "$"
] ]
}, },
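Equation 9-1 in NumPy (toy inputs and weights):

```python
import numpy as np

def relu_unit(X, w, b):
    return np.maximum(X @ w + b, 0.0)   # Equation 9-1, element-wise max with 0

X = np.array([[1.0, -2.0], [0.5, 3.0]])
print(relu_unit(X, w=np.array([2.0, 1.0]), b=-0.5))   # [0.0, 3.5]
```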
@ -760,8 +760,8 @@
"\n", "\n",
"**Equation 11-4: Momentum algorithm**\n", "**Equation 11-4: Momentum algorithm**\n",
"\n", "\n",
"1. $\\mathbf{m} \\gets \\beta \\mathbf{m} - \\eta \\nabla_\\mathbf{\\theta}J(\\mathbf{\\theta})$\n", "1. $\\mathbf{m} \\gets \\beta \\mathbf{m} - \\eta \\nabla_\\boldsymbol{\\theta}J(\\boldsymbol{\\theta})$\n",
"2. $\\mathbf{\\theta} \\gets \\mathbf{\\theta} + \\mathbf{m}$\n", "2. $\\boldsymbol{\\theta} \\gets \\boldsymbol{\\theta} + \\mathbf{m}$\n",
"\n", "\n",
"**In the text page 296:**\n", "**In the text page 296:**\n",
"\n", "\n",
@ -770,36 +770,36 @@
"\n", "\n",
"**Equation 11-5: Nesterov Accelerated Gradient algorithm**\n", "**Equation 11-5: Nesterov Accelerated Gradient algorithm**\n",
"\n", "\n",
"1. $\\mathbf{m} \\gets \\beta \\mathbf{m} - \\eta \\nabla_\\mathbf{\\theta}J(\\mathbf{\\theta} + \\beta \\mathbf{m})$\n", "1. $\\mathbf{m} \\gets \\beta \\mathbf{m} - \\eta \\nabla_\\boldsymbol{\\theta}J(\\boldsymbol{\\theta} + \\beta \\mathbf{m})$\n",
"2. $\\mathbf{\\theta} \\gets \\mathbf{\\theta} + \\mathbf{m}$\n", "2. $\\boldsymbol{\\theta} \\gets \\boldsymbol{\\theta} + \\mathbf{m}$\n",
"\n", "\n",
"**Equation 11-6: AdaGrad algorithm**\n", "**Equation 11-6: AdaGrad algorithm**\n",
"\n", "\n",
"1. $\\mathbf{s} \\gets \\mathbf{s} + \\nabla_\\mathbf{\\theta}J(\\mathbf{\\theta}) \\otimes \\nabla_\\mathbf{\\theta}J(\\mathbf{\\theta})$\n", "1. $\\mathbf{s} \\gets \\mathbf{s} + \\nabla_\\boldsymbol{\\theta}J(\\boldsymbol{\\theta}) \\otimes \\nabla_\\boldsymbol{\\theta}J(\\boldsymbol{\\theta})$\n",
"2. $\\mathbf{\\theta} \\gets \\mathbf{\\theta} - \\eta \\, \\nabla_\\mathbf{\\theta}J(\\mathbf{\\theta}) \\oslash {\\sqrt{\\mathbf{s} + \\epsilon}}$\n", "2. $\\boldsymbol{\\theta} \\gets \\boldsymbol{\\theta} - \\eta \\, \\nabla_\\boldsymbol{\\theta}J(\\boldsymbol{\\theta}) \\oslash {\\sqrt{\\mathbf{s} + \\epsilon}}$\n",
"\n", "\n",
"**In the text page 298-299:**\n", "**In the text page 298-299:**\n",
"\n", "\n",
"This vectorized form is equivalent to computing $s_i \\gets s_i + \\left( \\dfrac{\\partial J(\\mathbf{\\theta})}{\\partial \\theta_i} \\right)^2$ for each element $s_i$ of the vector $\\mathbf{s}$.\n", "This vectorized form is equivalent to computing $s_i \\gets s_i + \\left( \\dfrac{\\partial J(\\boldsymbol{\\theta})}{\\partial \\theta_i} \\right)^2$ for each element $s_i$ of the vector $\\mathbf{s}$.\n",
"\n", "\n",
"**In the text page 299:**\n", "**In the text page 299:**\n",
"\n", "\n",
"This vectorized form is equivalent to computing $ \\theta_i \\gets \\theta_i - \\eta \\, \\dfrac{\\partial J(\\mathbf{\\theta})}{\\partial \\theta_i} \\dfrac{1}{\\sqrt{s_i + \\epsilon}} $ for all parameters $\\theta_i$ (simultaneously).\n", "This vectorized form is equivalent to computing $ \\theta_i \\gets \\theta_i - \\eta \\, \\dfrac{\\partial J(\\boldsymbol{\\theta})}{\\partial \\theta_i} \\dfrac{1}{\\sqrt{s_i + \\epsilon}} $ for all parameters $\\theta_i$ (simultaneously).\n",
"\n", "\n",
"\n", "\n",
"**Equation 11-7: RMSProp algorithm**\n", "**Equation 11-7: RMSProp algorithm**\n",
"\n", "\n",
"1. $\\mathbf{s} \\gets \\beta \\mathbf{s} + (1 - \\beta ) \\nabla_\\mathbf{\\theta}J(\\mathbf{\\theta}) \\otimes \\nabla_\\mathbf{\\theta}J(\\mathbf{\\theta})$\n", "1. $\\mathbf{s} \\gets \\beta \\mathbf{s} + (1 - \\beta ) \\nabla_\\boldsymbol{\\theta}J(\\boldsymbol{\\theta}) \\otimes \\nabla_\\boldsymbol{\\theta}J(\\boldsymbol{\\theta})$\n",
"2. $\\mathbf{\\theta} \\gets \\mathbf{\\theta} - \\eta \\, \\nabla_\\mathbf{\\theta}J(\\mathbf{\\theta}) \\oslash {\\sqrt{\\mathbf{s} + \\epsilon}}$\n", "2. $\\boldsymbol{\\theta} \\gets \\boldsymbol{\\theta} - \\eta \\, \\nabla_\\boldsymbol{\\theta}J(\\boldsymbol{\\theta}) \\oslash {\\sqrt{\\mathbf{s} + \\epsilon}}$\n",
"\n", "\n",
"\n", "\n",
"**Equation 11-8: Adam algorithm**\n", "**Equation 11-8: Adam algorithm**\n",
"\n", "\n",
"1. $\\mathbf{m} \\gets \\beta_1 \\mathbf{m} - (1 - \\beta_1) \\nabla_\\mathbf{\\theta}J(\\mathbf{\\theta})$\n", "1. $\\mathbf{m} \\gets \\beta_1 \\mathbf{m} - (1 - \\beta_1) \\nabla_\\boldsymbol{\\theta}J(\\boldsymbol{\\theta})$\n",
"2. $\\mathbf{s} \\gets \\beta_2 \\mathbf{s} + (1 - \\beta_2) \\nabla_\\mathbf{\\theta}J(\\mathbf{\\theta}) \\otimes \\nabla_\\mathbf{\\theta}J(\\mathbf{\\theta})$\n", "2. $\\mathbf{s} \\gets \\beta_2 \\mathbf{s} + (1 - \\beta_2) \\nabla_\\boldsymbol{\\theta}J(\\boldsymbol{\\theta}) \\otimes \\nabla_\\boldsymbol{\\theta}J(\\boldsymbol{\\theta})$\n",
"3. $\\mathbf{m} \\gets \\left(\\dfrac{\\mathbf{m}}{1 - {\\beta_1}^T}\\right)$\n", "3. $\\hat{\\mathbf{m}} \\gets \\left(\\dfrac{\\mathbf{m}}{1 - {\\beta_1}^T}\\right)$\n",
"4. $\\mathbf{s} \\gets \\left(\\dfrac{\\mathbf{s}}{1 - {\\beta_2}^T}\\right)$\n", "4. $\\hat{\\mathbf{s}} \\gets \\left(\\dfrac{\\mathbf{s}}{1 - {\\beta_2}^T}\\right)$\n",
"5. $\\mathbf{\\theta} \\gets \\mathbf{\\theta} + \\eta \\, \\mathbf{m} \\oslash {\\sqrt{\\mathbf{s} + \\epsilon}}$\n", "5. $\\boldsymbol{\\theta} \\gets \\boldsymbol{\\theta} + \\eta \\, \\hat{\\mathbf{m}} \\oslash {\\sqrt{\\hat{\\mathbf{s}} + \\epsilon}}$\n",
"\n", "\n",
"**In the text page 309:**\n", "**In the text page 309:**\n",
"\n", "\n",
@ -818,7 +818,7 @@
"**Equation 13-1: Computing the output of a neuron in a convolutional layer**\n", "**Equation 13-1: Computing the output of a neuron in a convolutional layer**\n",
"\n", "\n",
"$\n", "$\n",
"z_{i,j,k} = b_k + \\sum\\limits_{u = 0}^{f_h - 1} \\, \\, \\sum\\limits_{v = 0}^{f_w - 1} \\, \\, \\sum\\limits_{k' = 0}^{f_{n'} - 1} \\, \\, x_{i', j', k'} . w_{u, v, k', k}\n", "z_{i,j,k} = b_k + \\sum\\limits_{u = 0}^{f_h - 1} \\, \\, \\sum\\limits_{v = 0}^{f_w - 1} \\, \\, \\sum\\limits_{k' = 0}^{f_{n'} - 1} \\, \\, x_{i', j', k'} \\times w_{u, v, k', k}\n",
"\\quad \\text{with }\n", "\\quad \\text{with }\n",
"\\begin{cases}\n", "\\begin{cases}\n",
"i' = i \\times s_h + u \\\\\n", "i' = i \\times s_h + u \\\\\n",
@ -845,10 +845,10 @@
"source": [ "source": [
"# Chapter 14\n", "# Chapter 14\n",
"\n", "\n",
"**Equation 14-1: Output of a single recurrent neuron for a single instance**\n", "**Equation 14-1: Output of a recurrent layer for a single instance**\n",
"\n", "\n",
"$\n", "$\n",
"\\mathbf{y}_{(t)} = \\phi\\left({{\\mathbf{x}_{(t)}}^T \\cdot \\mathbf{w}_x} + {\\mathbf{y}_{(t-1)}}^T \\cdot {\\mathbf{w}_y} + b \\right)\n", "\\mathbf{y}_{(t)} = \\phi\\left({\\mathbf{W}_x}^T{\\mathbf{x}_{(t)}} + {{\\mathbf{W}_y}^T\\mathbf{y}_{(t-1)}} + \\mathbf{b} \\right)\n",
"$\n", "$\n",
"\n", "\n",
"\n", "\n",
@ -856,10 +856,10 @@
"\n", "\n",
"$\n", "$\n",
"\\begin{split}\n", "\\begin{split}\n",
"\\mathbf{Y}_{(t)} & = \\phi\\left(\\mathbf{X}_{(t)} \\cdot \\mathbf{W}_{x} + \\mathbf{Y}_{(t-1)}\\cdot \\mathbf{W}_{y} + \\mathbf{b} \\right) \\\\\n", "\\mathbf{Y}_{(t)} & = \\phi\\left(\\mathbf{X}_{(t)} \\mathbf{W}_{x} + \\mathbf{Y}_{(t-1)} \\mathbf{W}_{y} + \\mathbf{b} \\right) \\\\\n",
"& = \\phi\\left(\n", "& = \\phi\\left(\n",
"\\left[\\mathbf{X}_{(t)} \\quad \\mathbf{Y}_{(t-1)} \\right]\n", "\\left[\\mathbf{X}_{(t)} \\quad \\mathbf{Y}_{(t-1)} \\right]\n",
" \\cdot \\mathbf{W} + \\mathbf{b} \\right) \\text{ with } \\mathbf{W}=\n", " \\mathbf{W} + \\mathbf{b} \\right) \\text{ with } \\mathbf{W}=\n",
"\\left[ \\begin{matrix}\n", "\\left[ \\begin{matrix}\n",
" \\mathbf{W}_x\\\\\n", " \\mathbf{W}_x\\\\\n",
" \\mathbf{W}_y\n", " \\mathbf{W}_y\n",
@ -876,10 +876,10 @@
"\n", "\n",
"$\n", "$\n",
"\\begin{split}\n", "\\begin{split}\n",
"\\mathbf{i}_{(t)}&=\\sigma({\\mathbf{W}_{xi}}^T \\cdot \\mathbf{x}_{(t)} + {\\mathbf{W}_{hi}}^T \\cdot \\mathbf{h}_{(t-1)} + \\mathbf{b}_i)\\\\\n", "\\mathbf{i}_{(t)}&=\\sigma({\\mathbf{W}_{xi}}^T \\mathbf{x}_{(t)} + {\\mathbf{W}_{hi}}^T \\mathbf{h}_{(t-1)} + \\mathbf{b}_i)\\\\\n",
"\\mathbf{f}_{(t)}&=\\sigma({\\mathbf{W}_{xf}}^T \\cdot \\mathbf{x}_{(t)} + {\\mathbf{W}_{hf}}^T \\cdot \\mathbf{h}_{(t-1)} + \\mathbf{b}_f)\\\\\n", "\\mathbf{f}_{(t)}&=\\sigma({\\mathbf{W}_{xf}}^T \\mathbf{x}_{(t)} + {\\mathbf{W}_{hf}}^T \\mathbf{h}_{(t-1)} + \\mathbf{b}_f)\\\\\n",
"\\mathbf{o}_{(t)}&=\\sigma({\\mathbf{W}_{xo}}^T \\cdot \\mathbf{x}_{(t)} + {\\mathbf{W}_{ho}}^T \\cdot \\mathbf{h}_{(t-1)} + \\mathbf{b}_o)\\\\\n", "\\mathbf{o}_{(t)}&=\\sigma({\\mathbf{W}_{xo}}^T \\mathbf{x}_{(t)} + {\\mathbf{W}_{ho}}^T \\mathbf{h}_{(t-1)} + \\mathbf{b}_o)\\\\\n",
"\\mathbf{g}_{(t)}&=\\operatorname{tanh}({\\mathbf{W}_{xg}}^T \\cdot \\mathbf{x}_{(t)} + {\\mathbf{W}_{hg}}^T \\cdot \\mathbf{h}_{(t-1)} + \\mathbf{b}_g)\\\\\n", "\\mathbf{g}_{(t)}&=\\operatorname{tanh}({\\mathbf{W}_{xg}}^T \\mathbf{x}_{(t)} + {\\mathbf{W}_{hg}}^T \\mathbf{h}_{(t-1)} + \\mathbf{b}_g)\\\\\n",
"\\mathbf{c}_{(t)}&=\\mathbf{f}_{(t)} \\otimes \\mathbf{c}_{(t-1)} \\, + \\, \\mathbf{i}_{(t)} \\otimes \\mathbf{g}_{(t)}\\\\\n", "\\mathbf{c}_{(t)}&=\\mathbf{f}_{(t)} \\otimes \\mathbf{c}_{(t-1)} \\, + \\, \\mathbf{i}_{(t)} \\otimes \\mathbf{g}_{(t)}\\\\\n",
"\\mathbf{y}_{(t)}&=\\mathbf{h}_{(t)} = \\mathbf{o}_{(t)} \\otimes \\operatorname{tanh}(\\mathbf{c}_{(t)})\n", "\\mathbf{y}_{(t)}&=\\mathbf{h}_{(t)} = \\mathbf{o}_{(t)} \\otimes \\operatorname{tanh}(\\mathbf{c}_{(t)})\n",
"\\end{split}\n", "\\end{split}\n",
@ -890,9 +890,9 @@
"\n", "\n",
"$\n", "$\n",
"\\begin{split}\n", "\\begin{split}\n",
"\\mathbf{z}_{(t)}&=\\sigma({\\mathbf{W}_{xz}}^T \\cdot \\mathbf{x}_{(t)} + {\\mathbf{W}_{hz}}^T \\cdot \\mathbf{h}_{(t-1)}) \\\\\n", "\\mathbf{z}_{(t)}&=\\sigma({\\mathbf{W}_{xz}}^T \\mathbf{x}_{(t)} + {\\mathbf{W}_{hz}}^T \\mathbf{h}_{(t-1)}) \\\\\n",
"\\mathbf{r}_{(t)}&=\\sigma({\\mathbf{W}_{xr}}^T \\cdot \\mathbf{x}_{(t)} + {\\mathbf{W}_{hr}}^T \\cdot \\mathbf{h}_{(t-1)}) \\\\\n", "\\mathbf{r}_{(t)}&=\\sigma({\\mathbf{W}_{xr}}^T \\mathbf{x}_{(t)} + {\\mathbf{W}_{hr}}^T \\mathbf{h}_{(t-1)}) \\\\\n",
"\\mathbf{g}_{(t)}&=\\operatorname{tanh}\\left({\\mathbf{W}_{xg}}^T \\cdot \\mathbf{x}_{(t)} + {\\mathbf{W}_{hg}}^T \\cdot (\\mathbf{r}_{(t)} \\otimes \\mathbf{h}_{(t-1)})\\right) \\\\\n", "\\mathbf{g}_{(t)}&=\\operatorname{tanh}\\left({\\mathbf{W}_{xg}}^T \\mathbf{x}_{(t)} + {\\mathbf{W}_{hg}}^T (\\mathbf{r}_{(t)} \\otimes \\mathbf{h}_{(t-1)})\\right) \\\\\n",
"\\mathbf{h}_{(t)}&=(1-\\mathbf{z}_{(t)}) \\otimes \\mathbf{h}_{(t-1)} + \\mathbf{z}_{(t)} \\otimes \\mathbf{g}_{(t)}\n", "\\mathbf{h}_{(t)}&=(1-\\mathbf{z}_{(t)}) \\otimes \\mathbf{h}_{(t-1)} + \\mathbf{z}_{(t)} \\otimes \\mathbf{g}_{(t)}\n",
"\\end{split}\n", "\\end{split}\n",
"$\n", "$\n",
@ -974,18 +974,14 @@
"**Equation 16-6: Q-Learning using an exploration function**\n", "**Equation 16-6: Q-Learning using an exploration function**\n",
"\n", "\n",
"$\n", "$\n",
" Q(s, a) \\gets (1-\\alpha)Q(s,a) + \\alpha\\left(r + \\gamma . \\underset{\\alpha'}{\\max}f(Q(s', a'), N(s', a'))\\right)\n", "Q(s, a) \\gets (1-\\alpha)Q(s,a) + \\alpha\\left(r + \\gamma . \\underset{\\alpha'}{\\max}f(Q(s', a'), N(s', a'))\\right)\n",
"$\n", "$\n",
"\n", "\n",
"\n", "**Equation 16-7: Target Q-Value**\n",
"**Equation 16-7: Deep Q-Learning cost function**\n",
"\n", "\n",
"$\n", "$\n",
"\\begin{split}\n", "y(s,a)=r+\\gamma\\,\\max_{a'}\\,Q_\\boldsymbol\\theta(s',a')\n",
"& J(\\mathbf{\\theta}_\\text{critic}) = \\dfrac{1}{m}\\sum\\limits_{i=1}^m\\left(y^{(i)} - Q(s^{(i)},a^{(i)},\\mathbf{\\theta}_\\text{critic})\\right)^2 \\\\\n", "$"
"& \\text{with } y^{(i)} = r^{(i)} + \\gamma . \\underset{a'}{\\max}Q(s'^{(i)},a',\\mathbf{\\theta}_\\text{actor})\n",
"\\end{split}\n",
"$\n"
] ]
}, },
{ {
@ -1074,7 +1070,7 @@
"\n", "\n",
"$\n", "$\n",
"\\begin{split}\n", "\\begin{split}\n",
"\\mathcal{L}(\\mathbf{w}, b, \\mathbf{\\alpha}) = \\frac{1}{2}\\mathbf{w}^T \\cdot \\mathbf{w} - \\sum\\limits_{i=1}^{m}{\\alpha^{(i)} \\left(t^{(i)}(\\mathbf{w}^T \\cdot \\mathbf{x}^{(i)} + b) - 1\\right)} \\\\\n", "\\mathcal{L}(\\mathbf{w}, b, \\mathbf{\\alpha}) = \\frac{1}{2}\\mathbf{w}^T \\mathbf{w} - \\sum\\limits_{i=1}^{m}{\\alpha^{(i)} \\left(t^{(i)}(\\mathbf{w}^T \\mathbf{x}^{(i)} + b) - 1\\right)} \\\\\n",
"\\text{with}\\quad \\alpha^{(i)} \\ge 0 \\quad \\text{for }i = 1, 2, \\dots, m\n", "\\text{with}\\quad \\alpha^{(i)} \\ge 0 \\quad \\text{for }i = 1, 2, \\dots, m\n",
"\\end{split}\n", "\\end{split}\n",
"$\n", "$\n",
@ -1084,7 +1080,7 @@
"$ (\\hat{\\mathbf{w}}, \\hat{b}, \\hat{\\mathbf{\\alpha}}) $\n", "$ (\\hat{\\mathbf{w}}, \\hat{b}, \\hat{\\mathbf{\\alpha}}) $\n",
"\n", "\n",
"\n", "\n",
"$ t^{(i)}((\\hat{\\mathbf{w}})^T \\cdot \\mathbf{x}^{(i)} + \\hat{b}) \\ge 1 \\quad \\text{for } i = 1, 2, \\dots, m $\n", "$ t^{(i)}(\\hat{\\mathbf{w}}^T \\mathbf{x}^{(i)} + \\hat{b}) \\ge 1 \\quad \\text{for } i = 1, 2, \\dots, m $\n",
"\n", "\n",
"\n", "\n",
"$ {\\hat{\\alpha}}^{(i)} \\ge 0 \\quad \\text{for } i = 1, 2, \\dots, m $\n", "$ {\\hat{\\alpha}}^{(i)} \\ge 0 \\quad \\text{for } i = 1, 2, \\dots, m $\n",
@ -1093,7 +1089,7 @@
"$ {\\hat{\\alpha}}^{(i)} = 0 $\n", "$ {\\hat{\\alpha}}^{(i)} = 0 $\n",
"\n", "\n",
"\n", "\n",
"$ t^{(i)}((\\hat{\\mathbf{w}})^T \\cdot \\mathbf{x}^{(i)} + \\hat{b}) = 1 $\n", "$ t^{(i)}((\\hat{\\mathbf{w}})^T \\mathbf{x}^{(i)} + \\hat{b}) = 1 $\n",
"\n", "\n",
"\n", "\n",
"$ {\\hat{\\alpha}}^{(i)} = 0 $\n", "$ {\\hat{\\alpha}}^{(i)} = 0 $\n",
@ -1125,7 +1121,7 @@
"\\begin{split}\n", "\\begin{split}\n",
"\\mathcal{L}(\\hat{\\mathbf{w}}, \\hat{b}, \\mathbf{\\alpha}) = \\dfrac{1}{2}\\sum\\limits_{i=1}^{m}{\n", "\\mathcal{L}(\\hat{\\mathbf{w}}, \\hat{b}, \\mathbf{\\alpha}) = \\dfrac{1}{2}\\sum\\limits_{i=1}^{m}{\n",
" \\sum\\limits_{j=1}^{m}{\n", " \\sum\\limits_{j=1}^{m}{\n",
" \\alpha^{(i)} \\alpha^{(j)} t^{(i)} t^{(j)} {\\mathbf{x}^{(i)}}^T \\cdot \\mathbf{x}^{(j)}\n", " \\alpha^{(i)} \\alpha^{(j)} t^{(i)} t^{(j)} {\\mathbf{x}^{(i)}}^T \\mathbf{x}^{(j)}\n",
" }\n", " }\n",
"} \\quad - \\quad \\sum\\limits_{i=1}^{m}{\\alpha^{(i)}}\\\\\n", "} \\quad - \\quad \\sum\\limits_{i=1}^{m}{\\alpha^{(i)}}\\\\\n",
"\\text{with}\\quad \\alpha^{(i)} \\ge 0 \\quad \\text{for }i = 1, 2, \\dots, m\n", "\\text{with}\\quad \\alpha^{(i)} \\ge 0 \\quad \\text{for }i = 1, 2, \\dots, m\n",
@ -1149,13 +1145,13 @@
"$ \\hat{b} $\n", "$ \\hat{b} $\n",
"\n", "\n",
"\n", "\n",
"$ \\hat{b} = 1 - t^{(k)}({\\hat{\\mathbf{w}}}^T \\cdot \\mathbf{x}^{(k)}) $\n", "$ \\hat{b} = t^{(k)} - {\\hat{\\mathbf{w}}}^T \\mathbf{x}^{(k)} $\n",
"\n", "\n",
"\n", "\n",
"**Equation C-5: Bias term estimation using the dual form**\n", "**Equation C-5: Bias term estimation using the dual form**\n",
"\n", "\n",
"$\n", "$\n",
"\\hat{b} = \\dfrac{1}{n_s}\\sum\\limits_{\\scriptstyle i=1 \\atop {\\scriptstyle {\\hat{\\alpha}}^{(i)} > 0}}^{m}{\\left[1 - t^{(i)}({\\hat{\\mathbf{w}}}^T \\cdot \\mathbf{x}^{(i)})\\right]}\n", "\\hat{b} = \\dfrac{1}{n_s}\\sum\\limits_{\\scriptstyle i=1 \\atop {\\scriptstyle {\\hat{\\alpha}}^{(i)} > 0}}^{m}{\\left[t^{(i)} - {\\hat{\\mathbf{w}}}^T \\mathbf{x}^{(i)}\\right]}\n",
"$" "$"
] ]
}, },
@ -1346,21 +1342,21 @@
], ],
"metadata": { "metadata": {
"kernelspec": { "kernelspec": {
"display_name": "Python 2", "display_name": "Python 3",
"language": "python", "language": "python",
"name": "python2" "name": "python3"
}, },
"language_info": { "language_info": {
"codemirror_mode": { "codemirror_mode": {
"name": "ipython", "name": "ipython",
"version": 2 "version": 3
}, },
"file_extension": ".py", "file_extension": ".py",
"mimetype": "text/x-python", "mimetype": "text/x-python",
"name": "python", "name": "python",
"nbconvert_exporter": "python", "nbconvert_exporter": "python",
"pygments_lexer": "ipython2", "pygments_lexer": "ipython3",
"version": "2.7.12" "version": "3.6.5"
} }
}, },
"nbformat": 4, "nbformat": 4,