From e90459c24c3af3bc00b17c2225330c92dd91e56e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Aur=C3=A9lien=20Geron?=
Date: Thu, 27 Dec 2018 12:56:48 +0800
Subject: [PATCH] Fix sigmoid activation function name

---
 10_introduction_to_artificial_neural_networks.ipynb | 13 +++++--------
 1 file changed, 5 insertions(+), 8 deletions(-)

diff --git a/10_introduction_to_artificial_neural_networks.ipynb b/10_introduction_to_artificial_neural_networks.ipynb
index 6df82f8..26808ef 100644
--- a/10_introduction_to_artificial_neural_networks.ipynb
+++ b/10_introduction_to_artificial_neural_networks.ipynb
@@ -160,7 +160,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "def logit(z):\n",
+    "def sigmoid(z):\n",
     "    return 1 / (1 + np.exp(-z))\n",
     "\n",
     "def relu(z):\n",
@@ -181,8 +181,8 @@
     "plt.figure(figsize=(11,4))\n",
     "\n",
     "plt.subplot(121)\n",
-    "plt.plot(z, np.sign(z), \"r-\", linewidth=2, label=\"Step\")\n",
-    "plt.plot(z, logit(z), \"g--\", linewidth=2, label=\"Logit\")\n",
+    "plt.plot(z, np.sign(z), \"r-\", linewidth=1, label=\"Step\")\n",
+    "plt.plot(z, sigmoid(z), \"g--\", linewidth=2, label=\"Sigmoid\")\n",
     "plt.plot(z, np.tanh(z), \"b-\", linewidth=2, label=\"Tanh\")\n",
     "plt.plot(z, relu(z), \"m-.\", linewidth=2, label=\"ReLU\")\n",
     "plt.grid(True)\n",
@@ -191,10 +191,10 @@
     "plt.axis([-5, 5, -1.2, 1.2])\n",
     "\n",
     "plt.subplot(122)\n",
-    "plt.plot(z, derivative(np.sign, z), \"r-\", linewidth=2, label=\"Step\")\n",
+    "plt.plot(z, derivative(np.sign, z), \"r-\", linewidth=1, label=\"Step\")\n",
     "plt.plot(0, 0, \"ro\", markersize=5)\n",
     "plt.plot(0, 0, \"rx\", markersize=10)\n",
-    "plt.plot(z, derivative(logit, z), \"g--\", linewidth=2, label=\"Logit\")\n",
+    "plt.plot(z, derivative(sigmoid, z), \"g--\", linewidth=2, label=\"Sigmoid\")\n",
     "plt.plot(z, derivative(np.tanh, z), \"b-\", linewidth=2, label=\"Tanh\")\n",
     "plt.plot(z, derivative(relu, z), \"m-.\", linewidth=2, label=\"ReLU\")\n",
     "plt.grid(True)\n",
@@ -215,9 +215,6 @@
     "def heaviside(z):\n",
     "    return (z >= 0).astype(z.dtype)\n",
     "\n",
-    "def sigmoid(z):\n",
-    "    return 1/(1+np.exp(-z))\n",
-    "\n",
     "def mlp_xor(x1, x2, activation=heaviside):\n",
     "    return activation(-activation(x1 + x2 - 1.5) + activation(x1 + x2 - 0.5) - 0.5)"
   ]
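
For quick review: the rename matters because the logit is the inverse of the sigmoid, so logit() was a misleading name for this function, and the duplicate definition removed by the last hunk becomes unnecessary once the first cell uses the correct name. Below is a self-contained sketch of the functions touched by the patch, runnable outside the notebook; sigmoid(), heaviside(), and mlp_xor() are verbatim from the patched cells, while the numpy import and the truth-table check at the bottom are illustrative additions, not notebook code.

import numpy as np

def sigmoid(z):
    # Logistic sigmoid (formerly misnamed logit): squashes z into (0, 1).
    return 1 / (1 + np.exp(-z))

def heaviside(z):
    # Step activation: 1.0 where z >= 0, else 0.0 (keeps z's float dtype).
    return (z >= 0).astype(z.dtype)

def mlp_xor(x1, x2, activation=heaviside):
    # One hidden unit fires on AND (x1 + x2 >= 1.5), the other on OR
    # (x1 + x2 >= 0.5); the output fires on OR AND NOT AND, i.e. XOR.
    return activation(-activation(x1 + x2 - 1.5) + activation(x1 + x2 - 0.5) - 0.5)

# Illustrative truth-table check (not in the notebook):
x1 = np.array([0., 0., 1., 1.])
x2 = np.array([0., 1., 0., 1.])
print(mlp_xor(x1, x2))  # -> [0. 1. 1. 0.]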