diff --git a/math_linear_algebra.ipynb b/math_linear_algebra.ipynb
index 4233cb7..26ed9c5 100644
--- a/math_linear_algebra.ipynb
+++ b/math_linear_algebra.ipynb
@@ -17,10 +17,10 @@
"source": [
"
\n",
" \n",
- " \n",
+ " \n",
" | \n",
" \n",
- " \n",
+ " \n",
" | \n",
"
"
]
@@ -77,7 +77,7 @@
},
{
"cell_type": "code",
- "execution_count": 2,
+ "execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
@@ -93,11 +93,12 @@
},
{
"cell_type": "code",
- "execution_count": 3,
+ "execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"import numpy as np\n",
+ "\n",
"video = np.array([10.5, 5.2, 3.25, 7.0])\n",
"video"
]
@@ -111,7 +112,7 @@
},
{
"cell_type": "code",
- "execution_count": 4,
+ "execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
@@ -129,7 +130,7 @@
},
{
"cell_type": "code",
- "execution_count": 5,
+ "execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
@@ -146,11 +147,10 @@
},
{
"cell_type": "code",
- "execution_count": 6,
+ "execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
- "%matplotlib inline\n",
"import matplotlib.pyplot as plt"
]
},
@@ -164,13 +164,8 @@
},
{
"cell_type": "code",
- "execution_count": 7,
- "metadata": {
- "collapsed": true,
- "jupyter": {
- "outputs_hidden": true
- }
- },
+ "execution_count": 6,
+ "metadata": {},
"outputs": [],
"source": [
"u = np.array([2, 5])\n",
@@ -186,7 +181,7 @@
},
{
"cell_type": "code",
- "execution_count": 8,
+ "execution_count": 7,
"metadata": {},
"outputs": [],
"source": [
@@ -206,7 +201,7 @@
},
{
"cell_type": "code",
- "execution_count": 9,
+ "execution_count": 8,
"metadata": {},
"outputs": [],
"source": [
@@ -225,7 +220,7 @@
},
{
"cell_type": "code",
- "execution_count": 10,
+ "execution_count": 9,
"metadata": {},
"outputs": [],
"source": [
@@ -246,7 +241,7 @@
},
{
"cell_type": "code",
- "execution_count": 11,
+ "execution_count": 10,
"metadata": {},
"outputs": [],
"source": [
@@ -263,7 +258,7 @@
},
{
"cell_type": "code",
- "execution_count": 12,
+ "execution_count": 11,
"metadata": {},
"outputs": [],
"source": [
@@ -285,7 +280,7 @@
},
{
"cell_type": "code",
- "execution_count": 13,
+ "execution_count": 12,
"metadata": {},
"outputs": [],
"source": [
@@ -311,12 +306,12 @@
"\n",
"$\\left \\Vert \\textbf{u} \\right \\| = \\sqrt{\\sum_{i}{\\textbf{u}_i}^2}$\n",
"\n",
- "We could implement this easily in pure python, recalling that $\\sqrt x = x^{\\frac{1}{2}}$"
+ "That's the square root of the sum of all the squares of the components of $\\textbf{u}$. We could implement this easily in pure python, recalling that $\\sqrt x = x^{\\frac{1}{2}}$"
]
},
{
"cell_type": "code",
- "execution_count": 14,
+ "execution_count": 13,
"metadata": {},
"outputs": [],
"source": [
@@ -337,11 +332,12 @@
},
{
"cell_type": "code",
- "execution_count": 15,
+ "execution_count": 14,
"metadata": {},
"outputs": [],
"source": [
"import numpy.linalg as LA\n",
+ "\n",
"LA.norm(u)"
]
},
@@ -354,7 +350,7 @@
},
{
"cell_type": "code",
- "execution_count": 16,
+ "execution_count": 15,
"metadata": {},
"outputs": [],
"source": [
@@ -362,6 +358,7 @@
"plt.gca().add_artist(plt.Circle((0,0), radius, color=\"#DDDDDD\"))\n",
"plot_vector2d(u, color=\"red\")\n",
"plt.axis([0, 8.7, 0, 6])\n",
+ "plt.gca().set_aspect(\"equal\")\n",
"plt.grid()\n",
"plt.show()"
]
@@ -383,7 +380,7 @@
},
{
"cell_type": "code",
- "execution_count": 17,
+ "execution_count": 16,
"metadata": {},
"outputs": [],
"source": [
@@ -402,7 +399,7 @@
},
{
"cell_type": "code",
- "execution_count": 18,
+ "execution_count": 17,
"metadata": {
"scrolled": true
},
@@ -414,6 +411,7 @@
"plot_vector2d(u, origin=v, color=\"r\", linestyle=\"dotted\")\n",
"plot_vector2d(u+v, color=\"g\")\n",
"plt.axis([0, 9, 0, 7])\n",
+ "plt.gca().set_aspect(\"equal\")\n",
"plt.text(0.7, 3, \"u\", color=\"r\", fontsize=18)\n",
"plt.text(4, 3, \"u\", color=\"r\", fontsize=18)\n",
"plt.text(1.8, 0.2, \"v\", color=\"b\", fontsize=18)\n",
@@ -441,7 +439,7 @@
},
{
"cell_type": "code",
- "execution_count": 19,
+ "execution_count": 18,
"metadata": {},
"outputs": [],
"source": [
@@ -468,6 +466,7 @@
"plt.text(3.5, 0.4, \"v\", color=\"r\", fontsize=18)\n",
"\n",
"plt.axis([0, 6, 0, 5])\n",
+ "plt.gca().set_aspect(\"equal\")\n",
"plt.grid()\n",
"plt.show()"
]
@@ -489,7 +488,7 @@
},
{
"cell_type": "code",
- "execution_count": 20,
+ "execution_count": 19,
"metadata": {},
"outputs": [],
"source": [
@@ -507,7 +506,7 @@
},
{
"cell_type": "code",
- "execution_count": 21,
+ "execution_count": 20,
"metadata": {},
"outputs": [],
"source": [
@@ -530,6 +529,7 @@
"plot_vector2d(k * t3, color=\"b\", linestyle=\":\")\n",
"\n",
"plt.axis([0, 9, 0, 9])\n",
+ "plt.gca().set_aspect(\"equal\")\n",
"plt.grid()\n",
"plt.show()"
]
@@ -567,17 +567,18 @@
},
{
"cell_type": "code",
- "execution_count": 22,
+ "execution_count": 21,
"metadata": {},
"outputs": [],
"source": [
- "plt.gca().add_artist(plt.Circle((0,0),1,color='c'))\n",
+ "plt.gca().add_artist(plt.Circle((0, 0), 1, color='c'))\n",
"plt.plot(0, 0, \"ko\")\n",
- "plot_vector2d(v / LA.norm(v), color=\"k\")\n",
- "plot_vector2d(v, color=\"b\", linestyle=\":\")\n",
- "plt.text(0.3, 0.3, \"$\\hat{u}$\", color=\"k\", fontsize=18)\n",
+ "plot_vector2d(v / LA.norm(v), color=\"k\", zorder=10)\n",
+ "plot_vector2d(v, color=\"b\", linestyle=\":\", zorder=15)\n",
+ "plt.text(0.3, 0.3, r\"$\\hat{u}$\", color=\"k\", fontsize=18)\n",
"plt.text(1.5, 0.7, \"$u$\", color=\"b\", fontsize=18)\n",
"plt.axis([-1.5, 5.5, -1.5, 3.5])\n",
+ "plt.gca().set_aspect(\"equal\")\n",
"plt.grid()\n",
"plt.show()"
]
@@ -604,7 +605,7 @@
},
{
"cell_type": "code",
- "execution_count": 23,
+ "execution_count": 22,
"metadata": {},
"outputs": [],
"source": [
@@ -618,16 +619,16 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "But a *much* more efficient implementation is provided by NumPy with the `dot` function:"
+ "But a *much* more efficient implementation is provided by NumPy with the `np.dot()` function:"
]
},
{
"cell_type": "code",
- "execution_count": 24,
+ "execution_count": 23,
"metadata": {},
"outputs": [],
"source": [
- "np.dot(u,v)"
+ "np.dot(u, v)"
]
},
{
@@ -639,7 +640,7 @@
},
{
"cell_type": "code",
- "execution_count": 25,
+ "execution_count": 24,
"metadata": {},
"outputs": [],
"source": [
@@ -655,7 +656,7 @@
},
{
"cell_type": "code",
- "execution_count": 26,
+ "execution_count": 25,
"metadata": {},
"outputs": [],
"source": [
@@ -694,13 +695,13 @@
},
{
"cell_type": "code",
- "execution_count": 27,
+ "execution_count": 26,
"metadata": {},
"outputs": [],
"source": [
"def vector_angle(u, v):\n",
" cos_theta = u.dot(v) / LA.norm(u) / LA.norm(v)\n",
- " return np.arccos(np.clip(cos_theta, -1, 1))\n",
+ " return np.arccos(cos_theta.clip(-1, 1))\n",
"\n",
"theta = vector_angle(u, v)\n",
"print(\"Angle =\", theta, \"radians\")\n",
@@ -730,7 +731,7 @@
},
{
"cell_type": "code",
- "execution_count": 28,
+ "execution_count": 27,
"metadata": {},
"outputs": [],
"source": [
@@ -750,6 +751,7 @@
"plt.text(0.8, 3, \"$u$\", color=\"r\", fontsize=18)\n",
"\n",
"plt.axis([0, 8, 0, 5.5])\n",
+ "plt.gca().set_aspect(\"equal\")\n",
"plt.grid()\n",
"plt.show()"
]
@@ -778,7 +780,7 @@
},
{
"cell_type": "code",
- "execution_count": 29,
+ "execution_count": 28,
"metadata": {},
"outputs": [],
"source": [
@@ -797,7 +799,7 @@
},
{
"cell_type": "code",
- "execution_count": 30,
+ "execution_count": 29,
"metadata": {},
"outputs": [],
"source": [
@@ -829,7 +831,7 @@
},
{
"cell_type": "code",
- "execution_count": 31,
+ "execution_count": 30,
"metadata": {},
"outputs": [],
"source": [
@@ -845,7 +847,7 @@
},
{
"cell_type": "code",
- "execution_count": 32,
+ "execution_count": 31,
"metadata": {},
"outputs": [],
"source": [
@@ -872,7 +874,7 @@
},
{
"cell_type": "code",
- "execution_count": 33,
+ "execution_count": 32,
"metadata": {},
"outputs": [],
"source": [
@@ -888,7 +890,7 @@
},
{
"cell_type": "code",
- "execution_count": 34,
+ "execution_count": 33,
"metadata": {},
"outputs": [],
"source": [
@@ -904,7 +906,7 @@
},
{
"cell_type": "code",
- "execution_count": 35,
+ "execution_count": 34,
"metadata": {},
"outputs": [],
"source": [
@@ -920,7 +922,7 @@
},
{
"cell_type": "code",
- "execution_count": 36,
+ "execution_count": 35,
"metadata": {},
"outputs": [],
"source": [
@@ -929,7 +931,7 @@
},
{
"cell_type": "code",
- "execution_count": 37,
+ "execution_count": 36,
"metadata": {},
"outputs": [],
"source": [
@@ -1000,7 +1002,7 @@
},
{
"cell_type": "code",
- "execution_count": 38,
+ "execution_count": 37,
"metadata": {},
"outputs": [],
"source": [
@@ -1016,7 +1018,7 @@
},
{
"cell_type": "code",
- "execution_count": 39,
+ "execution_count": 38,
"metadata": {},
"outputs": [],
"source": [
@@ -1045,7 +1047,7 @@
},
{
"cell_type": "code",
- "execution_count": 40,
+ "execution_count": 39,
"metadata": {},
"outputs": [],
"source": [
@@ -1080,7 +1082,7 @@
},
{
"cell_type": "code",
- "execution_count": 41,
+ "execution_count": 40,
"metadata": {},
"outputs": [],
"source": [
@@ -1090,7 +1092,7 @@
},
{
"cell_type": "code",
- "execution_count": 42,
+ "execution_count": 41,
"metadata": {},
"outputs": [],
"source": [
@@ -1099,7 +1101,7 @@
},
{
"cell_type": "code",
- "execution_count": 43,
+ "execution_count": 42,
"metadata": {},
"outputs": [],
"source": [
@@ -1115,7 +1117,7 @@
},
{
"cell_type": "code",
- "execution_count": 44,
+ "execution_count": 43,
"metadata": {},
"outputs": [],
"source": [
@@ -1131,7 +1133,7 @@
},
{
"cell_type": "code",
- "execution_count": 45,
+ "execution_count": 44,
"metadata": {},
"outputs": [],
"source": [
@@ -1142,7 +1144,7 @@
},
{
"cell_type": "code",
- "execution_count": 46,
+ "execution_count": 45,
"metadata": {},
"outputs": [],
"source": [
@@ -1174,7 +1176,7 @@
},
{
"cell_type": "code",
- "execution_count": 47,
+ "execution_count": 46,
"metadata": {},
"outputs": [],
"source": [
@@ -1190,7 +1192,7 @@
},
{
"cell_type": "code",
- "execution_count": 48,
+ "execution_count": 47,
"metadata": {},
"outputs": [],
"source": [
@@ -1208,7 +1210,7 @@
},
{
"cell_type": "code",
- "execution_count": 49,
+ "execution_count": 48,
"metadata": {},
"outputs": [],
"source": [
@@ -1217,7 +1219,7 @@
},
{
"cell_type": "code",
- "execution_count": 50,
+ "execution_count": 49,
"metadata": {},
"outputs": [],
"source": [
@@ -1233,7 +1235,7 @@
},
{
"cell_type": "code",
- "execution_count": 51,
+ "execution_count": 50,
"metadata": {},
"outputs": [],
"source": [
@@ -1242,7 +1244,7 @@
},
{
"cell_type": "code",
- "execution_count": 52,
+ "execution_count": 51,
"metadata": {},
"outputs": [],
"source": [
@@ -1298,7 +1300,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "Let's multiply two matrices in NumPy, using `ndarray`'s `dot` method:\n",
+    "Let's multiply two matrices in NumPy, using the `np.matmul()` function:\n",
"\n",
"$E = AD = \\begin{bmatrix}\n",
" 10 & 20 & 30 \\\\\n",
@@ -1317,7 +1319,7 @@
},
{
"cell_type": "code",
- "execution_count": 53,
+ "execution_count": 52,
"metadata": {},
"outputs": [],
"source": [
@@ -1326,10 +1328,42 @@
" [11, 13, 17, 19],\n",
" [23, 29, 31, 37]\n",
" ])\n",
- "E = A.dot(D)\n",
+ "E = np.matmul(A, D)\n",
"E"
]
},
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Python 3.5 [introduced](https://docs.python.org/3/whatsnew/3.5.html#pep-465-a-dedicated-infix-operator-for-matrix-multiplication) the `@` infix operator for matrix multiplication, and NumPy 1.10 added support for it. `A @ D` is equivalent to `np.matmul(A, D)`:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 53,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "A @ D"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The `@` operator also works for vectors: `u @ v` computes the dot product of `u` and `v`:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 54,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "u @ v"
+ ]
+ },
{
"cell_type": "markdown",
"metadata": {},
@@ -1339,7 +1373,7 @@
},
{
"cell_type": "code",
- "execution_count": 54,
+ "execution_count": 55,
"metadata": {},
"outputs": [],
"source": [
@@ -1348,7 +1382,7 @@
},
{
"cell_type": "code",
- "execution_count": 55,
+ "execution_count": 56,
"metadata": {},
"outputs": [],
"source": [
@@ -1366,12 +1400,12 @@
},
{
"cell_type": "code",
- "execution_count": 56,
+ "execution_count": 57,
"metadata": {},
"outputs": [],
"source": [
"try:\n",
- " D.dot(A)\n",
+ " D @ A\n",
"except ValueError as e:\n",
" print(\"ValueError:\", e)"
]
@@ -1387,7 +1421,7 @@
},
{
"cell_type": "code",
- "execution_count": 57,
+ "execution_count": 58,
"metadata": {},
"outputs": [],
"source": [
@@ -1396,16 +1430,16 @@
" [4,1],\n",
" [9,3]\n",
" ])\n",
- "A.dot(F)"
+ "A @ F"
]
},
{
"cell_type": "code",
- "execution_count": 58,
+ "execution_count": 59,
"metadata": {},
"outputs": [],
"source": [
- "F.dot(A)"
+ "F @ A"
]
},
{
@@ -1417,7 +1451,7 @@
},
{
"cell_type": "code",
- "execution_count": 59,
+ "execution_count": 60,
"metadata": {},
"outputs": [],
"source": [
@@ -1426,16 +1460,16 @@
" [2, 5, 1, 0, 5],\n",
" [9, 11, 17, 21, 0],\n",
" [0, 1, 0, 1, 2]])\n",
- "A.dot(D).dot(G) # (AB)G"
+ "(A @ D) @ G # (AD)G"
]
},
{
"cell_type": "code",
- "execution_count": 60,
+ "execution_count": 61,
"metadata": {},
"outputs": [],
"source": [
- "A.dot(D.dot(G)) # A(BG)"
+ "A @ (D @ G) # A(DG)"
]
},
{
@@ -1445,22 +1479,22 @@
"It is also ***distributive* over addition** of matrices, meaning that $(Q + R)S = QS + RS$. For example:"
]
},
- {
- "cell_type": "code",
- "execution_count": 61,
- "metadata": {},
- "outputs": [],
- "source": [
- "(A + B).dot(D)"
- ]
- },
{
"cell_type": "code",
"execution_count": 62,
"metadata": {},
"outputs": [],
"source": [
- "A.dot(D) + B.dot(D)"
+ "(A + B) @ D"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 63,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "A @ D + B @ D"
]
},
{
@@ -1478,22 +1512,22 @@
"For example:"
]
},
- {
- "cell_type": "code",
- "execution_count": 63,
- "metadata": {},
- "outputs": [],
- "source": [
- "A.dot(np.eye(3))"
- ]
- },
{
"cell_type": "code",
"execution_count": 64,
"metadata": {},
"outputs": [],
"source": [
- "np.eye(2).dot(A)"
+ "A @ np.eye(3)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 65,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "np.eye(2) @ A"
]
},
{
@@ -1505,7 +1539,7 @@
},
{
"cell_type": "code",
- "execution_count": 65,
+ "execution_count": 66,
"metadata": {
"scrolled": true
},
@@ -1514,38 +1548,6 @@
"A * B # NOT a matrix multiplication"
]
},
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "**The @ infix operator**\n",
- "\n",
- "Python 3.5 [introduced](https://docs.python.org/3/whatsnew/3.5.html#pep-465-a-dedicated-infix-operator-for-matrix-multiplication) the `@` infix operator for matrix multiplication, and NumPy 1.10 added support for it. If you are using Python 3.5+ and NumPy 1.10+, you can simply write `A @ D` instead of `A.dot(D)`, making your code much more readable (but less portable). This operator also works for vector dot products."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 66,
- "metadata": {},
- "outputs": [],
- "source": [
- "import sys\n",
- "print(\"Python version: {}.{}.{}\".format(*sys.version_info))\n",
- "print(\"Numpy version:\", np.version.version)\n",
- "\n",
- "# Uncomment the following line if your Python version is ≥3.5\n",
- "# and your NumPy version is ≥1.10:\n",
- "\n",
- "#A @ D"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Note: `Q @ R` is actually equivalent to `Q.__matmul__(R)` which is implemented by NumPy as `np.matmul(Q, R)`, not as `Q.dot(R)`. The main difference is that `matmul` does not support scalar multiplication, while `dot` does, so you can write `Q.dot(3)`, which is equivalent to `Q * 3`, but you cannot write `Q @ 3` ([more details](http://stackoverflow.com/a/34142617/38626))."
- ]
- },
{
"cell_type": "markdown",
"metadata": {},
@@ -1645,7 +1647,7 @@
"metadata": {},
"outputs": [],
"source": [
- "(A.dot(D)).T"
+ "(A @ D).T"
]
},
{
@@ -1654,7 +1656,7 @@
"metadata": {},
"outputs": [],
"source": [
- "D.T.dot(A.T)"
+ "D.T @ A.T"
]
},
{
@@ -1679,7 +1681,7 @@
"metadata": {},
"outputs": [],
"source": [
- "D.dot(D.T)"
+ "D @ D.T"
]
},
{
@@ -1830,6 +1832,8 @@
"x_coords_P, y_coords_P = P\n",
"plt.scatter(x_coords_P, y_coords_P)\n",
"plt.axis([0, 5, 0, 4])\n",
+ "plt.gca().set_aspect(\"equal\")\n",
+ "plt.grid()\n",
"plt.show()"
]
},
@@ -1851,6 +1855,7 @@
"plt.plot(x_coords_P, y_coords_P, \"bo\")\n",
"plt.plot(x_coords_P, y_coords_P, \"b--\")\n",
"plt.axis([0, 5, 0, 4])\n",
+ "plt.gca().set_aspect(\"equal\")\n",
"plt.grid()\n",
"plt.show()"
]
@@ -1871,6 +1876,7 @@
"from matplotlib.patches import Polygon\n",
"plt.gca().add_artist(Polygon(P.T))\n",
"plt.axis([0, 5, 0, 4])\n",
+ "plt.gca().set_aspect(\"equal\")\n",
"plt.grid()\n",
"plt.show()"
]
@@ -1915,9 +1921,10 @@
"plt.text(2.5, 0.5, \"$H_{*,1}$\", color=\"k\", fontsize=18)\n",
"plt.text(4.1, 3.5, \"$H_{*,2}$\", color=\"k\", fontsize=18)\n",
"plt.text(0.4, 2.6, \"$H_{*,3}$\", color=\"k\", fontsize=18)\n",
- "plt.text(4.4, 0.2, \"$H_{*,4}$\", color=\"k\", fontsize=18)\n",
+ "plt.text(4.3, 0.2, \"$H_{*,4}$\", color=\"k\", fontsize=18)\n",
"\n",
"plt.axis([0, 5, 0, 4])\n",
+ "plt.gca().set_aspect(\"equal\")\n",
"plt.grid()\n",
"plt.show()"
]
@@ -1947,6 +1954,7 @@
" plot_vector2d(vector, origin=origin)\n",
"\n",
"plt.axis([0, 5, 0, 4])\n",
+ "plt.gca().set_aspect(\"equal\")\n",
"plt.grid()\n",
"plt.show()"
]
@@ -1988,9 +1996,12 @@
" plot_vector2d(vector_after, color=\"red\", linestyle=\"-\")\n",
" plt.gca().add_artist(Polygon(P_before.T, alpha=0.2))\n",
" plt.gca().add_artist(Polygon(P_after.T, alpha=0.3, color=\"r\"))\n",
+ " plt.plot(P_before[0], P_before[1], \"b--\", alpha=0.5)\n",
+ " plt.plot(P_after[0], P_after[1], \"r--\", alpha=0.5)\n",
" plt.text(P_before[0].mean(), P_before[1].mean(), text_before, fontsize=18, color=\"blue\")\n",
" plt.text(P_after[0].mean(), P_after[1].mean(), text_after, fontsize=18, color=\"red\")\n",
" plt.axis(axis)\n",
+ " plt.gca().set_aspect(\"equal\")\n",
" plt.grid()\n",
"\n",
"P_rescaled = 0.60 * P\n",
@@ -2011,12 +2022,7 @@
{
"cell_type": "code",
"execution_count": 90,
- "metadata": {
- "collapsed": true,
- "jupyter": {
- "outputs_hidden": true
- }
- },
+ "metadata": {},
"outputs": [],
"source": [
"U = np.array([[1, 0]])"
@@ -2035,7 +2041,7 @@
"metadata": {},
"outputs": [],
"source": [
- "U.dot(P)"
+ "U @ P"
]
},
{
@@ -2052,7 +2058,7 @@
"outputs": [],
"source": [
"def plot_projection(U, P):\n",
- " U_P = U.dot(P)\n",
+ " U_P = U @ P\n",
" \n",
" axis_end = 100 * U\n",
" plot_vector2d(axis_end[0], color=\"black\")\n",
@@ -2060,10 +2066,12 @@
" plt.gca().add_artist(Polygon(P.T, alpha=0.2))\n",
" for vector, proj_coordinate in zip(P.T, U_P.T):\n",
" proj_point = proj_coordinate * U\n",
- " plt.plot(proj_point[0][0], proj_point[0][1], \"ro\")\n",
- " plt.plot([vector[0], proj_point[0][0]], [vector[1], proj_point[0][1]], \"r--\")\n",
+ " plt.plot(proj_point[0][0], proj_point[0][1], \"ro\", zorder=10)\n",
+ " plt.plot([vector[0], proj_point[0][0]], [vector[1], proj_point[0][1]],\n",
+ " \"r--\", zorder=10)\n",
"\n",
" plt.axis([0, 5, 0, 4])\n",
+ " plt.gca().set_aspect(\"equal\")\n",
" plt.grid()\n",
" plt.show()\n",
"\n",
@@ -2133,7 +2141,7 @@
"metadata": {},
"outputs": [],
"source": [
- "V.dot(P)"
+ "V @ P"
]
},
{
@@ -2149,7 +2157,7 @@
"metadata": {},
"outputs": [],
"source": [
- "P_rotated = V.dot(P)\n",
+ "P_rotated = V @ P\n",
"plot_transformation(P, P_rotated, \"$P$\", \"$VP$\", [-2, 6, -2, 4], arrows=True)\n",
"plt.show()"
]
@@ -2207,7 +2215,7 @@
" [1, 1.5],\n",
" [0, 1]\n",
" ])\n",
- "plot_transformation(P, F_shear.dot(P), \"$P$\", \"$F_{shear} P$\",\n",
+ "plot_transformation(P, F_shear @ P, \"$P$\", \"$F_{shear} P$\",\n",
" axis=[0, 10, 0, 7])\n",
"plt.show()"
]
@@ -2229,7 +2237,7 @@
" [0, 0, 1, 1],\n",
" [0, 1, 1, 0]\n",
" ])\n",
- "plot_transformation(Square, F_shear.dot(Square), \"$Square$\", \"$F_{shear} Square$\",\n",
+ "plot_transformation(Square, F_shear @ Square, \"$Square$\", \"$F_{shear} Square$\",\n",
" axis=[0, 2.6, 0, 1.8])\n",
"plt.show()"
]
@@ -2251,7 +2259,7 @@
" [1.4, 0],\n",
" [0, 1/1.4]\n",
" ])\n",
- "plot_transformation(P, F_squeeze.dot(P), \"$P$\", \"$F_{squeeze} P$\",\n",
+ "plot_transformation(P, F_squeeze @ P, \"$P$\", \"$F_{squeeze} P$\",\n",
" axis=[0, 7, 0, 5])\n",
"plt.show()"
]
@@ -2269,7 +2277,7 @@
"metadata": {},
"outputs": [],
"source": [
- "plot_transformation(Square, F_squeeze.dot(Square), \"$Square$\", \"$F_{squeeze} Square$\",\n",
+ "plot_transformation(Square, F_squeeze @ Square, \"$Square$\", \"$F_{squeeze} Square$\",\n",
" axis=[0, 1.8, 0, 1.2])\n",
"plt.show()"
]
@@ -2291,7 +2299,7 @@
" [1, 0],\n",
" [0, -1]\n",
" ])\n",
- "plot_transformation(P, F_reflect.dot(P), \"$P$\", \"$F_{reflect} P$\",\n",
+ "plot_transformation(P, F_reflect @ P, \"$P$\", \"$F_{reflect} P$\",\n",
" axis=[-2, 9, -4.5, 4.5])\n",
"plt.show()"
]
@@ -2316,8 +2324,8 @@
" [1, -1.5],\n",
" [0, 1]\n",
"])\n",
- "P_sheared = F_shear.dot(P)\n",
- "P_unsheared = F_inv_shear.dot(P_sheared)\n",
+ "P_sheared = F_shear @ P\n",
+ "P_unsheared = F_inv_shear @ P_sheared\n",
"plot_transformation(P_sheared, P_unsheared, \"$P_{sheared}$\", \"$P_{unsheared}$\",\n",
" axis=[0, 10, 0, 7])\n",
"plt.plot(P[0], P[1], \"b--\")\n",
@@ -2328,7 +2336,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "We applied a shear mapping on $P$, just like we did before, but then we applied a second transformation to the result, and *lo and behold* this had the effect of coming back to the original $P$ (we plotted the original $P$'s outline to double check). The second transformation is the inverse of the first one.\n",
+ "We applied a shear mapping on $P$, just like we did before, but then we applied a second transformation to the result, and *lo and behold* this had the effect of coming back to the original $P$ (I've plotted the original $P$'s outline to double check). The second transformation is the inverse of the first one.\n",
"\n",
"We defined the inverse matrix $F_{shear}^{-1}$ manually this time, but NumPy provides an `inv` function to compute a matrix's inverse, so we could have written instead:"
]
@@ -2360,6 +2368,8 @@
" [0, 1, 1, 0, 0, 0.1, 1.1, 1.0, 1.1, 1.1, 1.0, 1.1, 0.1, 0, 0.1, 0.1],\n",
" \"r-\")\n",
"plt.axis([-0.5, 2.1, -0.5, 1.5])\n",
+ "plt.gca().set_aspect(\"equal\")\n",
+ "plt.grid()\n",
"plt.show()"
]
},
@@ -2382,7 +2392,7 @@
" [1, 0],\n",
" [0, 0]\n",
" ])\n",
- "plot_transformation(P, F_project.dot(P), \"$P$\", \"$F_{project} \\cdot P$\",\n",
+ "plot_transformation(P, F_project @ P, \"$P$\", r\"$F_{project} \\cdot P$\",\n",
" axis=[0, 6, -1, 4])\n",
"plt.show()"
]
@@ -2424,7 +2434,7 @@
" [np.cos(angle30)**2, np.sin(2*angle30)/2],\n",
" [np.sin(2*angle30)/2, np.sin(angle30)**2]\n",
" ])\n",
- "plot_transformation(P, F_project_30.dot(P), \"$P$\", \"$F_{project\\_30} \\cdot P$\",\n",
+ "plot_transformation(P, F_project_30 @ P, \"$P$\", r\"$F_{project\\_30} \\cdot P$\",\n",
" axis=[0, 6, -1, 4])\n",
"plt.show()"
]
@@ -2462,7 +2472,7 @@
"metadata": {},
"outputs": [],
"source": [
- "F_shear.dot(LA.inv(F_shear))"
+ "F_shear @ LA.inv(F_shear)"
]
},
{
@@ -2506,7 +2516,7 @@
" [0, -2],\n",
" [-1/2, 0]\n",
" ])\n",
- "plot_transformation(P, F_involution.dot(P), \"$P$\", \"$F_{involution} \\cdot P$\",\n",
+ "plot_transformation(P, F_involution @ P, \"$P$\", r\"$F_{involution} \\cdot P$\",\n",
" axis=[-8, 5, -4, 4])\n",
"plt.show()"
]
@@ -2532,7 +2542,7 @@
"metadata": {},
"outputs": [],
"source": [
- "F_reflect.dot(F_reflect.T)"
+ "F_reflect @ F_reflect.T"
]
},
{
@@ -2668,7 +2678,7 @@
" [0.5, 0],\n",
" [0, 0.5]\n",
" ])\n",
- "plot_transformation(P, F_scale.dot(P), \"$P$\", \"$F_{scale} \\cdot P$\",\n",
+ "plot_transformation(P, F_scale @ P, \"$P$\", r\"$F_{scale} \\cdot P$\",\n",
" axis=[0, 6, -1, 4])\n",
"plt.show()"
]
@@ -2721,7 +2731,7 @@
"metadata": {},
"outputs": [],
"source": [
- "P_squeezed_then_sheared = F_shear.dot(F_squeeze.dot(P))"
+ "P_squeezed_then_sheared = F_shear @ (F_squeeze @ P)"
]
},
{
@@ -2737,7 +2747,7 @@
"metadata": {},
"outputs": [],
"source": [
- "P_squeezed_then_sheared = (F_shear.dot(F_squeeze)).dot(P)"
+ "P_squeezed_then_sheared = F_shear @ F_squeeze @ P"
]
},
{
@@ -2752,16 +2762,11 @@
{
"cell_type": "code",
"execution_count": 122,
- "metadata": {
- "collapsed": true,
- "jupyter": {
- "outputs_hidden": true
- }
- },
+ "metadata": {},
"outputs": [],
"source": [
- "F_squeeze_then_shear = F_shear.dot(F_squeeze)\n",
- "P_squeezed_then_sheared = F_squeeze_then_shear.dot(P)"
+ "F_squeeze_then_shear = F_shear @ F_squeeze\n",
+ "P_squeezed_then_sheared = F_squeeze_then_shear @ P"
]
},
{
@@ -2788,7 +2793,7 @@
"metadata": {},
"outputs": [],
"source": [
- "LA.inv(F_shear.dot(F_squeeze)) == LA.inv(F_squeeze).dot(LA.inv(F_shear))"
+ "LA.inv(F_shear @ F_squeeze) == LA.inv(F_squeeze) @ LA.inv(F_shear)"
]
},
{
@@ -2855,7 +2860,7 @@
"metadata": {},
"outputs": [],
"source": [
- "U.dot(np.diag(S_diag)).dot(V_T)"
+ "U @ np.diag(S_diag) @ V_T"
]
},
{
@@ -2880,7 +2885,7 @@
"metadata": {},
"outputs": [],
"source": [
- "plot_transformation(Square, V_T.dot(Square), \"$Square$\", \"$V^T \\cdot Square$\",\n",
+ "plot_transformation(Square, V_T @ Square, \"$Square$\", r\"$V^T \\cdot Square$\",\n",
" axis=[-0.5, 3.5 , -1.5, 1.5])\n",
"plt.show()"
]
@@ -2898,7 +2903,9 @@
"metadata": {},
"outputs": [],
"source": [
- "plot_transformation(V_T.dot(Square), S.dot(V_T).dot(Square), \"$V^T \\cdot Square$\", \"$\\Sigma \\cdot V^T \\cdot Square$\",\n",
+ "plot_transformation(V_T @ Square, S @ V_T @ Square,\n",
+ " r\"$V^T \\cdot Square$\",\n",
+ " r\"$\\Sigma \\cdot V^T \\cdot Square$\",\n",
" axis=[-0.5, 3.5 , -1.5, 1.5])\n",
"plt.show()"
]
@@ -2916,7 +2923,9 @@
"metadata": {},
"outputs": [],
"source": [
- "plot_transformation(S.dot(V_T).dot(Square), U.dot(S).dot(V_T).dot(Square),\"$\\Sigma \\cdot V^T \\cdot Square$\", \"$U \\cdot \\Sigma \\cdot V^T \\cdot Square$\",\n",
+ "plot_transformation(S @ V_T @ Square, U @ S @ V_T @ Square,\n",
+ " r\"$\\Sigma \\cdot V^T \\cdot Square$\",\n",
+ " r\"$U \\cdot \\Sigma \\cdot V^T \\cdot Square$\",\n",
" axis=[-0.5, 3.5 , -1.5, 1.5])\n",
"plt.show()"
]
@@ -3019,7 +3028,7 @@
" [ 10, 20, 30],\n",
" [ 1, 2, 3],\n",
" ])\n",
- "np.trace(D)"
+ "D.trace()"
]
},
{
@@ -3042,7 +3051,7 @@
"metadata": {},
"outputs": [],
"source": [
- "np.trace(F_project)"
+ "F_project.trace()"
]
},
{