Replace np.round(a) with a.round(), and other similar changes

main
Aurélien Geron 2021-11-01 14:42:42 +13:00
parent 76e8a7ec80
commit 81407ede2b
1 changed file with 11 additions and 11 deletions
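For context, the NumPy functions being replaced have equivalent method forms on ndarrays and pandas Series, so the notebook's results are unchanged. A minimal sketch of the equivalence (the array and Series below are illustrative, not taken from the notebook):

    import numpy as np
    import pandas as pd

    a = np.array([1.2345, 6.789])
    assert np.array_equal(np.round(a, 2), a.round(2))    # function and method forms agree

    s = pd.Series([1.0, np.nan, 3.0])
    assert np.sum(s.isnull()) == s.isnull().sum() == 1   # both count the single missing value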

@@ -350,7 +350,7 @@
"metadata": {},
"outputs": [],
"source": [
"np.sum(test_set[\"total_bedrooms\"].isnull())"
"test_set[\"total_bedrooms\"].isnull().sum()"
]
},
{
@@ -1428,8 +1428,8 @@
"\n",
" def fit(self, X, y=None): # y is required even though we don't use it\n",
" X = check_array(X) # checks that X is an array with finite float values\n",
" self.mean_ = np.mean(X, axis=0)\n",
" self.scale_ = np.std(X, axis=0)\n",
" self.mean_ = X.mean(axis=0)\n",
" self.scale_ = X.std(axis=0)\n",
" self.n_features_in_ = X.shape[1] # every estimator stores this in fit()\n",
" return self # always return self!\n",
"\n",
@@ -1529,7 +1529,7 @@
"metadata": {},
"outputs": [],
"source": [
"np.round(similarities[:3], 2)"
"similarities[:3].round(2)"
]
},
{
@@ -1618,7 +1618,7 @@
"outputs": [],
"source": [
"housing_num_prepared = num_pipeline.fit_transform(housing_num)\n",
"np.round(housing_num_prepared[:2], 2)"
"housing_num_prepared[:2].round(2)"
]
},
{
@@ -1881,7 +1881,7 @@
"outputs": [],
"source": [
"housing_predictions = lin_reg.predict(housing)\n",
"np.round(housing_predictions[:5], -2)"
"housing_predictions[:5].round(-2)"
]
},
{
@@ -1907,7 +1907,7 @@
"outputs": [],
"source": [
"# Not in the book\n",
"error_ratios = np.round(housing_predictions[:5], -2) / housing_labels.iloc[:5].values - 1\n",
"error_ratios = housing_predictions[:5].round(-2) / housing_labels.iloc[:5].values - 1\n",
"print(\", \".join([f\"{100 * ratio:.1f}%\" for ratio in error_ratios]))"
]
},
@@ -2151,7 +2151,7 @@
" \"split1_test_score\", \"split2_test_score\", \"mean_test_score\"]]\n",
"score_cols = [\"split0\", \"split1\", \"split2\", \"mean_test_rmse\"]\n",
"cv_res.columns = [\"n_clusters\", \"max_features\"] + score_cols\n",
"cv_res[score_cols] = -np.round(cv_res[score_cols]).astype(np.int64)\n",
"cv_res[score_cols] = -cv_res[score_cols].round().astype(np.int64)\n",
"cv_res.head()"
]
},
@@ -2212,7 +2212,7 @@
" \"param_random_forest__max_features\", \"split0_test_score\",\n",
" \"split1_test_score\", \"split2_test_score\", \"mean_test_score\"]]\n",
"cv_res.columns = [\"n_clusters\", \"max_features\"] + score_cols\n",
"cv_res[score_cols] = -np.round(cv_res[score_cols]).astype(np.int64)\n",
"cv_res[score_cols] = -cv_res[score_cols].round().astype(np.int64)\n",
"cv_res.head()"
]
},
@@ -2368,7 +2368,7 @@
"source": [
"final_model = rnd_search.best_estimator_\n",
"feature_importances = final_model[\"random_forest\"].feature_importances_\n",
"np.round(feature_importances, 2)"
"feature_importances.round(2)"
]
},
{
@@ -2733,7 +2733,7 @@
"np.random.seed(42)\n",
"\n",
"s = expon(scale=1).rvs(100_000) # get 100,000 samples\n",
"np.sum((s > 0.105) & (s < 2.29)) / 100_000"
"((s > 0.105) & (s < 2.29)).sum() / 100_000"
]
},
{