diff --git a/16_nlp_with_rnns_and_attention.ipynb b/16_nlp_with_rnns_and_attention.ipynb
index 24db9a7..b90b5b8 100644
--- a/16_nlp_with_rnns_and_attention.ipynb
+++ b/16_nlp_with_rnns_and_attention.ipynb
@@ -66,7 +66,7 @@
     "from tensorflow import keras\n",
     "assert tf.__version__ >= \"2.0\"\n",
     "\n",
-    "if not tf.test.is_gpu_available():\n",
+    "if not tf.config.list_physical_devices('GPU'):\n",
     "    print(\"No GPU was detected. LSTMs and CNNs can be very slow without a GPU.\")\n",
     "    if IS_COLAB:\n",
     "        print(\"Go to Runtime > Change runtime and select a GPU hardware accelerator.\")\n",
@@ -317,12 +317,9 @@
    "source": [
     "model = keras.models.Sequential([\n",
     "    keras.layers.GRU(128, return_sequences=True, input_shape=[None, max_id],\n",
-    "                     # no dropout in stateful RNN (https://github.com/ageron/handson-ml2/issues/32)\n",
-    "                     # dropout=0.2, recurrent_dropout=0.2,\n",
-    "                     ),\n",
+    "                     dropout=0.2, recurrent_dropout=0.2),\n",
     "    keras.layers.GRU(128, return_sequences=True,\n",
-    "                     # dropout=0.2, recurrent_dropout=0.2\n",
-    "                     ),\n",
+    "                     dropout=0.2, recurrent_dropout=0.2),\n",
     "    keras.layers.TimeDistributed(keras.layers.Dense(max_id,\n",
     "                                                    activation=\"softmax\"))\n",
     "])\n",
@@ -1237,7 +1234,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.7.3"
+   "version": "3.7.6"
   },
   "nav_menu": {},
   "toc": {
@@ -1251,5 +1248,5 @@
  }
 },
 "nbformat": 4,
-"nbformat_minor": 1
+"nbformat_minor": 2
}
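
For context, the two code-cell changes above correspond roughly to the following standalone Python sketch: the deprecated tf.test.is_gpu_available() check is replaced by tf.config.list_physical_devices('GPU'), and dropout is re-enabled on both GRU layers. Names such as max_id come from the notebook; its concrete value and the compile step below are illustrative assumptions, not part of this diff.

import tensorflow as tf
from tensorflow import keras

# TF 2.x way to detect GPUs (tf.test.is_gpu_available() is deprecated).
if not tf.config.list_physical_devices('GPU'):
    print("No GPU was detected. LSTMs and CNNs can be very slow without a GPU.")

max_id = 39  # vocabulary size of the notebook's char-level tokenizer (illustrative value)

# Char-RNN with dropout re-enabled on both GRU layers. Note that a nonzero
# recurrent_dropout prevents Keras from using the fused cuDNN kernel, so
# training will be slower on GPU.
model = keras.models.Sequential([
    keras.layers.GRU(128, return_sequences=True, input_shape=[None, max_id],
                     dropout=0.2, recurrent_dropout=0.2),
    keras.layers.GRU(128, return_sequences=True,
                     dropout=0.2, recurrent_dropout=0.2),
    keras.layers.TimeDistributed(keras.layers.Dense(max_id, activation="softmax"))
])
model.compile(loss="sparse_categorical_crossentropy", optimizer="adam")  # assumed setup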