From 7eee161e7af56b4d2fc400636ace93974c5bbe5b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Aur=C3=A9lien=20Geron?= Date: Fri, 27 Mar 2020 19:17:39 +1300 Subject: [PATCH] Add the coding exercise solutions for chapter 15 (SketchRNN and Bach chorale generator) --- ...essing_sequences_using_rnns_and_cnns.ipynb | 786 ++++++++++++++++-- 1 file changed, 707 insertions(+), 79 deletions(-) diff --git a/15_processing_sequences_using_rnns_and_cnns.ipynb b/15_processing_sequences_using_rnns_and_cnns.ipynb index f0f5f55..ebc6a51 100644 --- a/15_processing_sequences_using_rnns_and_cnns.ipynb +++ b/15_processing_sequences_using_rnns_and_cnns.ipynb @@ -65,7 +65,7 @@ "from tensorflow import keras\n", "assert tf.__version__ >= \"2.0\"\n", "\n", - "if not tf.test.is_gpu_available():\n", + "if not tf.config.list_physical_devices('GPU'):\n", " print(\"No GPU was detected. LSTMs and CNNs can be very slow without a GPU.\")\n", " if IS_COLAB:\n", " print(\"Go to Runtime > Change runtime and select a GPU hardware accelerator.\")\n", @@ -73,6 +73,7 @@ "# Common imports\n", "import numpy as np\n", "import os\n", + "from pathlib import Path\n", "\n", "# to make this notebook's output stable across runs\n", "np.random.seed(42)\n", @@ -1223,7 +1224,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## 1. to 6." + "## 1. to 8." ] }, { @@ -1237,14 +1238,21 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## 7. Embedded Reber Grammars" + "## 9. Tackling the SketchRNN Dataset" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "First we need to build a function that generates strings based on a grammar. The grammar will be represented as a list of possible transitions for each state. A transition specifies the string to output (or a grammar to generate it) and the next state." + "_Exercise: Train a classification model for the SketchRNN dataset, available in TensorFlow Datasets._" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "The dataset is not available in TFDS yet, the [pull request](https://github.com/tensorflow/datasets/pull/361) is still a work in progress. 
Luckily, the data is conveniently available as TFRecords, so let's download it (it might take a while, as it's about 1 GB, with 3,450,000 training sketches and 345,000 test sketches):" ] }, { @@ -1253,43 +1261,12 @@ "metadata": {}, "outputs": [], "source": [ "np.random.seed(42)\n", "\n", "default_reber_grammar = [\n", " [(\"B\", 1)], # (state 0) =B=>(state 1)\n", " [(\"T\", 2), (\"P\", 3)], # (state 1) =T=>(state 2) or =P=>(state 3)\n", " [(\"S\", 2), (\"X\", 4)], # (state 2) =S=>(state 2) or =X=>(state 4)\n", " [(\"T\", 3), (\"V\", 5)], # and so on...\n", " [(\"X\", 3), (\"S\", 6)],\n", " [(\"P\", 4), (\"V\", 6)],\n", " [(\"E\", None)]] # (state 6) =E=>(terminal state)\n", "\n", "embedded_reber_grammar = [\n", " [(\"B\", 1)],\n", " [(\"T\", 2), (\"P\", 3)],\n", " [(default_reber_grammar, 4)],\n", " [(default_reber_grammar, 5)],\n", " [(\"T\", 6)],\n", " [(\"P\", 6)],\n", " [(\"E\", None)]]\n", "\n", "def generate_string(grammar):\n", " state = 0\n", " output = []\n", " while state is not None:\n", " index = np.random.randint(len(grammar[state]))\n", " production, state = grammar[state][index]\n", " if isinstance(production, list):\n", " production = generate_string(grammar=production)\n", " output.append(production)\n", " return \"\".join(output)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Let's generate a few strings based on the default Reber grammar:" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "DOWNLOAD_ROOT = \"http://download.tensorflow.org/data/\"\n", "FILENAME = \"quickdraw_tutorial_dataset_v1.tar.gz\"\n", "filepath = keras.utils.get_file(FILENAME,\n", " DOWNLOAD_ROOT + FILENAME,\n", " cache_subdir=\"datasets/quickdraw\",\n", " extract=True)" ] }, { @@ -1298,15 +1275,9 @@ "metadata": {}, "outputs": [], "source": [ "for _ in range(25):\n", " print(generate_string(default_reber_grammar), end=\" \")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Looks good. Now let's generate a few strings based on the embedded Reber grammar:" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "quickdraw_dir = Path(filepath).parent\n", "train_files = sorted([str(path) for path in quickdraw_dir.glob(\"training.tfrecord-*\")])\n", "eval_files = sorted([str(path) for path in quickdraw_dir.glob(\"eval.tfrecord-*\")])" ] }, { @@ -1315,15 +1286,7 @@ "metadata": {}, "outputs": [], "source": [ "for _ in range(25):\n", " print(generate_string(embedded_reber_grammar), end=\" \")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Okay, now we need a function to generate strings that do not respect the grammar. 
We could generate a random string, but the task would be a bit too easy, so instead we will generate a string that respects the grammar, and we will corrupt it by changing just one character:" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "train_files" ] }, { @@ -1332,19 +1295,7 @@ "metadata": {}, "outputs": [], "source": [ "def generate_corrupted_string(grammar, chars=\"BEPSTVX\"):\n", " good_string = generate_string(grammar)\n", " index = np.random.randint(len(good_string))\n", " good_char = good_string[index]\n", " bad_char = np.random.choice(sorted(set(chars) - set(good_char)))\n", " return good_string[:index] + bad_char + good_string[index + 1:]" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Let's look at a few corrupted strings:" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "eval_files" ] }, { @@ -1353,23 +1304,700 @@ "metadata": {}, "outputs": [], "source": [ "for _ in range(25):\n", " print(generate_corrupted_string(embedded_reber_grammar), end=\" \")" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "with open(quickdraw_dir / \"eval.tfrecord.classes\") as test_classes_file:\n", " test_classes = test_classes_file.readlines()\n", " \n", "with open(quickdraw_dir / \"training.tfrecord.classes\") as train_classes_file:\n", " train_classes = train_classes_file.readlines()" ] }, { "cell_type": "code", "execution_count": 68, "metadata": {}, "outputs": [], "source": [ "assert train_classes == test_classes\n", "class_names = [name.strip().lower() for name in train_classes]" ] }, { "cell_type": "code", "execution_count": 69, "metadata": {}, "outputs": [], "source": [ "sorted(class_names)" ] }, { "cell_type": "code", "execution_count": 70, "metadata": {}, "outputs": [], "source": [ "def parse(data_batch):\n", " feature_descriptions = {\n", " \"ink\": tf.io.VarLenFeature(dtype=tf.float32),\n", " \"shape\": tf.io.FixedLenFeature([2], dtype=tf.int64),\n", " \"class_index\": tf.io.FixedLenFeature([1], dtype=tf.int64)\n", " }\n", " examples = tf.io.parse_example(data_batch, feature_descriptions)\n", " flat_sketches = tf.sparse.to_dense(examples[\"ink\"])\n", " sketches = tf.reshape(flat_sketches, shape=[tf.size(data_batch), -1, 3])\n", " lengths = examples[\"shape\"][:, 0]\n", " labels = examples[\"class_index\"][:, 0]\n", " return sketches, lengths, labels" ] }, { "cell_type": "code", "execution_count": 71, "metadata": {}, "outputs": [], "source": [ "def quickdraw_dataset(filepaths, batch_size=32, shuffle_buffer_size=None,\n", " n_parse_threads=5, n_read_threads=5, cache=False):\n", " dataset = tf.data.TFRecordDataset(filepaths,\n", " num_parallel_reads=n_read_threads)\n", " if cache:\n", " dataset = dataset.cache()\n", " if shuffle_buffer_size:\n", " dataset = dataset.shuffle(shuffle_buffer_size)\n", " dataset = dataset.batch(batch_size)\n", " dataset = dataset.map(parse, num_parallel_calls=n_parse_threads)\n", " return dataset.prefetch(1)" ] }, { "cell_type": "code", "execution_count": 72, "metadata": {}, "outputs": [], "source": [ "train_set = quickdraw_dataset(train_files, shuffle_buffer_size=10000)\n", "valid_set = quickdraw_dataset(eval_files[:5])\n", "test_set = quickdraw_dataset(eval_files[5:])" ] }, { "cell_type": "code", "execution_count": 73, "metadata": {}, "outputs": [], "source": [ "for sketches, lengths, labels in train_set.take(1):\n", " print(\"sketches =\", sketches)\n", " print(\"lengths =\", lengths)\n", " print(\"labels =\", labels)" ] }, { "cell_type": "code", "execution_count": 74,
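"metadata": {}, "outputs": [], "source": [ "# Added sanity check (not in the original notebook): as the parse() function\n", "# above assumes, each point of a sketch holds 3 values: the x and y offsets\n", "# from the previous point, plus a flag equal to 1.0 when the point is the\n", "# last one of its stroke. Peeking at the first sketch's first points makes\n", "# this format easy to see:\n", "sketches[0][:5]" ] }, { "cell_type": "code", "execution_count": null,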
"metadata": {}, + "outputs": [], + "source": [ + "def draw_sketch(sketch, label=None):\n", + " origin = np.array([[0., 0., 0.]])\n", + " sketch = np.r_[origin, sketch]\n", + " stroke_end_indices = np.argwhere(sketch[:, -1]==1.)[:, 0]\n", + " coordinates = np.cumsum(sketch[:, :2], axis=0)\n", + " strokes = np.split(coordinates, stroke_end_indices + 1)\n", + " title = class_names[label.numpy()] if label is not None else \"Try to guess\"\n", + " plt.title(title)\n", + " plt.plot(coordinates[:, 0], -coordinates[:, 1], \"y:\")\n", + " for stroke in strokes:\n", + " plt.plot(stroke[:, 0], -stroke[:, 1], \".-\")\n", + " plt.axis(\"off\")\n", + "\n", + "def draw_sketches(sketches, lengths, labels):\n", + " n_sketches = len(sketches)\n", + " n_cols = 4\n", + " n_rows = (n_sketches - 1) // n_cols + 1\n", + " plt.figure(figsize=(n_cols * 3, n_rows * 3.5))\n", + " for index, sketch, length, label in zip(range(n_sketches), sketches, lengths, labels):\n", + " plt.subplot(n_rows, n_cols, index + 1)\n", + " draw_sketch(sketch[:length], label)\n", + " plt.show()\n", + "\n", + "for sketches, lengths, labels in train_set.take(1):\n", + " draw_sketches(sketches, lengths, labels)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "To be continued..." + "Most sketches are composed of less than 100 points:" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 75, "metadata": {}, "outputs": [], - "source": [] + "source": [ + "lengths = np.concatenate([lengths for _, lengths, _ in train_set.take(1000)])\n", + "plt.hist(lengths, bins=150, density=True)\n", + "plt.axis([0, 200, 0, 0.03])\n", + "plt.xlabel(\"length\")\n", + "plt.ylabel(\"density\")\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": 76, + "metadata": {}, + "outputs": [], + "source": [ + "def crop_long_sketches(dataset, max_length=100):\n", + " return dataset.map(lambda inks, lengths, labels: (inks[:, :max_length], labels))\n", + "\n", + "cropped_train_set = crop_long_sketches(train_set)\n", + "cropped_valid_set = crop_long_sketches(valid_set)\n", + "cropped_test_set = crop_long_sketches(test_set)" + ] + }, + { + "cell_type": "code", + "execution_count": 77, + "metadata": {}, + "outputs": [], + "source": [ + "model = keras.models.Sequential([\n", + " keras.layers.Conv1D(32, kernel_size=5, strides=2, activation=\"relu\"),\n", + " keras.layers.BatchNormalization(),\n", + " keras.layers.Conv1D(64, kernel_size=5, strides=2, activation=\"relu\"),\n", + " keras.layers.BatchNormalization(),\n", + " keras.layers.Conv1D(128, kernel_size=3, strides=2, activation=\"relu\"),\n", + " keras.layers.BatchNormalization(),\n", + " keras.layers.LSTM(128, return_sequences=True),\n", + " keras.layers.LSTM(128),\n", + " keras.layers.Dense(len(class_names), activation=\"softmax\")\n", + "])\n", + "optimizer = keras.optimizers.SGD(lr=1e-2, clipnorm=1.)\n", + "model.compile(loss=\"sparse_categorical_crossentropy\",\n", + " optimizer=optimizer,\n", + " metrics=[\"accuracy\", \"sparse_top_k_categorical_accuracy\"])\n", + "history = model.fit(cropped_train_set, epochs=2,\n", + " validation_data=cropped_valid_set)" + ] + }, + { + "cell_type": "code", + "execution_count": 78, + "metadata": {}, + "outputs": [], + "source": [ + "y_test = np.concatenate([labels for _, _, labels in test_set])\n", + "y_probas = model.predict(test_set)" + ] + }, + { + "cell_type": "code", + "execution_count": 79, + "metadata": {}, + "outputs": [], + "source": [ + "np.mean(keras.metrics.sparse_top_k_categorical_accuracy(y_test, y_probas))" + ] + 
}, { "cell_type": "code", "execution_count": 80, "metadata": {}, "outputs": [], "source": [ "n_new = 10\n", "Y_probas = model.predict(sketches)\n", "top_k = tf.nn.top_k(Y_probas, k=5)\n", "for index in range(n_new):\n", " plt.figure(figsize=(3, 3.5))\n", " draw_sketch(sketches[index])\n", " plt.show()\n", " print(\"Top-5 predictions:\")\n", " for k in range(5):\n", " class_name = class_names[top_k.indices[index, k]]\n", " proba = 100 * top_k.values[index, k]\n", " print(\" {}. {} {:.3f}%\".format(k + 1, class_name, proba))\n", " print(\"Answer: {}\".format(class_names[labels[index].numpy()]))" ] }, { "cell_type": "code", "execution_count": 81, "metadata": {}, "outputs": [], "source": [ "model.save(\"my_sketchrnn\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 10. Bach Chorales\n", "_Exercise: Download the [Bach chorales](https://homl.info/bach) dataset and unzip it. It is composed of 382 chorales composed by Johann Sebastian Bach. Each chorale is 100 to 640 time steps long, and each time step contains 4 integers, where each integer corresponds to a note's index on a piano (except for the value 0, which means that no note is played). Train a model—recurrent, convolutional, or both—that can predict the next time step (four notes), given a sequence of time steps from a chorale. Then use this model to generate Bach-like music, one note at a time: you can do this by giving the model the start of a chorale and asking it to predict the next time step, then appending these time steps to the input sequence and asking the model for the next note, and so on. Also make sure to check out [Google's Coconet model](https://homl.info/coconet), which was used for a nice [Google doodle about Bach](https://www.google.com/doodles/celebrating-johann-sebastian-bach)._\n", "\n" ] }, { "cell_type": "code", "execution_count": 82, "metadata": {}, "outputs": [], "source": [ "DOWNLOAD_ROOT = \"https://github.com/ageron/handson-ml2/raw/master/datasets/jsb_chorales/\"\n", "FILENAME = \"jsb_chorales.tgz\"\n", "filepath = keras.utils.get_file(FILENAME,\n", " DOWNLOAD_ROOT + FILENAME,\n", " cache_subdir=\"datasets/jsb_chorales\",\n", " extract=True)" ] }, { "cell_type": "code", "execution_count": 83, "metadata": {}, "outputs": [], "source": [ "jsb_chorales_dir = Path(filepath).parent\n", "train_files = sorted(jsb_chorales_dir.glob(\"train/chorale_*.csv\"))\n", "valid_files = sorted(jsb_chorales_dir.glob(\"valid/chorale_*.csv\"))\n", "test_files = sorted(jsb_chorales_dir.glob(\"test/chorale_*.csv\"))" ] }, { "cell_type": "code", "execution_count": 84, "metadata": {}, "outputs": [], "source": [ "import pandas as pd\n", "\n", "def load_chorales(filepaths):\n", " return [pd.read_csv(filepath).values.tolist() for filepath in filepaths]\n", "\n", "train_chorales = load_chorales(train_files)\n", "valid_chorales = load_chorales(valid_files)\n", "test_chorales = load_chorales(test_files)" ] }, { "cell_type": "code", "execution_count": 85, "metadata": {}, "outputs": [], "source": [ "train_chorales[0]" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Notes range from 36 (C1 = C on octave 1) to 81 (A5 = A on octave 5), plus 0 for silence:" ] }, { "cell_type": "code", "execution_count": 86, "metadata": {}, "outputs": [], "source": [ "notes = set()\n", "for chorales in 
(train_chorales, valid_chorales, test_chorales):\n", " for chorale in chorales:\n", " for chord in chorale:\n", " notes |= set(chord)\n", "\n", "n_notes = len(notes)\n", "min_note = min(notes - {0})\n", "max_note = max(notes)\n", "\n", "assert min_note == 36\n", "assert max_note == 81" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Let's write a few functions to listen to these chorales (you don't need to understand the details here, and in fact there are certainly simpler ways to do this, for example using MIDI players, but I just wanted to have a bit of fun writing a synthesizer):" ] }, { "cell_type": "code", "execution_count": 87, "metadata": {}, "outputs": [], "source": [ "from IPython.display import Audio\n", "\n", "def notes_to_frequencies(notes):\n", " # Frequency doubles when you go up one octave; there are 12 semi-tones\n", " # per octave; Note A on octave 4 is 440 Hz, and it is note number 69.\n", " return 2 ** ((np.array(notes) - 69) / 12) * 440\n", "\n", "def frequencies_to_samples(frequencies, tempo, sample_rate):\n", " note_duration = 60 / tempo # the tempo is measured in beats per minute\n", " # To reduce the click sound at every beat, we round the frequencies to try to\n", " # get the samples close to zero at the end of each note.\n", " frequencies = np.round(note_duration * frequencies) / note_duration\n", " n_samples = int(note_duration * sample_rate)\n", " time = np.linspace(0, note_duration, n_samples)\n", " sine_waves = np.sin(2 * np.pi * frequencies.reshape(-1, 1) * time)\n", " # Removing all notes with frequencies ≤ 9 Hz (includes note 0 = silence)\n", " sine_waves *= (frequencies > 9.).reshape(-1, 1)\n", " return sine_waves.reshape(-1)\n", "\n", "def chords_to_samples(chords, tempo, sample_rate):\n", " freqs = notes_to_frequencies(chords)\n", " freqs = np.r_[freqs, freqs[-1:]] # make last note a bit longer\n", " merged = np.mean([frequencies_to_samples(melody, tempo, sample_rate)\n", " for melody in freqs.T], axis=0)\n", " n_fade_out_samples = sample_rate * 60 // tempo # fade out last note\n", " fade_out = np.linspace(1., 0., n_fade_out_samples)**2\n", " merged[-n_fade_out_samples:] *= fade_out\n", " return merged\n", "\n", "def play_chords(chords, tempo=160, amplitude=0.1, sample_rate=44100, filepath=None):\n", " samples = amplitude * chords_to_samples(chords, tempo, sample_rate)\n", " if filepath:\n", " from scipy.io import wavfile\n", " samples = (2**15 * samples).astype(np.int16)\n", " wavfile.write(filepath, sample_rate, samples)\n", " return display(Audio(filepath))\n", " else:\n", " return display(Audio(samples, rate=sample_rate, normalize=False))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Now let's listen to a few chorales:" ] }, { "cell_type": "code", "execution_count": 88, "metadata": {}, "outputs": [], "source": [ "for index in range(3):\n", " play_chords(train_chorales[index])" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Divine! :)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "In order to be able to generate new chorales, we want to train a model that can predict the next chord given all the previous chords. If we naively try to predict the next chord in one shot, predicting all 4 notes at once, we run the risk of getting notes that don't go very well together (believe me, I tried). 
It's much better and simpler to predict one note at a time. So we will need to preprocess every chorale, turning each chord into an arpeggio (i.e., a sequence of notes rather than notes played simultaneously). So each chorale will be a long sequence of notes (rather than chords), and we can just train a model that can predict the next note given all the previous notes. We will use a sequence-to-sequence approach, where we feed a window to the neural net, and it tries to predict that same window shifted one time step into the future.\n", "\n", "We will also shift the values so that they range from 0 to 46, where 0 represents silence, and values 1 to 46 represent notes 36 (C1) to 81 (A5).\n", "\n", "And we will train the model on windows of 128 notes (i.e., 32 chords).\n", "\n", "Since the dataset fits in memory, we could preprocess the chorales in RAM using any Python code we like, but I will demonstrate here how to do all the preprocessing using tf.data (there will be more details about creating windows using tf.data in the next chapter)." ] }, { "cell_type": "code", "execution_count": 89, "metadata": {}, "outputs": [], "source": [ "def create_target(batch):\n", " X = batch[:, :-1]\n", " Y = batch[:, 1:] # predict next note in each arpeggio, at each step\n", " return X, Y\n", "\n", "def preprocess(window):\n", " window = tf.where(window == 0, window, window - min_note + 1) # shift values\n", " return tf.reshape(window, [-1]) # convert to arpeggio\n", "\n", "def bach_dataset(chorales, batch_size=32, shuffle_buffer_size=None,\n", " window_size=32, window_shift=16, cache=True):\n", " def batch_window(window):\n", " return window.batch(window_size + 1)\n", "\n", " def to_windows(chorale):\n", " dataset = tf.data.Dataset.from_tensor_slices(chorale)\n", " dataset = dataset.window(window_size + 1, window_shift, drop_remainder=True)\n", " return dataset.flat_map(batch_window)\n", "\n", " chorales = tf.ragged.constant(chorales, ragged_rank=1)\n", " dataset = tf.data.Dataset.from_tensor_slices(chorales)\n", " dataset = dataset.flat_map(to_windows).map(preprocess)\n", " if cache:\n", " dataset = dataset.cache()\n", " if shuffle_buffer_size:\n", " dataset = dataset.shuffle(shuffle_buffer_size)\n", " dataset = dataset.batch(batch_size)\n", " dataset = dataset.map(create_target)\n", " return dataset.prefetch(1)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Now let's create the training set, the validation set and the test set:" ] }, { "cell_type": "code", "execution_count": 90, "metadata": {}, "outputs": [], "source": [ "train_set = bach_dataset(train_chorales, shuffle_buffer_size=1000)\n", "valid_set = bach_dataset(valid_chorales)\n", "test_set = bach_dataset(test_chorales)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Now let's create the model:\n", "\n", "* We could feed the note values directly to the model, as floats, but this would probably not give good results. Indeed, the relationships between notes are not that simple: for example, if you replace a C3 with a C4, the melody will still sound fine, even though these notes are 12 semi-tones apart (i.e., one octave). Conversely, if you replace a C3 with a C\\#3, it's very likely that the chord will sound horrible, despite these notes being just next to each other. 
So we will use an `Embedding` layer to convert each note to a small vector representation (see Chapter 16 for more details on embeddings). We will use 5-dimensional embeddings, so the output of this first layer will have a shape of `[batch_size, window_size, 5]`.\n", "* We will then feed this data to a small WaveNet-like neural network, composed of a stack of 4 `Conv1D` layers with doubling dilation rates. We will intersperse these layers with `BatchNormalization` layers for faster convergence.\n", "* Then one `LSTM` layer to try to capture long-term patterns.\n", "* And finally a `Dense` layer to produce the final note probabilities. It will predict one probability for each window in the batch, for each time step, and for each possible note (including silence). So the output shape will be `[batch_size, window_size, 47]`." ] }, { "cell_type": "code", "execution_count": 91, "metadata": {}, "outputs": [], "source": [ "n_embedding_dims = 5\n", "\n", "model = keras.models.Sequential([\n", " keras.layers.Embedding(input_dim=n_notes, output_dim=n_embedding_dims,\n", " input_shape=[None]),\n", " keras.layers.Conv1D(32, kernel_size=2, padding=\"causal\", activation=\"relu\"),\n", " keras.layers.BatchNormalization(),\n", " keras.layers.Conv1D(48, kernel_size=2, padding=\"causal\", activation=\"relu\", dilation_rate=2),\n", " keras.layers.BatchNormalization(),\n", " keras.layers.Conv1D(64, kernel_size=2, padding=\"causal\", activation=\"relu\", dilation_rate=4),\n", " keras.layers.BatchNormalization(),\n", " keras.layers.Conv1D(96, kernel_size=2, padding=\"causal\", activation=\"relu\", dilation_rate=8),\n", " keras.layers.BatchNormalization(),\n", " keras.layers.LSTM(256, return_sequences=True),\n", " keras.layers.Dense(n_notes, activation=\"softmax\")\n", "])\n", "\n", "model.summary()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Now we're ready to compile and train the model!" ] }, { "cell_type": "code", "execution_count": 92, "metadata": {}, "outputs": [], "source": [ "optimizer = keras.optimizers.Nadam(lr=1e-3)\n", "model.compile(loss=\"sparse_categorical_crossentropy\", optimizer=optimizer,\n", " metrics=[\"accuracy\"])\n", "model.fit(train_set, epochs=20, validation_data=valid_set)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "I have not done much hyperparameter search, so feel free to iterate on this model now and try to optimize it. For example, you could try removing the `LSTM` layer and replacing it with `Conv1D` layers. You could also play with the number of layers, the learning rate, the optimizer, and so on." ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Once you're satisfied with the performance of the model on the validation set, you can save it and evaluate it one last time on the test set:" ] }, { "cell_type": "code", "execution_count": 93, "metadata": {}, "outputs": [], "source": [ "model.save(\"my_bach_model.h5\")\n", "model.evaluate(test_set)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "**Note:** There's no real need for a test set in this exercise, since we will perform the final evaluation by just listening to the music produced by the model. So if you want, you can add the test set to the train set, and train the model again, hopefully getting a slightly better model (a quick sketch of this option follows below)."
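] }, { "cell_type": "markdown", "metadata": {}, "source": [ "For example, here is a minimal sketch of that option (assuming you are done with the test set for good): `bach_dataset()` accepts any list of chorales, so we can simply concatenate the two lists and fit the model again:" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Optional (added): merge the test chorales into the training set and retrain\n", "train_set_full = bach_dataset(train_chorales + test_chorales,\n", " shuffle_buffer_size=1000)\n", "# Uncomment to retrain on the combined data:\n", "#model.fit(train_set_full, epochs=20, validation_data=valid_set)" ]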
}, { "cell_type": "markdown", "metadata": {}, "source": [ "Now let's write a function that will generate a new chorale. We will give it a few seed chords; it will convert them to arpeggios (the format expected by the model) and use the model to predict the next note, then the next, and so on. In the end, it will group the notes 4 by 4 to create chords again, and return the resulting chorale." ] }, { "cell_type": "code", "execution_count": 94, "metadata": {}, "outputs": [], "source": [ "def generate_chorale(model, seed_chords, length):\n", " arpeggio = preprocess(tf.constant(seed_chords, dtype=tf.int64))\n", " arpeggio = tf.reshape(arpeggio, [1, -1])\n", " for chord in range(length):\n", " for note in range(4):\n", " next_note = model.predict_classes(arpeggio)[:1, -1:]\n", " arpeggio = tf.concat([arpeggio, next_note], axis=1)\n", " arpeggio = tf.where(arpeggio == 0, arpeggio, arpeggio + min_note - 1)\n", " return tf.reshape(arpeggio, shape=[-1, 4])" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "To test this function, we need some seed chords. Let's use the first 8 chords of one of the test chorales (it's actually just 2 different chords, each played 4 times):" ] }, { "cell_type": "code", "execution_count": 95, "metadata": {}, "outputs": [], "source": [ "seed_chords = test_chorales[2][:8]\n", "play_chords(seed_chords, amplitude=0.2)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Now we are ready to generate our first chorale! Let's ask the function to generate 56 more chords, for a total of 64 chords, i.e., 16 bars (assuming 4 chords per bar, i.e., a 4/4 signature):" ] }, { "cell_type": "code", "execution_count": 96, "metadata": {}, "outputs": [], "source": [ "new_chorale = generate_chorale(model, seed_chords, 56)\n", "play_chords(new_chorale)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "This approach has one major flaw: it is often too conservative. Indeed, the model will not take any risk: it will always choose the note with the highest score, and since repeating the previous note generally sounds good enough, it's the least risky option, so the algorithm will tend to make notes last longer and longer. Pretty boring. Plus, if you run the model multiple times, it will always generate the same melody.\n", "\n", "So let's spice things up a bit! Instead of always picking the note with the highest score, we will pick the next note randomly, according to the predicted probabilities. For example, if the model predicts a C3 with 75% probability, and a G3 with a 25% probability, then we will pick one of these two notes randomly, with these probabilities. We will also add a `temperature` parameter that will control how \"hot\" (i.e., daring) we want the system to feel. A high temperature will bring the predicted probabilities closer together, reducing the probability of the likely notes and increasing the probability of the unlikely ones."
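] }, { "cell_type": "markdown", "metadata": {}, "source": [ "To make the temperature's effect concrete, here is a tiny illustration with made-up probabilities for two notes, 75% and 25% (toy numbers, not actual model output); it mirrors the rescaling done in `generate_chorale_v2()` below:" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Toy demo (added): temperature > 1 pulls probabilities together,\n", "# temperature < 1 pushes them further apart.\n", "probas = np.array([0.75, 0.25])\n", "for temperature in (0.5, 1.0, 2.0):\n", " rescaled = probas ** (1 / temperature) # same as softmax(log(probas) / temperature)\n", " print(temperature, rescaled / rescaled.sum())" ]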
}, { "cell_type": "code", "execution_count": 97, "metadata": {}, "outputs": [], "source": [ "def generate_chorale_v2(model, seed_chords, length, temperature=1):\n", " arpeggio = preprocess(tf.constant(seed_chords, dtype=tf.int64))\n", " arpeggio = tf.reshape(arpeggio, [1, -1])\n", " for chord in range(length):\n", " for note in range(4):\n", " next_note_probas = model.predict(arpeggio)[0, -1:]\n", " rescaled_logits = tf.math.log(next_note_probas) / temperature\n", " next_note = tf.random.categorical(rescaled_logits, num_samples=1)\n", " arpeggio = tf.concat([arpeggio, next_note], axis=1)\n", " arpeggio = tf.where(arpeggio == 0, arpeggio, arpeggio + min_note - 1)\n", " return tf.reshape(arpeggio, shape=[-1, 4])" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Let's generate 3 chorales using this new function: one cold, one medium, and one hot (feel free to experiment with other seeds, lengths and temperatures). The code saves each chorale to a separate file. You can run these cells over and over again until you generate a masterpiece!\n", "\n", "**Please share your most beautiful generated chorale with me on Twitter @aureliengeron, I would really appreciate it! :))**" ] }, { "cell_type": "code", "execution_count": 98, "metadata": { "scrolled": true }, "outputs": [], "source": [ "new_chorale_v2_cold = generate_chorale_v2(model, seed_chords, 56, temperature=0.8)\n", "play_chords(new_chorale_v2_cold, filepath=\"bach_cold.wav\")" ] }, { "cell_type": "code", "execution_count": 99, "metadata": {}, "outputs": [], "source": [ "new_chorale_v2_medium = generate_chorale_v2(model, seed_chords, 56, temperature=1.0)\n", "play_chords(new_chorale_v2_medium, filepath=\"bach_medium.wav\")" ] }, { "cell_type": "code", "execution_count": 100, "metadata": { "scrolled": false }, "outputs": [], "source": [ "new_chorale_v2_hot = generate_chorale_v2(model, seed_chords, 56, temperature=1.5)\n", "play_chords(new_chorale_v2_hot, filepath=\"bach_hot.wav\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Lastly, you can try a fun social experiment: send your friends a few of your favorite generated chorales, plus the real chorale, and ask them to guess which one is the real one!" ] }, { "cell_type": "code", "execution_count": 101, "metadata": {}, "outputs": [], "source": [ "play_chords(test_chorales[2][:64], filepath=\"bach_test_4.wav\")" ] } ], "metadata": { @@ -1388,7 +2016,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.3" + "version": "3.7.6" }, "nav_menu": {}, "toc": { @@ -1402,5 +2030,5 @@ } }, "nbformat": 4, - "nbformat_minor": 1 + "nbformat_minor": 4 }