diff --git a/examples/audio/md/stft.md b/examples/audio/md/stft.md
index 6f898ae474..08c6d42001 100644
--- a/examples/audio/md/stft.md
+++ b/examples/audio/md/stft.md
@@ -605,7 +605,7 @@ plt.show()
-
+
@@ -618,7 +618,7 @@ plot_single_spectrogram(sample_wav_data)
-
+
@@ -631,7 +631,7 @@ plot_multi_bandwidth_spectrogram(sample_wav_data)
-
+
@@ -643,9 +643,7 @@ def read_dataset(df, folds):
     msk = df["fold"].isin(folds)
     filenames = df["filename"][msk]
     targets = df["target"][msk].values
-    waves = np.array(
-        [read_wav_file(fil) for fil in filenames], dtype=np.float32
-    )
+    waves = np.array([read_wav_file(fil) for fil in filenames], dtype=np.float32)
     return waves, targets
 ```
@@ -1790,7 +1788,7 @@ plt.show()
-
+
@@ -1802,9 +1800,7 @@ Running the models on the test set.
 ```python
 _, test_acc = model1d.evaluate(test_x, test_y)
-print(
-    f"1D model wit non-trainable STFT -> Test Accuracy: {test_acc * 100:.2f}%"
-)
+print(f"1D model wit non-trainable STFT -> Test Accuracy: {test_acc * 100:.2f}%")
 ```

 [1m3/3[0m [32m━━━━━━━━━━━━━━━━━━━━[0m[37m[0m [1m3s[0m 307ms/step - accuracy: 0.8148 - loss: 0.6244
diff --git a/templates/examples/audio/stft.md b/templates/examples/audio/stft.md
index 7fe77f719a..de73a0453d 100644
--- a/templates/examples/audio/stft.md
+++ b/templates/examples/audio/stft.md
@@ -6,6 +6,7 @@
 **Description:** Introducing the `STFTSpectrogram` layer to extract spectrograms for audio classification.

+
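
For reference, this is how the reformatted `read_dataset` helper reads once the patch is applied — a minimal sketch only; it assumes the surrounding example defines `read_wav_file` and provides a pandas DataFrame with `"fold"`, `"filename"`, and `"target"` columns.

```python
import numpy as np


def read_dataset(df, folds):
    # Select only the rows belonging to the requested folds.
    msk = df["fold"].isin(folds)
    filenames = df["filename"][msk]
    targets = df["target"][msk].values
    # Load each audio file into a single float32 array (post-patch single-line form).
    waves = np.array([read_wav_file(fil) for fil in filenames], dtype=np.float32)
    return waves, targets
```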