import tensorflow as tf
print(tf.__version__)
import numpy as np
np.random.seed(2022)
# Extra configuration needed to use music21 on Windows: point music21 at the
# MuseScore 3 executable so scores can be rendered to PNG / opened as MusicXML.
# (Restored indentation: these settings must only run on Windows, inside the
# `if` body — the flattened original would execute them unconditionally.)
import os
from music21 import environment
if os.name == 'nt':
    us = environment.UserSettings()
    us['musescoreDirectPNGPath'] = 'C:/Program Files/MuseScore 3/bin/MuseScore3.exe'
    us['musicxmlPath'] = 'C:/Program Files/MuseScore 3/bin/MuseScore3.exe'
data_filepath = 'run/music_params.pkl'
save_path = 'run'
import sys
sys.path.append('./nw')
from LSTMMusic import ScoreDataset
# Build the dataset from MIDI files on the first run, caching the extracted
# parameters to data_filepath; on subsequent runs load straight from the cache.
# NOTE(review): `midi_paths` is not defined anywhere in this file — it must be
# supplied before the first run (presumably a list of .mid file paths), or the
# cache-miss branch raises NameError. TODO: confirm where it should come from.
if not os.path.exists(data_filepath):
    data_seq = ScoreDataset(save_path=data_filepath, midi_paths=midi_paths, seq_len=32)
else:
    data_seq = ScoreDataset(save_path=data_filepath)
# Split the dataset into training and validation subsets.
import numpy as np
val_split = 0.05
N_DATA = len(data_seq)
N_VAL = int(N_DATA * val_split)
# Shuffle all indices, then carve off the first N_VAL for validation.
# BUGFIX: the original used arr[:-N_VAL] / arr[-N_VAL:]; when N_VAL == 0
# (small dataset), arr[:-0] is EMPTY and arr[-0:] is the WHOLE array, so the
# entire dataset landed in the validation split and training got nothing.
# Non-negative slices degrade gracefully to "no validation data" instead.
arr = np.arange(N_DATA)
np.random.shuffle(arr)
val_indices = sorted(arr[:N_VAL])
train_indices = sorted(arr[N_VAL:])
(train_x_notes, train_x_durations), (train_y_notes, train_y_durations) = data_seq[train_indices]
(val_x_notes, val_x_durations), (val_y_notes, val_y_durations) = data_seq[val_indices]
print(len(train_indices), len(val_indices))
print(train_indices[:20])
print(val_indices[:20])
# Convert the nested Python lists to ndarrays for downstream Keras calls.
train_x_notes = np.array(train_x_notes)
train_x_durations = np.array(train_x_durations)
val_x_notes = np.array(val_x_notes)
val_x_durations = np.array(val_x_durations)
import tensorflow as tf
# One-hot encode the target class indices (pitch and duration) so they match
# the model's softmax output heads.
to_cat = tf.keras.utils.to_categorical
train_y_notes_ohv = to_cat(train_y_notes, data_seq.c_notes)
train_y_durations_ohv = to_cat(train_y_durations, data_seq.c_durations)
val_y_notes_ohv = to_cat(val_y_notes, data_seq.c_notes)
val_y_durations_ohv = to_cat(val_y_durations, data_seq.c_durations)
import sys
sys.path.append('./nw')
from LSTMMusic import LSTMMusic
# Restore the previously trained model (weights + training history) from disk.
lstm_music = LSTMMusic.load(save_path)
print(lstm_music.epochs)
%matplotlib inline
import matplotlib.pyplot as plt
# Overlay the recorded training curves on one chart: total loss plus the
# per-head (pitch / duration) losses, for both training and validation.
LSTMMusic.plot_history(
[
lstm_music.losses, lstm_music.n_losses, lstm_music.d_losses,
lstm_music.val_losses, lstm_music.val_n_losses, lstm_music.val_d_losses
],
[
'loss', 'pitch_loss', 'duration_loss', 'val_loss', 'val_pitch_loss', 'val_duration_loss'
]
)
# Sample 0: seed the model with the first validation sequence and generate 64
# new events, passing 0.5 as the two extra sampling arguments (presumably the
# pitch / duration temperatures — TODO confirm against LSTMMusic.generate).
s_notes0 = val_x_notes[0][:lstm_music.seq_len]
s_durations0 = val_x_durations[0][:lstm_music.seq_len]
g_notes0, g_durations0 = lstm_music.generate(s_notes0, s_durations0, 64, 0.5, 0.5)
print(g_notes0)
# Collapse simultaneous notes into chords, write the result out as MIDI,
# then render it (score view and MIDI player).
midi_stream0 = data_seq.getMidiStream(g_notes0, g_durations0).chordify()
midi_path = os.path.join(save_path, f'output_0_{lstm_music.epochs}.mid')
midi_stream0.write('midi', fp=midi_path)
midi_stream0.show()
midi_stream0.show('midi')
# Sample 1: seed with the second validation sequence; generate 64 events using
# the model's default sampling parameters (no temperatures passed).
s_notes1 = val_x_notes[1][:lstm_music.seq_len]
s_durations1 = val_x_durations[1][:lstm_music.seq_len]
g_notes1, g_durations1 = lstm_music.generate(s_notes1, s_durations1, 64)
print(g_notes1)
# Chordify, save as MIDI, then render (score view and MIDI player).
midi_stream1 = data_seq.getMidiStream(g_notes1, g_durations1).chordify()
midi_path = os.path.join(save_path, f'output_1_{lstm_music.epochs}.mid')
midi_stream1.write('midi', fp=midi_path)
midi_stream1.show()
midi_stream1.show('midi')
# Sample 2: seed with the third validation sequence; generate 64 events using
# the model's default sampling parameters (no temperatures passed).
s_notes2 = val_x_notes[2][:lstm_music.seq_len]
s_durations2 = val_x_durations[2][:lstm_music.seq_len]
g_notes2, g_durations2 = lstm_music.generate(s_notes2, s_durations2, 64)
print(g_notes2)
# Chordify, save as MIDI, then render (score view and MIDI player).
midi_stream2 = data_seq.getMidiStream(g_notes2, g_durations2).chordify()
midi_path = os.path.join(save_path, f'output_2_{lstm_music.epochs}.mid')
midi_stream2.write('midi', fp=midi_path)
midi_stream2.show()
midi_stream2.show('midi')