diff --git a/amy.py b/amy.py
index b1b20fe2..68947a80 100644
--- a/amy.py
+++ b/amy.py
@@ -162,7 +162,7 @@ def message(**kwargs):
     # Each keyword maps to two chars, first is the wire protocol prefix, second is an arg type code
     # I=int, F=float, S=str, L=list, C=ctrl_coefs
     kw_map = {'osc': 'vI', 'wave': 'wI', 'note': 'nF', 'vel': 'lF', 'amp': 'aC', 'freq': 'fC', 'duty': 'dC', 'feedback': 'bF', 'time': 'tI',
-              'reset': 'SI', 'phase': 'PF', 'pan': 'QC', 'client': 'gI', 'volume': 'vF', 'pitch_bend': 'sF', 'filter_freq': 'FC', 'resonance': 'RF',
+              'reset': 'SI', 'phase': 'PF', 'pan': 'QC', 'client': 'gI', 'volume': 'VF', 'pitch_bend': 'sF', 'filter_freq': 'FC', 'resonance': 'RF',
               'bp0': 'AL', 'bp1': 'BL', 'eg0_type': 'TI', 'eg1_type': 'XI', 'debug': 'DI', 'chained_osc': 'cI', 'mod_source': 'LI', 'eq': 'xL',
               'filter_type': 'GI', 'algorithm': 'oI', 'ratio': 'IF', 'latency_ms': 'NI', 'algo_source': 'OL', 'load_sample': 'zL', 'chorus': 'kL',
               'reverb': 'hL', 'echo': 'ML', 'load_patch': 'KI', 'store_patch': 'uS', 'voices': 'rL',
diff --git a/experiments/mido_piano/000-IMSLP172781-WIMA.cb18-wtc01.mid b/experiments/mido_piano/000-IMSLP172781-WIMA.cb18-wtc01.mid
new file mode 100644
index 00000000..2e3a7a21
Binary files /dev/null and b/experiments/mido_piano/000-IMSLP172781-WIMA.cb18-wtc01.mid differ
diff --git a/experiments/mido_piano/001-chopin_op66.mid b/experiments/mido_piano/001-chopin_op66.mid
new file mode 100644
index 00000000..1328ad04
Binary files /dev/null and b/experiments/mido_piano/001-chopin_op66.mid differ
diff --git a/experiments/mido_piano/002-chopin_op_25_no_12.mid b/experiments/mido_piano/002-chopin_op_25_no_12.mid
new file mode 100644
index 00000000..673c9f7e
Binary files /dev/null and b/experiments/mido_piano/002-chopin_op_25_no_12.mid differ
diff --git a/experiments/mido_piano/003-schumann_op_15_no_1.mid b/experiments/mido_piano/003-schumann_op_15_no_1.mid
new file mode 100644
index 00000000..83da221b
Binary files /dev/null and b/experiments/mido_piano/003-schumann_op_15_no_1.mid differ
diff --git a/experiments/mido_piano/instrument.py b/experiments/mido_piano/instrument.py
new file mode 100644
index 00000000..0240e5ee
--- /dev/null
+++ b/experiments/mido_piano/instrument.py
@@ -0,0 +1,150 @@
+"""Piano MIDI sound module based on AMY and `mido`.
+
+Example usage:
+
+```
+piano = instrument.Piano()
+piano.play_file('test.mid')
+```
+
+Assumptions:
+  * `amy.live()` and other setup are done external to this module.
+  * Patch 256 is piano, and we will use 32 voices.
+  * The `libamy` and `mido` packages are installed and supported by your
+    platform. `python-rtmidi` is also useful for `play_input`.
+  * You edited `#define AMY_OSCS` in amy/src/amy_config.h to have enough.
+    (Enough is (32 * 21 == 672), which is the number of voices in `Piano`
+    below times the number of oscs for the dpwe piano patch, except that
+    there are chorus oscs and 999 and stuff, so why not go big?)
+"""
+
+import mido
+import numpy as np
+
+import amy
+from experiments.mido_piano import midi
+
+
+class MidoSynth(midi.Synth):
+    """Bridge from `mido` to a Tulip `midi.Synth`."""
+
+    def __del__(self):
+        super().release()
+
+    def play_message(self,
+                     message: mido.Message,
+                     time: float | None = None) -> None:
+        """Plays a single MIDI message.
+
+        All input value ranges are assumed to be as in standard MIDI format,
+        which is in some cases different from what `midi.Synth` assumes.
+
+        Args:
+          message: The message to play.
+          time: Optional time to forward to amy_send.
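+
+        Example (a sketch, assuming an already-constructed `Piano` named
+        `piano`):
+
+        ```
+        piano.play_message(mido.Message('note_on', note=60, velocity=96))
+        # Forwards to self.note_on(60, velocity=96 / 127).
+        ```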
+ """ + if message.type == 'note_on': + self.note_on(message.note, + velocity=message.velocity / 127, + time=time) + elif message.type == 'note_off': + self.note_off(message.note, time=time) + elif message.is_cc(): + self.control_change(message.control, message.value, time=time) + + def play_file(self, + filename: str, + default_velocity: int = 64, + blocking: bool = True, + start_millis: float = 0.0) -> float: + """Plays a MIDI file. + + Args: + filename: Path to a MIDI file to play. + default_velocity: If this is set, and if all positive velocities of + note on events have the same value, then this value will replace the + constant velocity from the file. Without this, files with constant + velocity 127 sound bad. + blocking: Whether to use `mido.MidiFile.play`. The canonical use case + for setting this to `False` is `amy.render`. + start_millis: AMY time for the start of the file. Only matters when + `blocking == False`. + + Returns: + The duration of the MIDI file, in seconds. + """ + midi_file = mido.MidiFile(filename) + duration = sum((m.time for m in midi_file)) + velocities = [ + m.velocity for m in midi_file + if m.type == 'note_on' and m.velocity > 0 + ] + if (not velocities) or min(velocities) != max(velocities): + default_velocity = None + + def filter_fn(m: mido.Message) -> mido.Message: + if m.type == 'note_on' and m.velocity > 0 and default_velocity: + return m.copy(velocity=default_velocity) + return m + + if blocking: + for m in midi_file.play(): + self.play_message(filter_fn(m)) + else: + millis = start_millis + for m in midi_file: + m = filter_fn(m) + millis += m.time * 1000.0 + self.play_message(m, time=millis) + return duration + + def render(self, + filename: str, + volume_db: float = 0.0, + start_millis: float = 0.0) -> tuple[np.ndarray, float]: + """Renders a MIDI file to an array of samples. + + This can be useful for deubgging, for more accurate timing than + `mido.play`, and for faster than real-time rendering. + + Unlike other methods in this class, this one assumes that we are not + `amy.live`, since it calls `amy.render` to generate the samples. + + Args: + filename: Path to a MIDI file to play. + volume_db: Output volume in dB rel AMY volume=1.0. + start_millis: AMY time for the start of the file. Only matters when + `blocking == False`. + """ + amy.send(volume=np.pow(10.0, volume_db / 20.0), time=start_millis) + samples = amy.render( + self.play_file(filename, blocking=False, + start_millis=start_millis)) + return samples, amy.AMY_SAMPLE_RATE + + def play_input(self, name: str | None = None) -> None: + """Plays MIDI messages from a `mido` input. + + This is useful if you are connected to a USB instrument and have also + installed the `python-rtmidi` package. + + name: `mido` input name from which to consume messages. + `mido.get_input_names()` shows your options. USB replugging can cause + the name to change. If none, this attempts to find the first input + with "usb" in its lowercased name. 
+ """ + if not name: + for candidate in mido.get_input_names(): + if 'usb' in candidate.lower(): + name = candidate + break + for message in mido.open_input(name): + self.play_message(message) + + +class Piano(MidoSynth): + + def __init__(self, patch_time: float | None = None): + super().__init__(num_voices=16, + patch_number=256, + patch_time=patch_time) diff --git a/experiments/mido_piano/jam.py b/experiments/mido_piano/jam.py new file mode 100644 index 00000000..66fa334c --- /dev/null +++ b/experiments/mido_piano/jam.py @@ -0,0 +1,14 @@ +"""Plays your USB MIDI input in the piano voice.""" +import amy +from experiments.mido_piano import instrument + + +def run() -> None: + amy.send(volume=2.0) + amy.live() + piano = instrument.Piano() + piano.play_input() + + +if __name__ == '__main__': + run() diff --git a/experiments/mido_piano/midi.py b/experiments/mido_piano/midi.py new file mode 100644 index 00000000..c8ccbab8 --- /dev/null +++ b/experiments/mido_piano/midi.py @@ -0,0 +1,218 @@ +"""Pared-down fork of Tulip midi.py. + +piano_recital.py in the AMY repository is the dependant motivating the fork. + +Forking avoids a dependency cycle and eliminates some opportunities for +confusion about the necessity of or assumptions about globals like midi config +and event handler. + +`Queue` was removed in favor of the standard `collections.deque`, reflecting +the assumption that piano_recital.py will usually be run under CPython. +""" +import collections +import time as time_lib + +import amy + + +class VoiceObject: + """Object to wrap an amy voice.""" + + def __init__(self, amy_voice): + self.amy_voice = amy_voice + + def note_on(self, note, vel, time=None, sequence=None): + amy.send(time=time, + voices=self.amy_voice, + note=note, + vel=vel, + sequence=sequence) + + def note_off(self, time=None, sequence=None): + amy.send(time=time, voices=self.amy_voice, vel=0, sequence=sequence) + + +class Synth: + """Manage a polyphonic synthesizer by rotating among a fixed pool of voices. + + Provides methods: + synth.note_on(midi_note, velocity, time=None, sequence=None) + synth.note_off(midi_note, time=None, sequence=None) + synth.all_notes_off() + synth.program_change(patch_num) changes preset for all voices. + synth.control_change(control, value) modifies a parameter for all voices. + Provides read-back attributes (for voices.py UI): + synth.amy_voices + synth.patch_number + synth.patch_state - patch-specific data only used by clients e.g. UI state + + Note: The synth internally refers to its voices by indices in + range(0, num_voices). These numbers are not related to the actual amy + voices rendering the note; the amy voice number is internal to the + VoiceObjects and is opaque to the Synth object. + """ + # Class-wide record of which voice to allocate next. + allocated_amy_voices = set() + next_amy_patch_number = 1024 + + @classmethod + def reset(cls): + """Resets AMY and Synth's tracking of its state.""" + cls.allocated_amy_voices = set() + cls.next_amy_patch_number = 1024 + amy.reset() + + def __init__(self, + num_voices=6, + patch_number=None, + patch_string=None, + patch_time=None): + self.voice_objs = self._get_new_voices(num_voices) + self.released_voices = collections.deque(range(num_voices)) + self.active_voices = collections.deque(tuple(), num_voices) + # Dict to look up active voice from note number, for note-off. 
+        self.voice_of_note = {}
+        self.note_of_voice = [None] * num_voices
+        self.sustaining = False
+        self.sustained_notes = set()
+        # Fields used by UI
+        self.patch_number = None
+        self.patch_state = None
+        if patch_number is not None and patch_string is not None:
+            raise ValueError(
+                'You cannot specify both patch_number and patch_string.')
+        if patch_string is not None:
+            patch_number = Synth.next_amy_patch_number
+            Synth.next_amy_patch_number = patch_number + 1
+            amy.send(store_patch='%d,%s' % (patch_number, patch_string))
+        self.program_change(patch_number, time=patch_time)
+
+    def _get_new_voices(self, num_voices):
+        new_voices = []
+        next_amy_voice = 0
+        while len(new_voices) < num_voices:
+            while next_amy_voice in Synth.allocated_amy_voices:
+                next_amy_voice += 1
+            new_voices.append(next_amy_voice)
+            next_amy_voice += 1
+        self.amy_voice_nums = new_voices
+        Synth.allocated_amy_voices.update(new_voices)
+        voice_objects = []
+        for amy_voice_num in self.amy_voice_nums:
+            voice_objects.append(VoiceObject(amy_voice_num))
+        return voice_objects
+
+    @property
+    def amy_voices(self):
+        return [o.amy_voice for o in self.voice_objs]
+
+    @property
+    def num_voices(self):
+        return len(self.voice_objs)
+
+    # Send an AMY message to the voices in this synth.
+    def amy_send(self, **kwargs):
+        vstr = ",".join([str(a) for a in self.amy_voice_nums])
+        amy.send(voices=vstr, **kwargs)
+
+    def _get_next_voice(self, time):
+        """Return the next voice to use."""
+        # First try free/released_voices in order, then steal from active_voices.
+        if self.released_voices:
+            return self.released_voices.popleft()
+        # We have to steal an active voice.
+        stolen_voice = self.active_voices.popleft()
+        #print('Stealing voice for', self.note_of_voice[stolen_voice])
+        self._voice_off(stolen_voice, time=time)
+        return stolen_voice
+
+    def _voice_off(self, voice, time=None, sequence=None):
+        """Terminate voice, update note_of_voice, but don't alter the queues."""
+        self.voice_objs[voice].note_off(time=time, sequence=sequence)
+        # We no longer have a voice playing this note.
+        del self.voice_of_note[self.note_of_voice[voice]]
+        self.note_of_voice[voice] = None
+
+    def note_off(self, note, time=None, sequence=None):
+        if self.sustaining:
+            self.sustained_notes.add(note)
+            return
+        if note not in self.voice_of_note:
+            return
+        old_voice = self.voice_of_note[note]
+        self._voice_off(old_voice, time=time, sequence=sequence)
+        # Return to released.
+        self.active_voices.remove(old_voice)
+        self.released_voices.append(old_voice)
+
+    def all_notes_off(self, time=None):
+        self.sustain(False, time=time)
+        while self.active_voices:
+            voice = self.active_voices.popleft()
+            self._voice_off(voice, time=time)
+            self.released_voices.append(voice)
+
+    def note_on(self, note, velocity=1, time=None, sequence=None):
+        if not self.amy_voice_nums:
+            # Note on after synth.release()?
+            raise ValueError(
+                'Synth note on with no voices - synth has been released?')
+        if velocity == 0:
+            self.note_off(note, time=time, sequence=sequence)
+        else:
+            # Velocity > 0, note on.
+            if note in self.sustained_notes:
+                self.sustained_notes.remove(note)
+            if note in self.voice_of_note:
+                # Send another note-on to the voice already playing this note.
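+                # (Re-using that voice retriggers the note rather than
+                # stacking a second copy of it on a newly allocated voice.)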
+                new_voice = self.voice_of_note[note]
+            else:
+                new_voice = self._get_next_voice(time=time)
+                self.active_voices.append(new_voice)
+                self.voice_of_note[note] = new_voice
+                self.note_of_voice[new_voice] = note
+            self.voice_objs[new_voice].note_on(note,
+                                               velocity,
+                                               time=time,
+                                               sequence=sequence)
+
+    def sustain(self, state, time=None):
+        """Turn sustain on/off."""
+        if state:
+            self.sustaining = True
+        else:
+            self.sustaining = False
+            for midinote in self.sustained_notes:
+                self.note_off(midinote, time=time)
+            self.sustained_notes = set()
+
+    def get_patch_state(self):
+        return self.patch_state
+
+    def set_patch_state(self, state):
+        self.patch_state = state
+
+    def program_change(self, patch_number, time=None):
+        if patch_number != self.patch_number:
+            self.patch_number = patch_number
+            # Reset any modified state due to previous patch modifications.
+            self.patch_state = None
+            time_lib.sleep(0.1)  # "AMY queue will fill if not slept."
+            self.amy_send(load_patch=patch_number, time=time)
+
+    def control_change(self, control, value, time=None):
+        if control == 64:
+            if value > 100 and not self.sustaining:
+                self.sustain(True, time=time)
+            if value < 60 and self.sustaining:
+                self.sustain(False, time=time)
+
+    def release(self, time=None):
+        """Called to terminate this synth and release its amy_voice resources."""
+        # Turn off any active notes.
+        self.all_notes_off(time=time)
+        # Return all the amy_voices.
+        for amy_voice in self.amy_voice_nums:
+            Synth.allocated_amy_voices.remove(amy_voice)
+        self.amy_voice_nums = []
+        del self.voice_objs[:]
diff --git a/experiments/mido_piano/program.txt b/experiments/mido_piano/program.txt
new file mode 100644
index 00000000..250d7d9d
--- /dev/null
+++ b/experiments/mido_piano/program.txt
@@ -0,0 +1,35 @@
+000-IMSLP172781-WIMA.cb18-wtc01.mid
+Prelude and Fugue in C from the Well-tempered Clavier
+J. S. Bach
+BWV 846
+format: unknown
+https://imslp.org/wiki/Special:ImagefromIndex/172781/hfpn
+Accessed on January 18, 2025
+James L. Bailey
+Creative Commons Attribution Non-commercial Share Alike 3.0
+
+001-chopin_op66.mid
+Fantasie Impromptu
+Frédéric Chopin
+Op. 66
+with artificial quantization of note event time. *too* perfect.
+format: export from Ableton Live MIDI clip editor
+Matt Harvey
+Creative Commons CC0 (public domain)
+
+002-chopin_op_25_no_12.mid
+Ocean Etude
+Frédéric Chopin
+Op. 25 No. 12
+with terrible mistakes
+format: arecordmidi from Kawai CA-67 digital piano
+Matt Harvey
+Creative Commons CC0 (public domain)
+
+003-schumann_op_15_no_1.mid
+Of Foreign Lands and Peoples
+Robert Schumann
+Op. 15 No. 1
+format: arecordmidi from Kawai CA-67 digital piano
+Matt Harvey
+Creative Commons CC0 (public domain)
diff --git a/experiments/mido_piano/recital.py b/experiments/mido_piano/recital.py
new file mode 100644
index 00000000..e0c99c75
--- /dev/null
+++ b/experiments/mido_piano/recital.py
@@ -0,0 +1,47 @@
+"""Plays a collection of MIDI files using the AMY piano voice."""
+
+import os
+import time
+from typing import Iterable
+
+import amy
+from experiments.mido_piano import instrument
+
+
+def init_amy() -> None:
+    amy.live(audio_playback_device=0)
+    # Volume value was determined by making 8-finger ff chords on a Kawai CA-67
+    # come just short of the soft-clipping threshold.
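+    # (AMY's global `volume` appears to act as a linear gain on the final mix,
+    # with 1.0 as the default.)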
+    amy.send(volume=0.75)
+    amy.reverb(0.1)
+    amy.chorus(0)
+    amy.echo(0)
+
+
+def set_list() -> Iterable[str]:
+    """Yields the sorted MIDI filenames in this directory."""
+    directory = 'experiments/mido_piano'
+    for filename in sorted(os.listdir(directory)):
+        if filename.lower().endswith('.mid'):
+            yield os.path.join(directory, filename)
+
+
+def run() -> None:
+    init_amy()
+    try:
+        piano = instrument.Piano()
+        iter_filenames = iter(set_list())
+        filename = next(iter_filenames)
+        while True:
+            try:
+                piano.play_file(filename)
+                filename = next(iter_filenames)
+                time.sleep(2.0)
+            except (StopIteration, KeyboardInterrupt):
+                break
+    finally:
+        amy.pause()
+
+
+if __name__ == '__main__':
+    run()
diff --git a/experiments/mido_piano/render.py b/experiments/mido_piano/render.py
new file mode 100644
index 00000000..090e667d
--- /dev/null
+++ b/experiments/mido_piano/render.py
@@ -0,0 +1,34 @@
+"""Renders a MIDI file using AMY piano and soundfile.
+
+Usage:
+
+```
+python3 -m experiments.mido_piano.render input.mid output.wav 10.0
+```
+"""
+
+import os
+import sys
+
+import soundfile
+
+import amy
+from experiments.mido_piano import instrument
+
+
+def run(input_filename: str,
+        output_filename: str,
+        volume_db: float = 0.0) -> None:
+    piano = instrument.Piano(patch_time=0.0)
+    duration = None
+    try:
+        samples, sample_rate = piano.render(input_filename,
+                                            volume_db=volume_db)
+        duration = samples.shape[0] / sample_rate
+        soundfile.write(output_filename, samples, int(round(sample_rate)))
+    finally:
+        piano.release(time=None if duration is None else duration * 1000.0)
+
+
+if __name__ == '__main__':
+    run(sys.argv[1], sys.argv[2], float(sys.argv[3]))