Skip to content

Commit

Permalink
updated contours, added piano roll sonification
Browse files Browse the repository at this point in the history
fixed unit tests for piano roll sonification

added a contour sonification test

fixed coveralls

fixed multi-contour sonification

fixed a basis error in multi-contour plotting

added downbeat sonification

modified downbeat metronome pitches

fixed a length issue in pitch contour sonification

expanded contour test
  • Loading branch information
bmcfee committed Sep 2, 2016
1 parent 4cf9dc4 commit 46c2042
Show file tree
Hide file tree
Showing 4 changed files with 169 additions and 53 deletions.
3 changes: 3 additions & 0 deletions .travis_dependencies.sh
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,9 @@ if [ ! -d "$src" ]; then

export PATH="$src/bin:$PATH"
conda_create
source activate $ENV_NAME
pip install python-coveralls
source deactivate
popd
else
echo "Using cached dependencies"
Expand Down
7 changes: 4 additions & 3 deletions jams/display.py
Original file line number Diff line number Diff line change
Expand Up @@ -88,11 +88,12 @@ def pitch_contour(annotation, **kwargs):
indices = np.unique([v['index'] for v in values])

for idx in indices:
freqs = np.asarray([v['frequency'] for v in values if v['index'] == idx])
unvoiced = ~np.asarray([v['voiced'] for v in values if v['index'] == idx])
rows = annotation.data.value.apply(lambda x: x['index'] == idx).nonzero()[0]
freqs = np.asarray([values[r]['frequency'] for r in rows])
unvoiced = ~np.asarray([values[r]['voiced'] for r in rows])
freqs[unvoiced] *= -1

ax = mir_eval.display.pitch(times[:, 0], freqs, unvoiced=True,
ax = mir_eval.display.pitch(times[rows, 0], freqs, unvoiced=True,
ax=ax,
**kwargs)
return ax
Expand Down
137 changes: 106 additions & 31 deletions jams/sonify.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,10 +21,25 @@
__all__ = ['sonify']


def mkclick(freq, sr=22050, duration=0.1):
    '''Synthesize a single exponentially decaying click tone.

    This mirrors the click synthesis inside mir_eval.sonify.clicks,
    but lets the caller choose the tone frequency and sample duration.

    Parameters
    ----------
    freq : number
        Frequency of the click tone (Hz)
    sr : positive int
        Sampling rate of the output
    duration : positive number
        Duration of the click (seconds)

    Returns
    -------
    np.ndarray
        The synthesized click waveform
    '''
    sample_idx = np.arange(int(sr * duration))

    # Pure tone at the requested frequency
    tone = np.sin(2 * np.pi * sample_idx * freq / float(sr))

    # 10ms-scale exponential decay envelope
    envelope = np.exp(-sample_idx / (1e-2 * sr))

    return tone * envelope


def clicks(annotation, sr=22050, length=None, **kwargs):
'''Sonify clicks timings
'''Sonify events with clicks.
This uses mir_eval.sonify.clicks, and is appropriate for instantaneous
events such as beats or segment boundaries.
'''

interval, _ = annotation.data.to_interval_values()
Expand All @@ -33,8 +48,42 @@ def clicks(annotation, sr=22050, length=None, **kwargs):
fs=sr, length=length, **kwargs)


def downbeat(annotation, sr=22050, length=None, **kwargs):
    '''Sonify beats and downbeats together.

    Downbeats (``position == 1``) are rendered with a higher-pitched
    click (3 * 440 Hz) than ordinary beats (2 * 440 Hz), and the two
    click tracks are summed.

    Parameters
    ----------
    annotation : jams.Annotation
        Annotation in the ``beat_position`` namespace; each observation
        value is a dict with (at least) a ``position`` field
    sr : positive int
        Sampling rate of the output
    length : int or None
        Desired output length in samples.  If ``None``, the length is
        inferred from the last event time plus one click duration.
    kwargs
        Accepted for API symmetry with the other sonifiers, but
        currently ignored (the click shapes are fixed here).

    Returns
    -------
    y : np.ndarray
        The synthesized beat/downbeat click track
    '''
    beat_click = mkclick(440 * 2, sr=sr)
    downbeat_click = mkclick(440 * 3, sr=sr)

    intervals, values = annotation.data.to_interval_values()

    beats, downbeats = [], []

    for time, value in zip(intervals[:, 0], values):
        # position == 1 marks the first beat of a measure
        if value['position'] == 1:
            downbeats.append(time)
        else:
            beats.append(time)

    if length is None:
        # Guard against an empty annotation: np.max on an empty array
        # raises ValueError, so fall back to a zero-length output
        if len(intervals):
            # Leave room for the tail of a click at the final event
            length = int(sr * np.max(intervals)) + len(beat_click) + 1
        else:
            length = 0

    y = filter_kwargs(mir_eval.sonify.clicks,
                      np.asarray(beats),
                      fs=sr, length=length, click=beat_click)

    y += filter_kwargs(mir_eval.sonify.clicks,
                       np.asarray(downbeats),
                       fs=sr, length=length, click=downbeat_click)

    return y


def chord(annotation, sr=22050, length=None, **kwargs):
'''Sonify chords'''
'''Sonify chords
This uses mir_eval.sonify.chords.
'''

intervals, chords = annotation.data.to_interval_values()

Expand All @@ -44,45 +93,71 @@ def chord(annotation, sr=22050, length=None, **kwargs):
**kwargs)


def pitch_contour(annotation, sr=22050, length=None, **kwargs):
    '''Sonify pitch contours.

    This uses mir_eval.sonify.pitch_contour, and should only be applied
    to pitch annotations using the pitch_contour namespace.

    Each contour is sonified independently, and the resulting waveforms
    are summed together.
    '''
    times, values = annotation.data.to_interval_values()

    # Distinct contour identifiers present in this annotation
    contour_ids = np.unique([obs['index'] for obs in values])

    y_out = 0.0
    for contour in contour_ids:
        # Row positions of the observations belonging to this contour
        rows = annotation.data.value.apply(
            lambda obs: obs['index'] == contour).nonzero()[0]

        freqs = np.asarray([values[r]['frequency'] for r in rows])

        # mir_eval encodes unvoiced frames as negated frequencies
        unvoiced = ~np.asarray([values[r]['voiced'] for r in rows])
        freqs[unvoiced] *= -1

        y_out = y_out + filter_kwargs(mir_eval.sonify.pitch_contour,
                                      times[rows, 0],
                                      freqs,
                                      fs=sr,
                                      length=length,
                                      **kwargs)
        if length is None:
            # Pin the length after the first contour so subsequent
            # contours produce arrays of matching shape for summation
            length = len(y_out)

    return y_out


def piano_roll(annotation, sr=22050, length=None, **kwargs):
    '''Sonify a piano-roll.

    This uses mir_eval.sonify.time_frequency, and is appropriate
    for sparse transcription data, e.g., annotations in the `note_midi`
    namespace.

    Parameters
    ----------
    annotation : jams.Annotation
        Sparse note annotation; each observation is one note event
    sr : positive int
        Sampling rate of the output
    length : int or None
        Desired output length in samples
    kwargs
        Additional arguments forwarded to mir_eval.sonify.time_frequency

    Returns
    -------
    np.ndarray
        The synthesized waveform
    '''
    intervals, pitches = annotation.data.to_interval_values()

    # Construct the pitchogram: one row per distinct pitch
    # (np.unique returns sorted values, fixing the row order),
    # one column per note event
    pitch_map = {f: idx for idx, f in enumerate(np.unique(pitches))}

    gram = np.zeros((len(pitch_map), len(intervals)))

    for col, f in enumerate(pitches):
        gram[pitch_map[f], col] = 1

    # BUG FIX: time_frequency expects one frequency per *row* of
    # ``gram``.  Passing the raw per-event ``pitches`` list breaks as
    # soon as any pitch occurs more than once; the sorted unique
    # frequencies match the row order established by ``pitch_map``.
    return filter_kwargs(mir_eval.sonify.time_frequency,
                         gram, np.unique(pitches), intervals,
                         sr, length=length, **kwargs)


# Ordered mapping from namespace to sonification routine.  Insertion
# order matters: namespaces are tried in order during coercion, so the
# more specific 'beat_position' must precede the generic 'beat'.
SONIFY_MAPPING = OrderedDict([('beat_position', downbeat),
                              ('beat', clicks),
                              ('segment_open', clicks),
                              ('onset', clicks),
                              ('chord', chord),
                              ('note_hz', piano_roll),
                              ('pitch_contour', pitch_contour)])


def sonify(annotation, sr=22050, duration=None, **kwargs):
Expand All @@ -93,7 +168,7 @@ def sonify(annotation, sr=22050, duration=None, **kwargs):
annotation : jams.Annotation
The annotation to sonify
sr = : int > 0
sr = : positive number
The sampling rate of the output waveform
duration : float (optional)
Expand All @@ -119,8 +194,8 @@ def sonify(annotation, sr=22050, duration=None, **kwargs):

for namespace, func in six.iteritems(SONIFY_MAPPING):
try:
coerce_annotation(annotation, namespace)
return func(annotation, sr=sr, length=length, **kwargs)
ann = coerce_annotation(annotation, namespace)
return func(ann, sr=sr, length=length, **kwargs)
except NamespaceError:
pass

Expand Down
75 changes: 56 additions & 19 deletions jams/tests/sonify_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@

import jams


@raises(jams.NamespaceError)
def test_no_sonify():

Expand All @@ -23,6 +24,7 @@ def test_bad_sonify():

jams.sonify.sonify(ann)


def test_duration():

def __test(ns, duration, sr):
Expand All @@ -34,46 +36,62 @@ def __test(ns, duration, sr):
if duration is not None:
eq_(len(y), int(sr * duration))


for ns in ['segment_open', 'chord']:
for sr in [8000, 11025]:
for dur in [None, 5.0, 10.0]:
yield __test, ns, dur, sr


def test_note_hz():
    '''A single note_hz observation sonifies to the requested length.'''
    ann = jams.Annotation(namespace='note_hz')
    ann.append(time=0, duration=1, value=261.0)

    signal = jams.sonify.sonify(ann, sr=8000, duration=2.0)

    eq_(len(signal), 2 * 8000)


def test_pitch_hz_neg():
    '''Negative (unvoiced) and zero pitches must sonify to silence.'''
    ann = jams.Annotation(namespace='pitch_hz')
    ann.append(time=0, duration=1, value=-261.0)
    ann.append(time=1, duration=1, value=0.0)

    signal = jams.sonify.sonify(ann, sr=8000, duration=2.0)

    eq_(len(signal), 2 * 8000)
    # Unvoiced/zero frames contribute no energy
    assert not np.any(signal)

def test_note_hz_nolength():
    '''When no duration is given, length is inferred from the annotation.'''
    ann = jams.Annotation(namespace='note_hz')
    ann.append(time=0, duration=1, value=261.0)

    signal = jams.sonify.sonify(ann, sr=8000)

    eq_(len(signal), 8000)
    # A voiced note should produce non-silent output
    assert np.any(signal)


def test_note_midi():
    '''note_midi annotations sonify to the requested length.'''
    ann = jams.Annotation(namespace='note_midi')
    ann.append(time=0, duration=1, value=60)

    signal = jams.sonify.sonify(ann, sr=8000, duration=2.0)

    eq_(len(signal), 2 * 8000)


def test_contour():
    '''Sonify a synthetic vibrato contour at several target durations.'''
    total = 5.0
    hop = 0.01
    rate = 5

    # Build a contour with deep vibrato, unvoiced over [3s, 4s]
    grid = np.linspace(0, total, num=int(total / hop))
    freqs = 220 + 20 * np.sin(2 * np.pi * grid * rate)

    ann = jams.Annotation(namespace='pitch_contour')
    for t, f in zip(grid, freqs):
        ann.append(time=t, duration=hop,
                   value={'frequency': f,
                          'index': 0,
                          'voiced': (t < 3 or t > 4)})

    def __test(ann, duration):
        y = jams.sonify.sonify(ann, sr=8000, duration=duration)
        if duration is not None:
            eq_(len(y), 8000 * duration)

    for duration in [None, 5.0, 10.0]:
        yield __test, ann, duration


def test_chord():

def __test(namespace, value):
Expand All @@ -93,9 +111,28 @@ def __test(namespace, value):
ann = jams.Annotation(namespace=namespace)
ann.append(time=0.5, duration=0, value=value)
y = jams.sonify.sonify(ann, sr=8000, duration=2.0)

eq_(len(y), 8000 * 2)

yield __test, 'beat', 1
yield __test, 'segment_open', 'C'
yield __test, 'onset', 1


def test_beat_position():
    '''beat_position annotations sonify as beat/downbeat click tracks.'''
    ann = jams.Annotation(namespace='beat_position')

    # Ten seconds of 4/4 at 4 beats per second
    for count, time in enumerate(np.arange(0, 10, 0.25)):
        ann.append(time=time, duration=0,
                   value=dict(position=1 + count % 4,
                              measure=1 + count // 4,
                              num_beats=4,
                              beat_units=4))

    def __test(ann, sr, duration):
        yout = jams.sonify.sonify(ann, sr=sr, duration=duration)
        if duration is not None:
            eq_(len(yout), int(duration * sr))

    for length in [None, 5, 15]:
        yield __test, ann, 8000, length

0 comments on commit 46c2042

Please sign in to comment.