Skip to content

Commit

Permalink
Merge pull request #170 from craffel/transcription
Browse files Browse the repository at this point in the history
Transcription [wip]
  • Loading branch information
justinsalamon committed Feb 22, 2016
2 parents 8af64a6 + 1077366 commit 8746248
Show file tree
Hide file tree
Showing 40 changed files with 4,815 additions and 3 deletions.
6 changes: 6 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -37,3 +37,9 @@ Thumbs.db

# Vim
*.swp

# pycharm
.idea/*

# docs
docs/_build/*
8 changes: 8 additions & 0 deletions docs/index.rst
Original file line number Diff line number Diff line change
Expand Up @@ -193,6 +193,14 @@ The following subsections document each submodule.
:show-inheritance:
:member-order: bysource

:mod:`mir_eval.transcription`
-----------------------------
.. automodule:: mir_eval.transcription
:members:
:undoc-members:
:show-inheritance:
:member-order: bysource


Indices and tables
==================
Expand Down
62 changes: 62 additions & 0 deletions evaluators/transcription_eval.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,62 @@
#!/usr/bin/env python
'''
CREATED: 2/9/16 2:59 PM by Justin Salamon <[email protected]>
Compute note transcription evaluation metrics
Usage:
./transcription_eval.py REFERENCE.TXT ESTIMATED.TXT
'''

from __future__ import print_function
import argparse
import sys
import os
import eval_utilities

import mir_eval


def process_arguments():
    '''Parse the command-line parameters and return them as a dictionary.'''

    parser = argparse.ArgumentParser(
        description='mir_eval transcription evaluation')

    # Optional destination for a JSON dump of the computed scores
    parser.add_argument('-o', dest='output_file', action='store', type=str,
                        default=None, help='Store results in json format')

    # Required positional paths: reference annotation first, estimate second
    parser.add_argument('reference_file', action='store',
                        help='path to the reference annotation file')
    parser.add_argument('estimated_file', action='store',
                        help='path to the estimated annotation file')

    return vars(parser.parse_args(sys.argv[1:]))

if __name__ == '__main__':
    # Collect the command-line parameters
    params = process_arguments()

    # Load the reference and estimated note annotations
    # (each file provides intervals plus a per-interval pitch value)
    ref_intervals, ref_pitches = mir_eval.io.load_valued_intervals(
        params['reference_file'])
    est_intervals, est_pitches = mir_eval.io.load_valued_intervals(
        params['estimated_file'])

    # Run the full set of transcription metrics on the two annotations
    scores = mir_eval.transcription.evaluate(ref_intervals, ref_pitches,
                                             est_intervals, est_pitches)

    # Report which pair of files was compared, then the scores themselves
    print("{} vs. {}".format(os.path.basename(params['reference_file']),
                             os.path.basename(params['estimated_file'])))
    eval_utilities.print_evaluation(scores)

    # Optionally persist the scores as JSON
    if params['output_file']:
        print('Saving results to: ', params['output_file'])
        eval_utilities.save_results(scores, params['output_file'])
1 change: 1 addition & 0 deletions mir_eval/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,5 +14,6 @@
from . import pattern
from . import tempo
from . import hierarchy
from . import transcription

__version__ = '0.2'
41 changes: 41 additions & 0 deletions mir_eval/io.py
Original file line number Diff line number Diff line change
Expand Up @@ -399,3 +399,44 @@ def load_wav(path, mono=True):
if mono and audio_data.ndim != 1:
audio_data = audio_data.mean(axis=1)
return audio_data, fs


def load_valued_intervals(filename, delimiter=r'\s+'):
    r"""Import valued intervals from an annotation file.

    The file should consist of three columns of numeric values: the first
    two correspond to the start and end time of each interval, and the
    third corresponds to the value of that interval.  This is primarily
    useful for processing events which span a duration and have a numeric
    value, such as piano-roll notes which have an onset, offset, and a
    pitch value.

    Parameters
    ----------
    filename : str
        Path to the annotation file
    delimiter : str
        Separator regular expression.
        By default, lines will be split by any amount of whitespace.

    Returns
    -------
    intervals : np.ndarray, shape=(n_events, 2)
        Array of event start and end times
    values : np.ndarray, shape=(n_events,)
        Array of values
    """
    # Parse the three numeric columns with the generic delimited-file loader
    onsets, offsets, event_values = load_delimited(
        filename, [float, float, float], delimiter)

    # Combine the onset/offset columns into an (n_events, 2) interval matrix
    intervals = np.array([onsets, offsets]).T

    # Check interval well-formedness, but demote any failure to a warning
    # so that slightly malformed annotations can still be loaded
    try:
        util.validate_intervals(intervals)
    except ValueError as error:
        warnings.warn(error.args[0])

    return intervals, np.array(event_values)
Loading

0 comments on commit 8746248

Please sign in to comment.