Skip to content

Commit

Permalink
Move test_match_notes, rename no_offset metrics
Browse files Browse the repository at this point in the history
In addition to being moved, test_match_notes has also been updated.
  • Loading branch information
justinsalamon committed Feb 22, 2016
1 parent 1f3faeb commit 73318a1
Show file tree
Hide file tree
Showing 6 changed files with 66 additions and 66 deletions.
25 changes: 4 additions & 21 deletions mir_eval/transcription.py
Original file line number Diff line number Diff line change
Expand Up @@ -233,7 +233,7 @@ def precision_recall_f1(ref_intervals, ref_pitches, est_intervals, est_pitches,
>>> precision, recall, f_measure =
... mir_eval.transcription.precision_recall_f1(ref_intervals,
... ref_pitches, est_intervals, est_pitches)
>>> precision_nooffset, recall_nooffset, f_measure_nooffset =
>>> precision_no_offset, recall_no_offset, f_measure_no_offset =
... mir_eval.transcription.precision_recall_f1(ref_intervals,
... ref_pitches, est_intervals, est_pitches, offset_ratio=None)
Expand Down Expand Up @@ -327,23 +327,6 @@ def evaluate(ref_intervals, ref_pitches, est_intervals, est_pitches, **kwargs):
# Compute all the metrics
scores = collections.OrderedDict()

# # Precision, recall and f-measure NOT taking note offsets into account
# kwargs['with_offset'] = False
# (scores['Precision'],
# scores['Recall'],
# scores['F-measure']) = util.filter_kwargs(precision_recall_f1,
# ref_intervals, ref_pitches,
# est_intervals, est_pitches,
# **kwargs)
#
# # Precision, recall and f-measure taking note offsets into account
# kwargs['with_offset'] = True
# (scores['Precision_with_offset'],
# scores['Recall_with_offset'],
# scores['F-measure_with_offset']) = util.filter_kwargs(
# precision_recall_f1, ref_intervals, ref_pitches, est_intervals,
# est_pitches, **kwargs)

# Precision, recall and f-measure taking note offsets into account
offset_ratio = kwargs.get('offset_ratio', .2)
if offset_ratio is not None:
Expand All @@ -355,9 +338,9 @@ def evaluate(ref_intervals, ref_pitches, est_intervals, est_pitches, **kwargs):

# Precision, recall and f-measure NOT taking note offsets into account
kwargs['offset_ratio'] = None
(scores['Precision_nooffset'],
scores['Recall_nooffset'],
scores['F-measure_nooffset']) = \
(scores['Precision_no_offset'],
scores['Recall_no_offset'],
scores['F-measure_no_offset']) = \
util.filter_kwargs(precision_recall_f1, ref_intervals, ref_pitches,
est_intervals, est_pitches, **kwargs)

Expand Down
5 changes: 0 additions & 5 deletions tests/data/transcription/est00.txt

This file was deleted.

8 changes: 0 additions & 8 deletions tests/data/transcription/output00.json

This file was deleted.

4 changes: 0 additions & 4 deletions tests/data/transcription/ref00.txt

This file was deleted.

72 changes: 62 additions & 10 deletions tests/test_transcription.py
Original file line number Diff line number Diff line change
@@ -1,21 +1,73 @@
# CREATED: 2/9/16 2:27 PM by Justin Salamon <[email protected]>

import mir_eval
import json
import numpy as np

A_TOL = 1e-12


def _load_unit_test_reference():

ref = np.array([
[0.100, 0.300, 220.000],
[0.300, 0.400, 246.942],
[0.500, 0.600, 277.183],
[0.550, 0.650, 293.665]])

return ref[:, :2], ref[:, 2]


def _load_unit_test_estimate():

est = np.array([
[0.120, 0.290, 225.000],
[0.300, 0.350, 246.942],
[0.500, 0.600, 500.000],
[0.550, 0.600, 293.665],
[0.560, 0.650, 293.665]])

return est[:, :2], est[:, 2]


def _load_unit_test_scores():

scores = {
"Precision": 0.4,
"Recall": 0.5,
"F-measure": 0.4444444444444445,
"Precision_no_offset": 0.6,
"Recall_no_offset": 0.75,
"F-measure_no_offset": 0.6666666666666665
}

return scores


def test_match_notes():
    """Check note matching both with and without the offset criterion."""
    ref_intervals, ref_pitches = _load_unit_test_reference()
    est_intervals, est_pitches = _load_unit_test_estimate()

    # With the default offset requirement only two notes match.
    with_offset = mir_eval.transcription.match_notes(
        ref_intervals, ref_pitches, est_intervals, est_pitches)
    assert with_offset == [(0, 0), (3, 4)]

    # Ignoring offsets (offset_ratio=None) admits an additional match.
    no_offset = mir_eval.transcription.match_notes(
        ref_intervals, ref_pitches, est_intervals, est_pitches,
        offset_ratio=None)
    assert no_offset == [(0, 0), (1, 1), (3, 3)]


def test_precision_recall_f1():

ref_int, ref_pitch = mir_eval.io.load_valued_intervals(
'tests/data/transcription/ref00.txt')
est_int, est_pitch = mir_eval.io.load_valued_intervals(
'tests/data/transcription/est00.txt')
# load test data
ref_int, ref_pitch = _load_unit_test_reference()
est_int, est_pitch = _load_unit_test_estimate()

# load expected results
scores = json.load(open('tests/data/transcription/output00.json', 'rb'))
scores = _load_unit_test_scores()

precision, recall, f_measure = \
mir_eval.transcription.precision_recall_f1(ref_int, ref_pitch, est_int,
Expand All @@ -32,7 +84,7 @@ def test_precision_recall_f1():
offset_ratio=None)

scores_gen = np.array([precision, recall, f_measure])
scores_exp = np.array([scores['Precision_nooffset'],
scores['Recall_nooffset'],
scores['F-measure_nooffset']])
scores_exp = np.array([scores['Precision_no_offset'],
scores['Recall_no_offset'],
scores['F-measure_no_offset']])
assert np.allclose(scores_exp, scores_gen, atol=A_TOL)
18 changes: 0 additions & 18 deletions tests/test_util.py
Original file line number Diff line number Diff line change
Expand Up @@ -158,21 +158,3 @@ def test_bipartite_match():
for k in matching:
v = matching[k]
assert v in G[k] or k in G[v]


def test_match_notes():
    """Check util.match_notes against fixture files, with and without offsets."""

    # Load reference and estimated notes from the transcription fixtures.
    # NOTE(review): this commit deletes these fixture files and moves the
    # test to tests/test_transcription.py.
    ref_int, ref_pitch = mir_eval.io.load_valued_intervals(
        'tests/data/transcription/ref00.txt')
    est_int, est_pitch = mir_eval.io.load_valued_intervals(
        'tests/data/transcription/est00.txt')

    # Default matching ignores offsets; three note pairs match.
    matching = mir_eval.util.match_notes(ref_int, ref_pitch, est_int,
                                         est_pitch)

    assert matching == [(0, 0), (1, 1), (3, 3)]

    # Requiring offset agreement leaves only two matches.
    matching = mir_eval.util.match_notes(ref_int, ref_pitch, est_int,
                                         est_pitch, with_offset=True)

    assert matching == [(0, 0), (3, 4)]

0 comments on commit 73318a1

Please sign in to comment.