From 7b1be80ef6d61a95df69d4db4a1baefc234babf2 Mon Sep 17 00:00:00 2001
From: anonymous781384
Date: Thu, 2 May 2024 15:07:11 +0200
Subject: [PATCH] midi table update 3

---
 _config.yml | 3 +++
 index.md    | 4 ++--
 2 files changed, 5 insertions(+), 2 deletions(-)

diff --git a/_config.yml b/_config.yml
index 0ec3ab5..cbf7152 100644
--- a/_config.yml
+++ b/_config.yml
@@ -1,2 +1,5 @@
 #title: [Combining audio control and style transfer using latent diffusion]
 title: null
+markdown: redcarpet
+redcarpet:
+  extensions: [footnotes]
\ No newline at end of file
diff --git a/index.md b/index.md
index cb99647..c72a4b7 100644
--- a/index.md
+++ b/index.md
@@ -1,6 +1,6 @@
-# Combining audio control and style transfer using latent diffusion
+<div>
+Combining audio control and style transfer using latent diffusion
+</div>
@@ -18,7 +18,7 @@ In this paper, we aim to unify explicit control and style transfer within a sing
 
 # MIDI-to-audio
 
-Examples in MIDI-to-audio generation on the [Slakh dataset](http://www.slakh.com/) . For each midi file, we present results in reconstruction (using the original audio associated with the midi file) and transfer to a different recording timbre. For the baseline SpecDiff (Multi-instrument music synthesis with spectrogram diffusion [^1]), we swap the MIDI instrument program to the one of the target timbre sample.
+Examples in MIDI-to-audio generation on the [Slakh dataset](http://www.slakh.com/) . For each midi file, we present results in reconstruction (using the original audio associated with the midi file) and transfer to a different recording timbre. For the baseline SpecDiff (Multi-instrument music synthesis with spectrogram diffusion[^1]), we swap the MIDI instrument program to the one of the target timbre sample.