Skip to content

Commit

Permalink
Make the set of active fetch tasks wider, and extend timeout
Browse files Browse the repository at this point in the history
There's a surprising number of fetches that end in failure due to
timeout on multiple retries, but the 10s timeout is too short for a
number of tracks that do succeed.  Even at 30s timeout, we still get
some tracks finishing in success after 29s.  In order to ensure a
smoother pipeline of playable tracks, we lengthen the timeout to 30s,
while allowing more simultaneous active fetch requests.

Even with a very low rate of pre-cached tracks, this should result in a
_relatively_ smooth experience, with some hitches early in the session
when it hasn't yet had much time to pre-cache tracks.
  • Loading branch information
compenguy committed May 25, 2024
1 parent 2eb25fe commit 9942e81
Show file tree
Hide file tree
Showing 2 changed files with 14 additions and 15 deletions.
24 changes: 12 additions & 12 deletions src/caching.rs
Original file line number Diff line number Diff line change
Expand Up @@ -9,8 +9,8 @@ use crate::messages::{Request, State};
use crate::model::{RequestSender, StateReceiver};
use crate::track::Track;

const TASK_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10);
const MAX_ACTIVE_FETCHES: usize = 4;
const TASK_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(30);
const MAX_ACTIVE_FETCHES: usize = 8;

#[derive(Debug)]
pub(crate) struct FetchRequest {
Expand Down Expand Up @@ -38,26 +38,26 @@ impl FetchRequest {
async fn update_state(&mut self) {
// If transfer thread completed and we haven't checked the result yet:
if let Some((ref mut th, start_time)) = &mut self.task_handle {
let task_elapsed_secs = start_time.elapsed().as_secs();
trace!(
"task started {}s ago (up to a maximum of {}s)",
start_time.elapsed().as_secs(),
"task started {task_elapsed_secs}s ago (up to a maximum of {}s)",
TASK_TIMEOUT.as_secs()
);
if th.is_finished() {
match th.await {
Err(e) if e.is_cancelled() => {
debug!("Track fetch task was cancelled");
debug!("Track fetch task was cancelled after {task_elapsed_secs}s");
self.failed = true;
self.completed = false;
}
Err(e) if e.is_panic() => {
warn!("Track fetch task panicked");
warn!("Track fetch task panicked after {task_elapsed_secs}s");
self.failed = true;
self.completed = false;
// TODO: trigger a retry?
}
Err(e) => {
error!("Unhandled track fetch task error: {e:#}");
error!("Unhandled track fetch task error {e:#} after {task_elapsed_secs}s");
self.failed = true;
self.completed = false;
// TODO: trigger a retry?
Expand All @@ -70,12 +70,12 @@ impl FetchRequest {
}
self.failed = true;
self.completed = false;
error!("Error during in-flight request for track {e:#}");
error!("Error during in-flight request for track {e:#} after {task_elapsed_secs}s");
}
Ok(Ok(_)) => {
self.completed = self.track.cached();
self.failed = !self.completed;
trace!("In-flight request for track completed: {}", &self.completed);
info!("In-flight request for track completed (successful: {} retries: {}) after {task_elapsed_secs}s", &self.completed, &self.retry_count);
}
}
self.task_handle = None;
Expand All @@ -91,7 +91,7 @@ impl FetchRequest {
self.task_handle = None;
return;
} else {
trace!("Fetch task in progress");
trace!("Fetch task in progress for {task_elapsed_secs}s");
}
} else if !self.failed && !self.completed {
warn!("Unexpected condition: no track request in-flight, and it was neither failed nor completed");
Expand Down Expand Up @@ -140,10 +140,10 @@ impl FetchRequest {
return;
}
if self.track.cached() {
trace!("Cache hit!");
info!("Cache hit {}", &self.track.title);
self.completed = true;
} else {
trace!("Cache miss!");
info!("Cache miss {}", &self.track.title);
let track = self.track.clone();
let th = tokio::spawn(async move {
//trace!("Retrieving track {}...", &track.title);
Expand Down
5 changes: 2 additions & 3 deletions src/model.rs
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@ pub(crate) type StateReceiver = async_broadcast::Receiver<State>;
pub(crate) type RequestSender = mpsc::Sender<Request>;
pub(crate) type RequestReceiver = mpsc::Receiver<Request>;

const FETCHLIST_MAX_LEN: usize = 4;
const FETCHLIST_MAX_LEN: usize = 8;
const PLAYLIST_MAX_LEN: usize = 12;

// player/volume: f32
Expand Down Expand Up @@ -658,8 +658,7 @@ impl Model {
self.notify_next().await?;
} else {
debug!("requested to start track, but no tracks are ready");
self.publish_state(State::Buffering)
.await?;
self.publish_state(State::Buffering).await?;
}
}
Ok(())
Expand Down

0 comments on commit 9942e81

Please sign in to comment.