From 7844a8ecb1a61c09b428aa2c4228f8c49ffbb5fb Mon Sep 17 00:00:00 2001
From: Njuguna Mureithi
Date: Wed, 22 May 2024 22:11:45 +0300
Subject: [PATCH 01/59] fix: improve external api for redis

---
 packages/apalis-redis/src/lib.rs | 1 +
 packages/apalis-redis/src/storage.rs | 45 +++++++++++++++++++++-------
 2 files changed, 36 insertions(+), 10 deletions(-)

diff --git a/packages/apalis-redis/src/lib.rs b/packages/apalis-redis/src/lib.rs
index 828beb25..7f503363 100644
--- a/packages/apalis-redis/src/lib.rs
+++ b/packages/apalis-redis/src/lib.rs
@@ -31,3 +31,4 @@ mod storage;
 pub use storage::connect;
 pub use storage::Config;
 pub use storage::RedisStorage;
+pub use storage::RedisQueueInfo;

diff --git a/packages/apalis-redis/src/storage.rs b/packages/apalis-redis/src/storage.rs
index f39b3629..572c10ac 100644
--- a/packages/apalis-redis/src/storage.rs
+++ b/packages/apalis-redis/src/storage.rs
@@ -40,17 +40,37 @@ const JOB_DATA_HASH: &str = "{queue}:data";
 const SCHEDULED_JOBS_SET: &str = "{queue}:scheduled";
 const SIGNAL_LIST: &str = "{queue}:signal";
 
+/// Represents redis key names for various components of the RedisStorage.
+///
+/// This struct defines keys used in Redis to manage jobs and their lifecycle in the storage.
 #[derive(Clone, Debug)]
-struct RedisQueueInfo {
-    active_jobs_list: String,
-    consumers_set: String,
-    dead_jobs_set: String,
-    done_jobs_set: String,
-    failed_jobs_set: String,
-    inflight_jobs_set: String,
-    job_data_hash: String,
-    scheduled_jobs_set: String,
-    signal_list: String,
+pub struct RedisQueueInfo {
+    /// Key for the list of currently active jobs.
+    pub active_jobs_list: String,
+
+    /// Key for the set of active consumers.
+    pub consumers_set: String,
+
+    /// Key for the set of jobs that are no longer retryable.
+    pub dead_jobs_set: String,
+
+    /// Key for the set of jobs that have completed successfully.
+    pub done_jobs_set: String,
+
+    /// Key for the set of jobs that have failed.
+    pub failed_jobs_set: String,
+
+    /// Key for the set of jobs that are currently being processed.
+    pub inflight_jobs_set: String,
+
+    /// Key for the hash storing data for each job.
+    pub job_data_hash: String,
+
+    /// Key for the set of jobs scheduled for future execution.
+    pub scheduled_jobs_set: String,
+
+    /// Key for the list used for signaling and communication between consumers and producers.
+    pub signal_list: String,
 }
 
 #[derive(Clone, Debug)]
 struct Context {
@@ -275,6 +295,11 @@ impl RedisStorage {
     pub fn get_connection(&self) -> ConnectionManager {
         self.conn.clone()
     }
+
+    /// Get the underlying queue details
+    pub fn get_queue(&self) -> &RedisQueueInfo {
+        &self.queue
+    }
 }
 
 impl Backend>

From fd6c1101e1da2431fde14969ba0b0d0b875e6af8 Mon Sep 17 00:00:00 2001
From: Njuguna Mureithi
Date: Thu, 23 May 2024 08:56:29 +0300
Subject: [PATCH 02/59] fix: improve exports for redis

---
 packages/apalis-redis/src/lib.rs | 2 ++
 packages/apalis-redis/src/storage.rs | 7 ++++---
 2 files changed, 6 insertions(+), 3 deletions(-)

diff --git a/packages/apalis-redis/src/lib.rs b/packages/apalis-redis/src/lib.rs
index 7f503363..d36e823e 100644
--- a/packages/apalis-redis/src/lib.rs
+++ b/packages/apalis-redis/src/lib.rs
@@ -32,3 +32,5 @@ pub use storage::connect;
 pub use storage::Config;
 pub use storage::RedisStorage;
 pub use storage::RedisQueueInfo;
+pub use storage::RedisCodec;
+pub use storage::RedisJob;

diff --git a/packages/apalis-redis/src/storage.rs b/packages/apalis-redis/src/storage.rs
index 572c10ac..8e33d77e 100644
--- a/packages/apalis-redis/src/storage.rs
+++ b/packages/apalis-redis/src/storage.rs
@@ -89,7 +89,7 @@ struct RedisScript {
 }
 
 #[derive(Clone, Debug, Serialize, Deserialize)]
-struct RedisJob {
+pub struct RedisJob {
     ctx: Context,
     job: J,
 }
@@ -202,7 +202,8 @@ impl Config {
     }
 }
 
-type InnerCodec = Arc<
+/// The codec used by redis to encode and decode jobs
+pub type RedisCodec = Arc<
     Box, Vec, Error = apalis_core::error::Error> + Sync + Send + 'static>,
 >;
 
@@ -214,7 +215,7 @@ pub struct RedisStorage {
     scripts: RedisScript,
     controller: Controller,
     config: Config,
-    codec: InnerCodec,
+    codec: RedisCodec,
 }
 
 impl fmt::Debug for RedisStorage {

From 6203fa093d2e39380f96a126e20d1bfd85af584d Mon Sep 17 00:00:00 2001
From: Njuguna Mureithi
Date: Thu, 23 May 2024 09:13:05 +0300
Subject: [PATCH 03/59] fix: expose redis codec

---
 packages/apalis-redis/src/storage.rs | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/packages/apalis-redis/src/storage.rs b/packages/apalis-redis/src/storage.rs
index 8e33d77e..daf2403e 100644
--- a/packages/apalis-redis/src/storage.rs
+++ b/packages/apalis-redis/src/storage.rs
@@ -301,6 +301,11 @@ impl RedisStorage {
     pub fn get_queue(&self) -> &RedisQueueInfo {
         &self.queue
     }
+
+    /// Get the underlying queue details
+    pub fn get_codec(&self) -> &RedisCodec {
+        &self.codec
+    }
 }
 
 impl Backend>

From 59ed0f43808d529341540676210bc0ea5095f4d5 Mon Sep 17 00:00:00 2001
From: geofmureithi
Date: Wed, 26 Jun 2024 21:46:24 +0300
Subject: [PATCH 04/59] Feature: v0.6.0-alpha.0 version of apalis

Breaking Changes:
- Dropped traits Job and Message, please use namespace

---
 Cargo.toml | 12 +-
 examples/async-std-runtime/Cargo.toml | 2 +-
 examples/async-std-runtime/src/main.rs | 2 +-
 examples/axum/src/main.rs | 2 +-
 examples/basics/Cargo.toml | 2 +-
 examples/basics/src/main.rs | 2 +-
 examples/email-service/src/lib.rs | 5 -
 examples/mysql/Cargo.toml | 2 +-
 examples/mysql/src/main.rs | 2 +-
 examples/postgres/Cargo.toml | 2 +-
 examples/prometheus/src/main.rs | 2 +-
 examples/redis/Cargo.toml | 2 +-
 examples/rest-api/src/main.rs | 12 +-
 examples/sentry/Cargo.toml | 2 +-
 examples/sqlite/Cargo.toml | 2 +-
 examples/sqlite/src/job.rs | 5 -
 examples/tracing/Cargo.toml | 2 +-
 examples/tracing/src/main.rs | 5 +-
 packages/apalis-core/Cargo.toml | 3 +-
 packages/apalis-core/src/mq/mod.rs | 16 --
 packages/apalis-core/src/storage/mod.rs | 18 +-
 packages/apalis-core/src/task/mod.rs | 2 +
packages/apalis-core/src/task/namespace.rs | 39 ++++ packages/apalis-cron/Cargo.toml | 4 +- packages/apalis-redis/Cargo.toml | 4 +- packages/apalis-redis/src/lib.rs | 4 +- packages/apalis-redis/src/storage.rs | 212 ++++++++++++++------- packages/apalis-sql/Cargo.toml | 4 +- packages/apalis-sql/src/lib.rs | 10 + packages/apalis-sql/src/mysql.rs | 31 +-- packages/apalis-sql/src/postgres.rs | 36 ++-- packages/apalis-sql/src/sqlite.rs | 46 ++--- src/layers/prometheus/mod.rs | 16 +- src/layers/sentry/mod.rs | 8 +- src/lib.rs | 4 +- 35 files changed, 310 insertions(+), 212 deletions(-) create mode 100644 packages/apalis-core/src/task/namespace.rs diff --git a/Cargo.toml b/Cargo.toml index e065dba4..9a346828 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apalis" -version = "0.5.3" +version = "0.6.0-alpha.0" authors = ["Geoffrey Mureithi "] description = "Simple, extensible multithreaded background job processing for Rust" repository = "https://github.com/geofmureithi/apalis" @@ -71,26 +71,26 @@ layers = [ docsrs = ["document-features"] [dependencies.apalis-redis] -version = "0.5.3" +version = "0.6.0-alpha.0" optional = true path = "./packages/apalis-redis" default-features = false [dependencies.apalis-sql] -version = "0.5.3" +version = "0.6.0-alpha.0" features = ["migrate"] optional = true default-features = false path = "./packages/apalis-sql" [dependencies.apalis-core] -version = "0.5.3" +version = "0.6.0-alpha.0" default-features = false path = "./packages/apalis-core" [dependencies.apalis-cron] -version = "0.5.3" +version = "0.6.0-alpha.0" optional = true default-features = false path = "./packages/apalis-cron" @@ -171,6 +171,6 @@ ulid = { version = "1", optional = true } serde = { version = "1.0", features = ["derive"] } [dependencies.tracing] -default_features = false +default-features = false version = "0.1.40" optional = true diff --git a/examples/async-std-runtime/Cargo.toml b/examples/async-std-runtime/Cargo.toml index 309b15d4..b3645e4f 100644 --- a/examples/async-std-runtime/Cargo.toml +++ b/examples/async-std-runtime/Cargo.toml @@ -23,5 +23,5 @@ ctrlc = "3.2.5" async-channel = "2" [dependencies.tracing] -default_features = false +default-features = false version = "0.1" diff --git a/examples/async-std-runtime/src/main.rs b/examples/async-std-runtime/src/main.rs index 646bb05c..0fcd9955 100644 --- a/examples/async-std-runtime/src/main.rs +++ b/examples/async-std-runtime/src/main.rs @@ -23,7 +23,7 @@ impl From> for Reminder { async fn send_in_background(reminder: Reminder) { apalis_core::sleep(Duration::from_secs(2)).await; - debug!("Called at {reminder:?}"); + debug!("Called at {:?}", reminder.0); } async fn send_reminder(reminder: Reminder, worker: WorkerCtx) -> bool { // this will happen in the workers background and wont block the next tasks diff --git a/examples/axum/src/main.rs b/examples/axum/src/main.rs index a4b7146d..590cd32b 100644 --- a/examples/axum/src/main.rs +++ b/examples/axum/src/main.rs @@ -29,7 +29,7 @@ async fn add_new_job( Extension(mut storage): Extension>, ) -> impl IntoResponse where - T: 'static + Debug + Job + Serialize + DeserializeOwned + Send + Sync + Unpin, + T: 'static + Debug + Serialize + DeserializeOwned + Send + Sync + Unpin, { dbg!(&input); let new_job = storage.push(input).await; diff --git a/examples/basics/Cargo.toml b/examples/basics/Cargo.toml index e09b3879..c6589c25 100644 --- a/examples/basics/Cargo.toml +++ b/examples/basics/Cargo.toml @@ -17,5 +17,5 @@ tower = "0.4" [dependencies.tracing] -default_features = 
false +default-features = false version = "0.1" diff --git a/examples/basics/src/main.rs b/examples/basics/src/main.rs index cc7fe38d..341f8f55 100644 --- a/examples/basics/src/main.rs +++ b/examples/basics/src/main.rs @@ -98,7 +98,7 @@ async fn main() -> Result<(), std::io::Error> { Monitor::::new() .register_with_count(2, { - WorkerBuilder::new("tasty-banana".to_string()) + WorkerBuilder::new("tasty-banana") .layer(TraceLayer::new()) .layer(LogLayer::new("some-log-example")) // Add shared context to all jobs executed by this worker diff --git a/examples/email-service/src/lib.rs b/examples/email-service/src/lib.rs index 323e115a..f5f833dd 100644 --- a/examples/email-service/src/lib.rs +++ b/examples/email-service/src/lib.rs @@ -1,4 +1,3 @@ -use apalis::prelude::*; use serde::{Deserialize, Serialize}; #[derive(Debug, Deserialize, Serialize, Clone)] @@ -8,10 +7,6 @@ pub struct Email { pub text: String, } -impl Job for Email { - const NAME: &'static str = "apalis::Email"; -} - pub async fn send_email(job: Email) { log::info!("Attempting to send email to {}", job.to); } diff --git a/examples/mysql/Cargo.toml b/examples/mysql/Cargo.toml index b180d76d..bbd3a96b 100644 --- a/examples/mysql/Cargo.toml +++ b/examples/mysql/Cargo.toml @@ -20,5 +20,5 @@ email-service = { path = "../email-service" } [dependencies.tracing] -default_features = false +default-features = false version = "0.1" diff --git a/examples/mysql/src/main.rs b/examples/mysql/src/main.rs index 848f6f0b..9ec2dca3 100644 --- a/examples/mysql/src/main.rs +++ b/examples/mysql/src/main.rs @@ -34,7 +34,7 @@ async fn main() -> Result<()> { Monitor::new_with_executor(TokioExecutor) .register_with_count(1, { - WorkerBuilder::new(format!("tasty-avocado")) + WorkerBuilder::new("tasty-avocado") .layer(TraceLayer::new()) .with_storage(mysql) .build_fn(send_email) diff --git a/examples/postgres/Cargo.toml b/examples/postgres/Cargo.toml index 07eadc67..bff70949 100644 --- a/examples/postgres/Cargo.toml +++ b/examples/postgres/Cargo.toml @@ -16,5 +16,5 @@ email-service = { path = "../email-service" } tower = { version = "0.4", features = ["buffer"] } [dependencies.tracing] -default_features = false +default-features = false version = "0.1" diff --git a/examples/prometheus/src/main.rs b/examples/prometheus/src/main.rs index 1288b41e..4f680cd1 100644 --- a/examples/prometheus/src/main.rs +++ b/examples/prometheus/src/main.rs @@ -87,7 +87,7 @@ async fn add_new_job( Extension(mut storage): Extension>, ) -> impl IntoResponse where - T: 'static + Debug + Job + Serialize + DeserializeOwned + Unpin + Send + Sync, + T: 'static + Debug + Serialize + DeserializeOwned + Unpin + Send + Sync, { dbg!(&input); let new_job = storage.push(input).await; diff --git a/examples/redis/Cargo.toml b/examples/redis/Cargo.toml index 6f5c9937..2f8a9be5 100644 --- a/examples/redis/Cargo.toml +++ b/examples/redis/Cargo.toml @@ -17,5 +17,5 @@ email-service = { path = "../email-service" } [dependencies.tracing] -default_features = false +default-features = false version = "0.1" diff --git a/examples/rest-api/src/main.rs b/examples/rest-api/src/main.rs index 0f90ff03..79939321 100644 --- a/examples/rest-api/src/main.rs +++ b/examples/rest-api/src/main.rs @@ -76,7 +76,7 @@ struct Filter { async fn push_job(job: web::Json, storage: web::Data) -> HttpResponse where - J: Job + Serialize + DeserializeOwned + 'static, + J: Serialize + DeserializeOwned + 'static, S: Storage, { let storage = &*storage.into_inner(); @@ -90,7 +90,7 @@ where async fn get_jobs(storage: web::Data, filter: 
web::Query) -> HttpResponse where - J: Job + Serialize + DeserializeOwned + 'static, + J: Serialize + DeserializeOwned + 'static, S: Storage + JobStreamExt + Send, { let storage = &*storage.into_inner(); @@ -106,7 +106,7 @@ where async fn get_workers(storage: web::Data) -> HttpResponse where - J: Job + Serialize + DeserializeOwned + 'static, + J: Serialize + DeserializeOwned + 'static, S: Storage + JobStreamExt, { let storage = &*storage.into_inner(); @@ -120,7 +120,7 @@ where async fn get_job(job_id: web::Path, storage: web::Data) -> HttpResponse where - J: Job + Serialize + DeserializeOwned + 'static, + J: Serialize + DeserializeOwned + 'static, S: Storage + 'static, { let storage = &*storage.into_inner(); @@ -140,7 +140,7 @@ trait StorageRest: Storage { impl StorageRest for S where S: Storage + JobStreamExt + 'static, - J: Job + Serialize + DeserializeOwned + 'static, + J: Serialize + DeserializeOwned + 'static, { fn name(&self) -> String { J::NAME.to_string() @@ -165,7 +165,7 @@ struct StorageApiBuilder { impl StorageApiBuilder { fn add_storage(mut self, storage: S) -> Self where - J: Job + Serialize + DeserializeOwned + 'static, + J: Serialize + DeserializeOwned + 'static, S: StorageRest + JobStreamExt, S: Storage, S: 'static + Send, diff --git a/examples/sentry/Cargo.toml b/examples/sentry/Cargo.toml index 63ee02b7..20b1a1a5 100644 --- a/examples/sentry/Cargo.toml +++ b/examples/sentry/Cargo.toml @@ -20,5 +20,5 @@ email-service = { path = "../email-service" } [dependencies.tracing] -default_features = false +default-features = false version = "0.1" diff --git a/examples/sqlite/Cargo.toml b/examples/sqlite/Cargo.toml index 90219e4e..4ee1ed41 100644 --- a/examples/sqlite/Cargo.toml +++ b/examples/sqlite/Cargo.toml @@ -21,7 +21,7 @@ email-service = { path = "../email-service" } [dependencies.tracing] -default_features = false +default-features = false version = "0.1" [dependencies.sqlx] diff --git a/examples/sqlite/src/job.rs b/examples/sqlite/src/job.rs index a8311d20..4e0ddede 100644 --- a/examples/sqlite/src/job.rs +++ b/examples/sqlite/src/job.rs @@ -1,4 +1,3 @@ -use apalis::prelude::*; use serde::{Deserialize, Serialize}; #[derive(Debug, Deserialize, Serialize)] @@ -7,10 +6,6 @@ pub struct Notification { pub text: String, } -impl Job for Notification { - const NAME: &'static str = "apalis::Notification"; -} - pub async fn notify(job: Notification) { tracing::info!("Attempting to send notification to {}", job.to); } diff --git a/examples/tracing/Cargo.toml b/examples/tracing/Cargo.toml index 9c6d8aaf..7872597c 100644 --- a/examples/tracing/Cargo.toml +++ b/examples/tracing/Cargo.toml @@ -18,5 +18,5 @@ futures = "0.3" [dependencies.tracing] -default_features = false +default-features = false version = "0.1" diff --git a/examples/tracing/src/main.rs b/examples/tracing/src/main.rs index ad56a8f5..64323b5d 100644 --- a/examples/tracing/src/main.rs +++ b/examples/tracing/src/main.rs @@ -29,10 +29,11 @@ impl fmt::Display for InvalidEmailError { impl Error for InvalidEmailError {} -async fn email_service(_email: Email) { +async fn email_service(email: Email) -> Result<(), InvalidEmailError> { tracing::info!("Checking if dns configured"); sleep(Duration::from_millis(1008)).await; - tracing::info!("Sent in 1 sec"); + tracing::info!("Failed in 1 sec"); + Err(InvalidEmailError { email: email.to }) } async fn produce_jobs(mut storage: RedisStorage) -> Result<()> { diff --git a/packages/apalis-core/Cargo.toml b/packages/apalis-core/Cargo.toml index e177503f..5122ee5a 100644 --- 
a/packages/apalis-core/Cargo.toml +++ b/packages/apalis-core/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apalis-core" -version = "0.5.3" +version = "0.6.0-alpha.0" authors = ["Njuguna Mureithi "] edition = "2021" license = "MIT" @@ -43,4 +43,3 @@ all-features = true [dev-dependencies] tokio = { version = "1.37.0", features = ["macros", "rt", "sync"] } tokio-stream = "0.1.15" - diff --git a/packages/apalis-core/src/mq/mod.rs b/packages/apalis-core/src/mq/mod.rs index 3a0eb612..0183294b 100644 --- a/packages/apalis-core/src/mq/mod.rs +++ b/packages/apalis-core/src/mq/mod.rs @@ -21,19 +21,3 @@ pub trait MessageQueue: Backend> { /// Returns the current size of the queue. fn size(&self) -> impl Future> + Send; } - -/// Trait representing a job. -/// -/// -/// # Example -/// ```rust -/// # use apalis_core::mq::Message; -/// # struct Email; -/// impl Message for Email { -/// const NAME: &'static str = "redis::Email"; -/// } -/// ``` -pub trait Message { - /// Represents the name for job. - const NAME: &'static str; -} diff --git a/packages/apalis-core/src/storage/mod.rs b/packages/apalis-core/src/storage/mod.rs index 557154d7..a8e3d1e6 100644 --- a/packages/apalis-core/src/storage/mod.rs +++ b/packages/apalis-core/src/storage/mod.rs @@ -11,7 +11,7 @@ pub type StorageStream = BoxStream<'static, Result>, E>> /// The underlying type must implement [Job] pub trait Storage: Backend> { /// The type of job that can be persisted - type Job: Job; + type Job; /// The error produced by the storage type Error; @@ -60,19 +60,3 @@ pub trait Storage: Backend> { /// Vacuum the storage, removes done and killed jobs fn vacuum(&self) -> impl Future> + Send; } - -/// Trait representing a job. -/// -/// -/// # Example -/// ```rust -/// # use apalis_core::storage::Job; -/// # struct Email; -/// impl Job for Email { -/// const NAME: &'static str = "apalis::Email"; -/// } -/// ``` -pub trait Job { - /// Represents the name for job. - const NAME: &'static str; -} diff --git a/packages/apalis-core/src/task/mod.rs b/packages/apalis-core/src/task/mod.rs index e50b7825..169bd614 100644 --- a/packages/apalis-core/src/task/mod.rs +++ b/packages/apalis-core/src/task/mod.rs @@ -1,4 +1,6 @@ /// A unique tracker for number of attempts pub mod attempt; +/// A wrapper type for storing the namespace +pub mod namespace; /// A unique ID that can be used by a backend pub mod task_id; diff --git a/packages/apalis-core/src/task/namespace.rs b/packages/apalis-core/src/task/namespace.rs new file mode 100644 index 00000000..c38f60bb --- /dev/null +++ b/packages/apalis-core/src/task/namespace.rs @@ -0,0 +1,39 @@ +use std::convert::From; +use std::fmt::{self, Display, Formatter}; +use std::ops::Deref; + +/// A wrapper type that defines a task's namespace. 
+#[derive(Debug, Clone)]
+pub struct Namespace(pub String);
+
+impl Deref for Namespace {
+    type Target = String;
+
+    fn deref(&self) -> &Self::Target {
+        &self.0
+    }
+}
+
+impl Display for Namespace {
+    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+        write!(f, "{}", self.0)
+    }
+}
+
+impl From for Namespace {
+    fn from(s: String) -> Self {
+        Namespace(s)
+    }
+}
+
+impl From for String {
+    fn from(value: Namespace) -> String {
+        value.0
+    }
+}
+
+impl AsRef for Namespace {
+    fn as_ref(&self) -> &str {
+        &self.0
+    }
+}

diff --git a/packages/apalis-cron/Cargo.toml b/packages/apalis-cron/Cargo.toml
index 4a6e2ea0..edc847fa 100644
--- a/packages/apalis-cron/Cargo.toml
+++ b/packages/apalis-cron/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "apalis-cron"
-version = "0.5.3"
+version = "0.6.0-alpha.0"
 edition = "2021"
 authors = ["Njuguna Mureithi "]
 license = "MIT"
@@ -9,7 +9,7 @@ description = "A simple yet extensible library for cron-like job scheduling for
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 
 [dependencies]
-apalis-core = { path = "../../packages/apalis-core", version = "0.5.3", default-features = false, features = [
+apalis-core = { path = "../../packages/apalis-core", version = "0.6.0-alpha.0", default-features = false, features = [
     "sleep",
     "json",
 ] }

diff --git a/packages/apalis-redis/Cargo.toml b/packages/apalis-redis/Cargo.toml
index 4789f008..688a4d24 100644
--- a/packages/apalis-redis/Cargo.toml
+++ b/packages/apalis-redis/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "apalis-redis"
-version = "0.5.3"
+version = "0.6.0-alpha.0"
 authors = ["Njuguna Mureithi "]
 edition = "2021"
 readme = "../../README.md"
@@ -11,7 +11,7 @@ description = "Redis Storage for apalis: use Redis for background jobs and messa
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 
 [dependencies]
-apalis-core = { path = "../../packages/apalis-core", version = "0.5.3", default-features = false, features = [
+apalis-core = { path = "../../packages/apalis-core", version = "0.6.0-alpha.0", default-features = false, features = [
     "sleep",
     "json",
 ] }

diff --git a/packages/apalis-redis/src/lib.rs b/packages/apalis-redis/src/lib.rs
index d36e823e..78151282 100644
--- a/packages/apalis-redis/src/lib.rs
+++ b/packages/apalis-redis/src/lib.rs
@@ -30,7 +30,7 @@ mod storage;
 
 pub use storage::connect;
 pub use storage::Config;
-pub use storage::RedisStorage;
-pub use storage::RedisQueueInfo;
 pub use storage::RedisCodec;
 pub use storage::RedisJob;
+pub use storage::RedisQueueInfo;
+pub use storage::RedisStorage;

diff --git a/packages/apalis-redis/src/storage.rs b/packages/apalis-redis/src/storage.rs
index daf2403e..0b45d23c 100644
--- a/packages/apalis-redis/src/storage.rs
+++ b/packages/apalis-redis/src/storage.rs
@@ -6,8 +6,9 @@ use apalis_core::poller::controller::Controller;
 use apalis_core::poller::stream::BackendStream;
 use apalis_core::poller::Poller;
 use apalis_core::request::{Request, RequestStream};
-use apalis_core::storage::{Job, Storage};
+use apalis_core::storage::Storage;
 use apalis_core::task::attempt::Attempt;
+use apalis_core::task::namespace::Namespace;
 use apalis_core::task::task_id::TaskId;
 use apalis_core::worker::WorkerId;
 use apalis_core::{Backend, Codec};
@@ -88,6 +89,7 @@ struct RedisScript {
     vacuum: Script,
 }
 
+/// The actual structure of a Redis job
 #[derive(Clone, Debug, Serialize, Deserialize)]
 pub struct RedisJob {
     ctx: Context,
     job: J,
 }
@@ -136,6 +138,7 @@ pub struct Config {
     max_retries: usize,
keep_alive: Duration, enqueue_scheduled: Duration, + namespace: String, } impl Default for Config { @@ -146,6 +149,7 @@ impl Default for Config { max_retries: 5, keep_alive: Duration::from_secs(30), enqueue_scheduled: Duration::from_secs(30), + namespace: String::from("apalis::redis"), } } } @@ -200,6 +204,92 @@ impl Config { pub fn set_enqueue_scheduled(&mut self, enqueue_scheduled: Duration) { self.enqueue_scheduled = enqueue_scheduled; } + + /// set the namespace for the Storage + pub fn set_namespace(&mut self, namespace: String) { + self.namespace = namespace; + } + + /// Returns the Redis key for the list of active jobs associated with the queue. + /// The key is dynamically generated using the namespace of the queue. + /// + /// # Returns + /// A `String` representing the Redis key for the active jobs list. + pub fn active_jobs_list(&self) -> String { + ACTIVE_JOBS_LIST.replace("{queue}", &self.namespace) + } + + /// Returns the Redis key for the set of consumers associated with the queue. + /// The key is dynamically generated using the namespace of the queue. + /// + /// # Returns + /// A `String` representing the Redis key for the consumers set. + pub fn consumers_set(&self) -> String { + CONSUMERS_SET.replace("{queue}", &self.namespace) + } + + /// Returns the Redis key for the set of dead jobs associated with the queue. + /// The key is dynamically generated using the namespace of the queue. + /// + /// # Returns + /// A `String` representing the Redis key for the dead jobs set. + pub fn dead_jobs_set(&self) -> String { + DEAD_JOBS_SET.replace("{queue}", &self.namespace) + } + + /// Returns the Redis key for the set of done jobs associated with the queue. + /// The key is dynamically generated using the namespace of the queue. + /// + /// # Returns + /// A `String` representing the Redis key for the done jobs set. + pub fn done_jobs_set(&self) -> String { + DONE_JOBS_SET.replace("{queue}", &self.namespace) + } + + /// Returns the Redis key for the set of failed jobs associated with the queue. + /// The key is dynamically generated using the namespace of the queue. + /// + /// # Returns + /// A `String` representing the Redis key for the failed jobs set. + pub fn failed_jobs_set(&self) -> String { + FAILED_JOBS_SET.replace("{queue}", &self.namespace) + } + + /// Returns the Redis key for the set of inflight jobs associated with the queue. + /// The key is dynamically generated using the namespace of the queue. + /// + /// # Returns + /// A `String` representing the Redis key for the inflight jobs set. + pub fn inflight_jobs_set(&self) -> String { + INFLIGHT_JOB_SET.replace("{queue}", &self.namespace) + } + + /// Returns the Redis key for the hash storing job data associated with the queue. + /// The key is dynamically generated using the namespace of the queue. + /// + /// # Returns + /// A `String` representing the Redis key for the job data hash. + pub fn job_data_hash(&self) -> String { + JOB_DATA_HASH.replace("{queue}", &self.namespace) + } + + /// Returns the Redis key for the set of scheduled jobs associated with the queue. + /// The key is dynamically generated using the namespace of the queue. + /// + /// # Returns + /// A `String` representing the Redis key for the scheduled jobs set. + pub fn scheduled_jobs_set(&self) -> String { + SCHEDULED_JOBS_SET.replace("{queue}", &self.namespace) + } + + /// Returns the Redis key for the list of signals associated with the queue. + /// The key is dynamically generated using the namespace of the queue. 
+ /// + /// # Returns + /// A `String` representing the Redis key for the signal list. + pub fn signal_list(&self) -> String { + SIGNAL_LIST.replace("{queue}", &self.namespace) + } } /// The codec used by redis to encode and decode jobs @@ -211,7 +301,6 @@ pub type RedisCodec = Arc< pub struct RedisStorage { conn: ConnectionManager, job_type: PhantomData, - queue: RedisQueueInfo, scripts: RedisScript, controller: Controller, config: Config, @@ -223,7 +312,6 @@ impl fmt::Debug for RedisStorage { f.debug_struct("RedisStorage") .field("conn", &"ConnectionManager") .field("job_type", &std::any::type_name::()) - .field("queue", &self.queue) .field("scripts", &self.scripts) .field("config", &self.config) .finish() @@ -235,7 +323,6 @@ impl Clone for RedisStorage { Self { conn: self.conn.clone(), job_type: PhantomData, - queue: self.queue.clone(), scripts: self.scripts.clone(), controller: self.controller.clone(), config: self.config.clone(), @@ -244,7 +331,7 @@ impl Clone for RedisStorage { } } -impl RedisStorage { +impl RedisStorage { /// Start a new connection pub fn new(conn: ConnectionManager) -> Self { Self::new_with_config(conn, Config::default()) @@ -252,24 +339,12 @@ impl RedisStorage { /// Start a new connection providing custom config pub fn new_with_config(conn: ConnectionManager, config: Config) -> Self { - let name = T::NAME; RedisStorage { conn, job_type: PhantomData, controller: Controller::new(), config, codec: Arc::new(Box::new(JsonCodec)), - queue: RedisQueueInfo { - active_jobs_list: ACTIVE_JOBS_LIST.replace("{queue}", name), - consumers_set: CONSUMERS_SET.replace("{queue}", name), - dead_jobs_set: DEAD_JOBS_SET.replace("{queue}", name), - done_jobs_set: DONE_JOBS_SET.replace("{queue}", name), - failed_jobs_set: FAILED_JOBS_SET.replace("{queue}", name), - inflight_jobs_set: INFLIGHT_JOB_SET.replace("{queue}", name), - job_data_hash: JOB_DATA_HASH.replace("{queue}", name), - scheduled_jobs_set: SCHEDULED_JOBS_SET.replace("{queue}", name), - signal_list: SIGNAL_LIST.replace("{queue}", name), - }, scripts: RedisScript { ack_job: redis::Script::new(include_str!("../lua/ack_job.lua")), push_job: redis::Script::new(include_str!("../lua/push_job.lua")), @@ -297,18 +372,18 @@ impl RedisStorage { self.conn.clone() } - /// Get the underlying queue details - pub fn get_queue(&self) -> &RedisQueueInfo { - &self.queue + /// Get the config used by the storage + pub fn get_config(&self) -> &Config { + &self.config } - /// Get the underlying queue details + /// Get the underlying codec details pub fn get_codec(&self) -> &RedisCodec { &self.codec } } -impl Backend> +impl Backend> for RedisStorage { type Stream = BackendStream>>; @@ -364,8 +439,8 @@ impl Ack for RedisStorage { ) -> Result<(), RedisError> { let mut conn = self.conn.clone(); let ack_job = self.scripts.ack_job.clone(); - let inflight_set = format!("{}:{}", self.queue.inflight_jobs_set, worker_id); - let done_jobs_set = &self.queue.done_jobs_set.to_string(); + let inflight_set = format!("{}:{}", self.config.inflight_jobs_set(), worker_id); + let done_jobs_set = &self.config.done_jobs_set(); let now: i64 = Utc::now().timestamp(); @@ -388,12 +463,13 @@ impl RedisStorage ) -> RequestStream> { let mut conn = self.conn.clone(); let fetch_jobs = self.scripts.get_jobs.clone(); - let consumers_set = self.queue.consumers_set.to_string(); - let active_jobs_list = self.queue.active_jobs_list.to_string(); - let job_data_hash = self.queue.job_data_hash.to_string(); - let inflight_set = format!("{}:{}", self.queue.inflight_jobs_set, worker_id); - 
let signal_list = self.queue.signal_list.to_string(); + let consumers_set = self.config.consumers_set(); + let active_jobs_list = self.config.active_jobs_list(); + let job_data_hash = self.config.job_data_hash(); + let inflight_set = format!("{}:{}", self.config.inflight_jobs_set(), worker_id); + let signal_list = self.config.signal_list(); let codec = self.codec.clone(); + let namespace = self.config.namespace.clone(); Box::pin(try_stream! { loop { apalis_core::sleep(interval).await; @@ -409,7 +485,11 @@ impl RedisStorage match result { Ok(jobs) => { for job in jobs { - yield deserialize_job(&job).map(|res| codec.decode(res)).transpose()?.map(Into::into) + let request = deserialize_job(&job).map(|res| codec.decode(res)).transpose()?.map(Into::into).map(|mut req: Request| { + req.insert(Namespace(namespace.clone())); + req + }); + yield request } }, Err(e) => { @@ -450,8 +530,8 @@ impl RedisStorage { async fn keep_alive(&mut self, worker_id: &WorkerId) -> Result<(), RedisError> { let mut conn = self.conn.clone(); let register_consumer = self.scripts.register_consumer.clone(); - let inflight_set = format!("{}:{}", self.queue.inflight_jobs_set, worker_id); - let consumers_set = self.queue.consumers_set.to_string(); + let inflight_set = format!("{}:{}", self.config.inflight_jobs_set(), worker_id); + let consumers_set = self.config.consumers_set(); let now: i64 = Utc::now().timestamp(); @@ -466,7 +546,7 @@ impl RedisStorage { impl Storage for RedisStorage where - T: Serialize + DeserializeOwned + Send + 'static + Unpin + Job + Sync, + T: Serialize + DeserializeOwned + Send + 'static + Unpin + Sync, { type Job = T; type Error = RedisError; @@ -475,9 +555,9 @@ where async fn push(&mut self, job: Self::Job) -> Result { let mut conn = self.conn.clone(); let push_job = self.scripts.push_job.clone(); - let job_data_hash = self.queue.job_data_hash.to_string(); - let active_jobs_list = self.queue.active_jobs_list.to_string(); - let signal_list = self.queue.signal_list.to_string(); + let job_data_hash = self.config.job_data_hash(); + let active_jobs_list = self.config.active_jobs_list(); + let signal_list = self.config.signal_list(); let job_id = TaskId::new(); let ctx = Context { attempts: 0, @@ -501,8 +581,8 @@ where async fn schedule(&mut self, job: Self::Job, on: i64) -> Result { let mut conn = self.conn.clone(); let schedule_job = self.scripts.schedule_job.clone(); - let job_data_hash = self.queue.job_data_hash.to_string(); - let scheduled_jobs_set = self.queue.scheduled_jobs_set.to_string(); + let job_data_hash = self.config.job_data_hash(); + let scheduled_jobs_set = self.config.scheduled_jobs_set(); let job_id = TaskId::new(); let ctx = Context { attempts: 0, @@ -527,11 +607,11 @@ where async fn len(&self) -> Result { let mut conn = self.conn.clone(); let all_jobs: i64 = redis::cmd("HLEN") - .arg(&self.queue.job_data_hash.to_string()) + .arg(&self.config.job_data_hash()) .query_async(&mut conn) .await?; let done_jobs: i64 = redis::cmd("ZCOUNT") - .arg(self.queue.done_jobs_set.to_owned()) + .arg(self.config.done_jobs_set()) .arg("-inf") .arg("+inf") .query_async(&mut conn) @@ -542,7 +622,7 @@ where async fn fetch_by_id(&self, job_id: &TaskId) -> Result>, RedisError> { let mut conn = self.conn.clone(); let data: Value = redis::cmd("HMGET") - .arg(&self.queue.job_data_hash.to_string()) + .arg(&self.config.job_data_hash()) .arg(job_id.to_string()) .query_async(&mut conn) .await?; @@ -569,7 +649,7 @@ where .encode(&job) .map_err(|e| (ErrorKind::IoError, "Encode error", e.to_string()))?; let _: i64 = 
redis::cmd("HSET") - .arg(&self.queue.job_data_hash.to_string()) + .arg(&self.config.job_data_hash()) .arg(job.ctx.id.to_string()) .arg(bytes) .query_async(&mut conn) @@ -592,15 +672,15 @@ where .codec .encode(&(job.try_into()?)) .map_err(|e| (ErrorKind::IoError, "Encode error", e.to_string()))?; - let job_data_hash = self.queue.job_data_hash.to_string(); - let scheduled_jobs_set = self.queue.scheduled_jobs_set.to_string(); + let job_data_hash = self.config.job_data_hash(); + let scheduled_jobs_set = self.config.scheduled_jobs_set(); let on: i64 = Utc::now().timestamp(); let wait: i64 = wait .as_secs() .try_into() .map_err(|e: TryFromIntError| (ErrorKind::IoError, "Duration error", e.to_string()))?; - let inflight_set = format!("{}:{}", self.queue.inflight_jobs_set, worker_id); - let failed_jobs_set = self.queue.failed_jobs_set.to_string(); + let inflight_set = format!("{}:{}", self.config.inflight_jobs_set(), worker_id); + let failed_jobs_set = self.config.failed_jobs_set(); redis::cmd("SREM") .arg(inflight_set) .arg(job_id.to_string()) @@ -630,8 +710,8 @@ where let mut conn = self.conn.clone(); vacuum_script - .key(self.queue.dead_jobs_set.clone()) - .key(self.queue.job_data_hash.clone()) + .key(self.config.dead_jobs_set()) + .key(self.config.job_data_hash()) .invoke_async(&mut conn) .await } @@ -641,15 +721,15 @@ impl RedisStorage { /// Attempt to retry a job pub async fn retry(&mut self, worker_id: &WorkerId, task_id: &TaskId) -> Result where - T: Send + DeserializeOwned + Serialize + Job + Unpin + Sync + 'static, + T: Send + DeserializeOwned + Serialize + Unpin + Sync + 'static, { let mut conn = self.conn.clone(); let retry_job = self.scripts.retry_job.clone(); - let inflight_set = format!("{}:{}", self.queue.inflight_jobs_set, worker_id); - let scheduled_jobs_set = self.queue.scheduled_jobs_set.to_string(); - let job_data_hash = self.queue.job_data_hash.to_string(); + let inflight_set = format!("{}:{}", self.config.inflight_jobs_set(), worker_id); + let scheduled_jobs_set = self.config.scheduled_jobs_set(); + let job_data_hash = self.config.job_data_hash(); let job_fut = self.fetch_by_id(task_id); - let failed_jobs_set = self.queue.failed_jobs_set.to_string(); + let failed_jobs_set = self.config.failed_jobs_set(); let mut storage = self.clone(); let now: i64 = Utc::now().timestamp(); let res = job_fut.await?; @@ -692,13 +772,13 @@ impl RedisStorage { /// Attempt to kill a job pub async fn kill(&mut self, worker_id: &WorkerId, task_id: &TaskId) -> Result<(), RedisError> where - T: Send + DeserializeOwned + Serialize + Job + Unpin + Sync + 'static, + T: Send + DeserializeOwned + Serialize + Unpin + Sync + 'static, { let mut conn = self.conn.clone(); let kill_job = self.scripts.kill_job.clone(); - let current_worker_id = format!("{}:{}", self.queue.inflight_jobs_set, worker_id); - let job_data_hash = self.queue.job_data_hash.to_string(); - let dead_jobs_set = self.queue.dead_jobs_set.to_string(); + let current_worker_id = format!("{}:{}", self.config.inflight_jobs_set(), worker_id); + let job_data_hash = self.config.job_data_hash(); + let dead_jobs_set = self.config.dead_jobs_set(); let fetch_job = self.fetch_by_id(task_id); let now: i64 = Utc::now().timestamp(); let res = fetch_job.await?; @@ -725,9 +805,9 @@ impl RedisStorage { /// Required to add scheduled jobs to the active set pub async fn enqueue_scheduled(&mut self, count: usize) -> Result { let enqueue_jobs = self.scripts.enqueue_scheduled.clone(); - let scheduled_jobs_set = self.queue.scheduled_jobs_set.to_string(); - let 
active_jobs_list = self.queue.active_jobs_list.to_string(); - let signal_list = self.queue.signal_list.to_string(); + let scheduled_jobs_set = self.config.scheduled_jobs_set(); + let active_jobs_list = self.config.active_jobs_list(); + let signal_list = self.config.signal_list(); let now: i64 = Utc::now().timestamp(); let res: Result = enqueue_jobs .key(scheduled_jobs_set) @@ -747,9 +827,9 @@ impl RedisStorage { pub async fn reenqueue_active(&mut self, job_ids: Vec<&TaskId>) -> Result<(), RedisError> { let mut conn = self.conn.clone(); let reenqueue_active = self.scripts.reenqueue_active.clone(); - let inflight_set = self.queue.inflight_jobs_set.to_string(); - let active_jobs_list = self.queue.active_jobs_list.to_string(); - let signal_list = self.queue.signal_list.to_string(); + let inflight_set = self.config.inflight_jobs_set().to_string(); + let active_jobs_list = self.config.active_jobs_list(); + let signal_list = self.config.signal_list(); reenqueue_active .key(inflight_set) @@ -771,9 +851,9 @@ impl RedisStorage { dead_since: i64, ) -> Result { let reenqueue_orphaned = self.scripts.reenqueue_orphaned.clone(); - let consumers_set = self.queue.consumers_set.to_string(); - let active_jobs_list = self.queue.active_jobs_list.to_string(); - let signal_list = self.queue.signal_list.to_string(); + let consumers_set = self.config.consumers_set(); + let active_jobs_list = self.config.active_jobs_list(); + let signal_list = self.config.signal_list(); let res: Result = reenqueue_orphaned .key(consumers_set) diff --git a/packages/apalis-sql/Cargo.toml b/packages/apalis-sql/Cargo.toml index f7dec23d..3e5ac1b0 100644 --- a/packages/apalis-sql/Cargo.toml +++ b/packages/apalis-sql/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apalis-sql" -version = "0.5.3" +version = "0.6.0-alpha.0" authors = ["Njuguna Mureithi "] edition = "2021" readme = "../../README.md" @@ -25,7 +25,7 @@ features = ["chrono"] [dependencies] serde = { version = "1", features = ["derive"] } serde_json = "1" -apalis-core = { path = "../../packages/apalis-core", version = "0.5.3", default-features = false, features = [ +apalis-core = { path = "../../packages/apalis-core", version = "0.6.0-alpha.0", default-features = false, features = [ "sleep", "json", ] } diff --git a/packages/apalis-sql/src/lib.rs b/packages/apalis-sql/src/lib.rs index 25caf278..ed611938 100644 --- a/packages/apalis-sql/src/lib.rs +++ b/packages/apalis-sql/src/lib.rs @@ -39,6 +39,7 @@ pub struct Config { keep_alive: Duration, buffer_size: usize, poll_interval: Duration, + namespace: String, } impl Default for Config { @@ -47,6 +48,7 @@ impl Default for Config { keep_alive: Duration::from_secs(30), buffer_size: 10, poll_interval: Duration::from_millis(50), + namespace: String::from("apalis::sql"), } } } @@ -75,4 +77,12 @@ impl Config { self.buffer_size = buffer_size; self } + + /// Set the namespace to consume and push jobs to + /// + /// Defaults to "apalis::sql" + pub fn namespace(mut self, namespace: &str) -> Self { + self.namespace = namespace.to_owned(); + self + } } diff --git a/packages/apalis-sql/src/mysql.rs b/packages/apalis-sql/src/mysql.rs index 5f136959..c81c7ac5 100644 --- a/packages/apalis-sql/src/mysql.rs +++ b/packages/apalis-sql/src/mysql.rs @@ -6,7 +6,8 @@ use apalis_core::poller::controller::Controller; use apalis_core::poller::stream::BackendStream; use apalis_core::poller::Poller; use apalis_core::request::{Request, RequestStream}; -use apalis_core::storage::{Job, Storage}; +use apalis_core::storage::Storage; +use 
apalis_core::task::namespace::Namespace; use apalis_core::task::task_id::TaskId; use apalis_core::worker::WorkerId; use apalis_core::{Backend, Codec}; @@ -109,16 +110,17 @@ impl MysqlStorage { } } -impl MysqlStorage { +impl MysqlStorage { fn stream_jobs( self, worker_id: &WorkerId, interval: Duration, buffer_size: usize, + config: &Config, ) -> impl Stream>, sqlx::Error>> { let pool = self.pool.clone(); let worker_id = worker_id.to_string(); - + let config = config.clone(); try_stream! { let pool = pool.clone(); let buffer_size = u32::try_from(buffer_size) @@ -126,7 +128,7 @@ impl MysqlStorage loop { apalis_core::sleep(interval).await; let pool = pool.clone(); - let job_type = T::NAME; + let job_type = self.config.namespace.clone(); let mut tx = pool.begin().await?; let fetch_query = "SELECT id FROM jobs WHERE status = 'Pending' AND run_at <= NOW() AND job_type = ? ORDER BY run_at ASC LIMIT ? FOR UPDATE SKIP LOCKED"; @@ -159,7 +161,10 @@ impl MysqlStorage yield Some(Into::into(SqlRequest { context: job.context, req: self.codec.decode(&job.req).map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::InvalidData, e)))? - })) + })).map(|mut req: Request| { + req.insert(Namespace(config.namespace.clone())); + req + }) } } } @@ -174,7 +179,7 @@ impl MysqlStorage let pool = self.pool.clone(); let mut tx = pool.acquire().await?; - let worker_type = T::NAME; + let worker_type = self.config.namespace.clone(); let storage_name = std::any::type_name::(); let query = "REPLACE INTO workers (id, worker_type, storage_name, layers, last_seen) VALUES (?, ?, ?, ?, ?);"; @@ -192,7 +197,7 @@ impl MysqlStorage impl Storage for MysqlStorage where - T: Job + Serialize + DeserializeOwned + Send + 'static + Unpin + Sync, + T: Serialize + DeserializeOwned + Send + 'static + Unpin + Sync, { type Job = T; @@ -210,7 +215,7 @@ where .codec .encode(&job) .map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::InvalidData, e)))?; - let job_type = T::NAME; + let job_type = self.config.namespace.clone(); sqlx::query(query) .bind(job) .bind(id.to_string()) @@ -231,7 +236,7 @@ where .encode(&job) .map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::InvalidData, e)))?; - let job_type = T::NAME; + let job_type = self.config.namespace.clone(); sqlx::query(query) .bind(job) .bind(id.to_string()) @@ -346,7 +351,7 @@ where } } -impl Backend> +impl Backend> for MysqlStorage { type Stream = BackendStream>>; @@ -364,7 +369,7 @@ impl Back let ack_notify = self.ack_notify.clone(); let mut hb_storage = self.clone(); let stream = self - .stream_jobs(&worker, config.poll_interval, config.buffer_size) + .stream_jobs(&worker, config.poll_interval, config.buffer_size, &config) .map_err(|e| Error::SourceError(Box::new(e))); let stream = BackendStream::new(stream.boxed(), controller); @@ -426,7 +431,7 @@ impl Ack for MysqlStorage { } } -impl MysqlStorage { +impl MysqlStorage { /// Kill a job pub async fn kill(&mut self, worker_id: &WorkerId, job_id: &TaskId) -> Result<(), sqlx::Error> { let pool = self.pool.clone(); @@ -463,7 +468,7 @@ impl MysqlStorage { /// Readd jobs that are abandoned to the queue pub async fn reenqueue_orphaned(&self, timeout: i64) -> Result { - let job_type = T::NAME; + let job_type = self.config.namespace.clone(); let mut tx = self.pool.acquire().await?; let query = r#"Update jobs INNER JOIN ( SELECT workers.id as worker_id, jobs.id as job_id from workers INNER JOIN jobs ON jobs.lock_by = workers.id WHERE jobs.status = "Running" AND workers.last_seen < ? AND workers.worker_type = ? 
diff --git a/packages/apalis-sql/src/postgres.rs b/packages/apalis-sql/src/postgres.rs index 8d68865f..6de6bef9 100644 --- a/packages/apalis-sql/src/postgres.rs +++ b/packages/apalis-sql/src/postgres.rs @@ -48,7 +48,8 @@ use apalis_core::poller::controller::Controller; use apalis_core::poller::stream::BackendStream; use apalis_core::poller::Poller; use apalis_core::request::{Request, RequestStream}; -use apalis_core::storage::{Job, Storage}; +use apalis_core::storage::Storage; +use apalis_core::task::namespace::Namespace; use apalis_core::task::task_id::TaskId; use apalis_core::worker::WorkerId; use apalis_core::{Backend, Codec}; @@ -118,7 +119,7 @@ impl fmt::Debug for PostgresStorage { } } -impl Backend> +impl Backend> for PostgresStorage { type Stream = BackendStream>>; @@ -241,11 +242,11 @@ impl PgListen { } /// Add a new subscription - pub fn subscribe(&mut self) -> PgSubscription { + pub fn subscribe(&mut self, namespace: &str) -> PgSubscription { let sub = PgSubscription { notify: Notify::new(), }; - self.subscriptions.push((T::NAME.to_owned(), sub.clone())); + self.subscriptions.push((namespace.to_owned(), sub.clone())); sub } /// Start listening to jobs @@ -264,7 +265,7 @@ impl PgListen { } } -impl PostgresStorage { +impl PostgresStorage { fn stream_jobs( &self, worker_id: &WorkerId, @@ -274,12 +275,13 @@ impl PostgresStorage { let pool = self.pool.clone(); let worker_id = worker_id.clone(); let codec = self.codec.clone(); + let config = self.config.clone(); try_stream! { loop { // Ideally wait for a job or a tick apalis_core::sleep(interval).await; let tx = pool.clone(); - let job_type = T::NAME; + let job_type = &config.namespace; let fetch_query = "Select * from apalis.get_jobs($1, $2, $3);"; let jobs: Vec> = sqlx::query_as(fetch_query) .bind(worker_id.to_string()) @@ -293,7 +295,10 @@ impl PostgresStorage { yield Some(Into::into(SqlRequest { context: job.context, req: codec.decode(&job.req).map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::InvalidData, e)))?, - })) + })).map(|mut req: Request| { + req.insert(Namespace(config.namespace.clone())); + req + }) } } @@ -310,7 +315,7 @@ impl PostgresStorage { let last_seen = DateTime::from_timestamp(last_seen, 0).ok_or(sqlx::Error::Io( io::Error::new(io::ErrorKind::InvalidInput, "Invalid Timestamp"), ))?; - let worker_type = T::NAME; + let worker_type = self.config.namespace.clone(); let storage_name = std::any::type_name::(); let query = "INSERT INTO apalis.workers (id, worker_type, storage_name, layers, last_seen) VALUES ($1, $2, $3, $4, $5) @@ -330,7 +335,7 @@ impl PostgresStorage { impl Storage for PostgresStorage where - T: Job + Serialize + DeserializeOwned + Send + 'static + Unpin + Sync, + T: Serialize + DeserializeOwned + Send + 'static + Unpin + Sync, { type Job = T; @@ -353,11 +358,11 @@ where .codec .encode(&job) .map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::InvalidData, e)))?; - let job_type = T::NAME; + let job_type = self.config.namespace.clone(); sqlx::query(query) .bind(job) .bind(id.to_string()) - .bind(job_type.to_string()) + .bind(&job_type) .execute(&pool) .await?; Ok(id) @@ -373,7 +378,7 @@ where .codec .encode(&job) .map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::InvalidInput, e)))?; - let job_type = T::NAME; + let job_type = self.config.namespace.clone(); sqlx::query(query) .bind(job) .bind(id.to_string()) @@ -543,11 +548,8 @@ impl PostgresStorage { } /// Reenqueue jobs that have been abandoned by their workers - pub async fn reenqueue_orphaned(&self, count: i32) -> Result<(), 
sqlx::Error> - where - T: Job, - { - let job_type = T::NAME; + pub async fn reenqueue_orphaned(&self, count: i32) -> Result<(), sqlx::Error> { + let job_type = self.config.namespace.clone(); let mut tx = self.pool.acquire().await?; let query = "Update apalis.jobs SET status = 'Pending', done_at = NULL, lock_by = NULL, lock_at = NULL, last_error ='Job was abandoned' diff --git a/packages/apalis-sql/src/sqlite.rs b/packages/apalis-sql/src/sqlite.rs index e73983f5..e8aad0bf 100644 --- a/packages/apalis-sql/src/sqlite.rs +++ b/packages/apalis-sql/src/sqlite.rs @@ -8,7 +8,8 @@ use apalis_core::poller::controller::Controller; use apalis_core::poller::stream::BackendStream; use apalis_core::poller::Poller; use apalis_core::request::{Request, RequestStream}; -use apalis_core::storage::{Job, Storage}; +use apalis_core::storage::Storage; +use apalis_core::task::namespace::Namespace; use apalis_core::task::task_id::TaskId; use apalis_core::worker::WorkerId; use apalis_core::{Backend, Codec}; @@ -89,7 +90,7 @@ impl SqliteStorage<()> { } } -impl SqliteStorage { +impl SqliteStorage { /// Construct a new Storage from a pool pub fn new(pool: SqlitePool) -> Self { Self::new_with_config(pool, Config::default()) @@ -112,7 +113,7 @@ impl SqliteStorage { last_seen: i64, ) -> Result<(), sqlx::Error> { let pool = self.pool.clone(); - let worker_type = T::NAME; + let worker_type = self.config.namespace.clone(); let storage_name = std::any::type_name::(); let query = "INSERT INTO Workers (id, worker_type, storage_name, layers, last_seen) VALUES ($1, $2, $3, $4, $5) @@ -135,10 +136,11 @@ impl SqliteStorage { } } -async fn fetch_next( +async fn fetch_next( pool: Pool, worker_id: &WorkerId, id: String, + config: &Config, ) -> Result>, sqlx::Error> { let now: i64 = Utc::now().timestamp(); let update_query = "UPDATE Jobs SET status = 'Running', lock_by = ?2, lock_at = ?3 WHERE id = ?1 AND job_type = ?4 AND status = 'Pending' AND lock_by IS NULL; Select * from Jobs where id = ?1 AND lock_by = ?2 AND job_type = ?4"; @@ -146,14 +148,14 @@ async fn fetch_next( .bind(id.to_string()) .bind(worker_id.to_string()) .bind(now) - .bind(T::NAME) + .bind(config.namespace.clone()) .fetch_optional(&pool) .await?; Ok(job) } -impl SqliteStorage { +impl SqliteStorage { fn stream_jobs( &self, worker_id: &WorkerId, @@ -163,12 +165,13 @@ impl SqliteStorage { let pool = self.pool.clone(); let worker_id = worker_id.clone(); let codec = self.codec.clone(); + let config = self.config.clone(); try_stream! 
{ loop { apalis_core::sleep(interval).await; let tx = pool.clone(); let mut tx = tx.acquire().await?; - let job_type = T::NAME; + let job_type = &config.namespace; let fetch_query = "SELECT id FROM Jobs WHERE (status = 'Pending' OR (status = 'Failed' AND attempts < max_attempts)) AND run_at < ?1 AND job_type = ?2 LIMIT ?3"; let now: i64 = Utc::now().timestamp(); @@ -179,7 +182,7 @@ impl SqliteStorage { .fetch_all(&mut *tx) .await?; for id in ids { - let res = fetch_next::(pool.clone(), &worker_id, id.0).await?; + let res = fetch_next(pool.clone(), &worker_id, id.0, &config).await?; yield match res { None => None::>, Some(c) => Some( @@ -190,7 +193,10 @@ impl SqliteStorage { })?, } .into(), - ), + ).map(|mut req: Request| { + req.insert(Namespace(config.namespace.clone())); + req + }), } .map(Into::into); @@ -202,7 +208,7 @@ impl SqliteStorage { impl Storage for SqliteStorage where - T: Job + Serialize + DeserializeOwned + Send + 'static + Unpin + Sync, + T: Serialize + DeserializeOwned + Send + 'static + Unpin + Sync, { type Job = T; @@ -219,7 +225,7 @@ where .codec .encode(&job) .map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::InvalidData, e)))?; - let job_type = T::NAME; + let job_type = self.config.namespace.clone(); sqlx::query(query) .bind(job) .bind(id.to_string()) @@ -238,7 +244,7 @@ where .codec .encode(&job) .map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::InvalidData, e)))?; - let job_type = T::NAME; + let job_type = self.config.namespace.clone(); sqlx::query(query) .bind(job) .bind(id.to_string()) @@ -393,11 +399,8 @@ impl SqliteStorage { } /// Add jobs that failed back to the queue if there are still remaining attemps - pub async fn reenqueue_failed(&self) -> Result<(), sqlx::Error> - where - T: Job, - { - let job_type = T::NAME; + pub async fn reenqueue_failed(&self) -> Result<(), sqlx::Error> { + let job_type = self.config.namespace.clone(); let mut tx = self.pool.acquire().await?; let query = r#"Update Jobs SET status = "Pending", done_at = NULL, lock_by = NULL, lock_at = NULL @@ -419,11 +422,8 @@ impl SqliteStorage { } /// Add jobs that workers have disappeared to the queue - pub async fn reenqueue_orphaned(&self, timeout: i64) -> Result<(), sqlx::Error> - where - T: Job, - { - let job_type = T::NAME; + pub async fn reenqueue_orphaned(&self, timeout: i64) -> Result<(), sqlx::Error> { + let job_type = self.config.namespace.clone(); let mut tx = self.pool.acquire().await?; let query = r#"Update Jobs SET status = "Pending", done_at = NULL, lock_by = NULL, lock_at = NULL, last_error ="Job was abandoned" @@ -442,7 +442,7 @@ impl SqliteStorage { } } -impl Backend> +impl Backend> for SqliteStorage { type Stream = BackendStream>>; diff --git a/src/layers/prometheus/mod.rs b/src/layers/prometheus/mod.rs index 7202a02a..66923d1f 100644 --- a/src/layers/prometheus/mod.rs +++ b/src/layers/prometheus/mod.rs @@ -4,7 +4,7 @@ use std::{ time::Instant, }; -use apalis_core::{error::Error, request::Request, storage::Job}; +use apalis_core::{error::Error, request::Request, task::namespace::Namespace}; use futures::Future; use pin_project_lite::pin_project; use tower::{Layer, Service}; @@ -31,7 +31,6 @@ impl Service> for PrometheusService where S: Service, Response = Res, Error = Error, Future = F>, F: Future> + 'static, - J: Job, { type Response = S::Response; type Error = S::Error; @@ -43,14 +42,16 @@ where fn call(&mut self, request: Request) -> Self::Future { let start = Instant::now(); + let namespace = request.get::().unwrap().to_string(); + let req = 
self.service.call(request); let job_type = std::any::type_name::().to_string(); - let op = J::NAME; + ResponseFuture { inner: req, start, job_type, - operation: op.to_string(), + operation: namespace, } } } @@ -88,10 +89,11 @@ where ("name", this.operation.to_string()), ("namespace", this.job_type.to_string()), ("status", status), - ("latency", latency.to_string()), ]; - metrics::counter!("requests_total", &labels); - metrics::histogram!("request_duration_seconds", &labels); + let counter = metrics::counter!("requests_total", &labels); + counter.increment(1); + let hist = metrics::histogram!("request_duration_seconds", &labels); + hist.record(latency); Poll::Ready(response) } } diff --git a/src/layers/sentry/mod.rs b/src/layers/sentry/mod.rs index 9bcfcd7d..de6f9ae2 100644 --- a/src/layers/sentry/mod.rs +++ b/src/layers/sentry/mod.rs @@ -3,13 +3,13 @@ use std::future::Future; use std::pin::Pin; use std::task::{Context, Poll}; +use apalis_core::task::namespace::Namespace; use sentry_core::protocol; use tower::Layer; use tower::Service; use apalis_core::error::Error; use apalis_core::request::Request; -use apalis_core::storage::Job; use apalis_core::task::attempt::Attempt; use apalis_core::task::task_id::TaskId; @@ -130,7 +130,6 @@ impl Service> for SentryJobService where S: Service, Response = Res, Error = Error, Future = F>, F: Future> + 'static, - J: Job, { type Response = S::Response; type Error = S::Error; @@ -141,11 +140,12 @@ where } fn call(&mut self, request: Request) -> Self::Future { - let op = J::NAME; - let trx_ctx = sentry_core::TransactionContext::new(op, "apalis.job"); let job_type = std::any::type_name::().to_string(); let ctx = request.get::().cloned().unwrap_or_default(); let task_id = request.get::().unwrap(); + let namespace = request.get::().unwrap(); + let trx_ctx = sentry_core::TransactionContext::new(namespace, "apalis.job"); + let job_details = Task { id: task_id.clone(), current_attempt: ctx.current().try_into().unwrap(), diff --git a/src/lib.rs b/src/lib.rs index 34e190a7..f458686e 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -144,14 +144,14 @@ pub mod prelude { layers::extensions::{AddExtension, Data}, memory::{MemoryStorage, MemoryWrapper}, monitor::{Monitor, MonitorContext}, - mq::{Message, MessageQueue}, + mq::MessageQueue, notify::Notify, poller::stream::BackendStream, poller::{controller::Controller, FetchNext, Poller}, request::{Request, RequestStream}, response::IntoResponse, service_fn::{service_fn, FromData, ServiceFn}, - storage::{Job, Storage, StorageStream}, + storage::{Storage, StorageStream}, task::attempt::Attempt, task::task_id::TaskId, worker::{Context, Event, Ready, Worker, WorkerError, WorkerId}, From 7756b37cdb87cbfabd897a6efb838e0b663125e3 Mon Sep 17 00:00:00 2001 From: geofmureithi Date: Wed, 26 Jun 2024 22:05:16 +0300 Subject: [PATCH 05/59] fix: minor fixes on some failures --- README.md | 5 ----- benches/storages.rs | 15 +++++---------- packages/apalis-cron/README.md | 3 --- packages/apalis-sql/src/mysql.rs | 2 +- 4 files changed, 6 insertions(+), 19 deletions(-) diff --git a/README.md b/README.md index b678e7f5..daf20977 100644 --- a/README.md +++ b/README.md @@ -68,17 +68,12 @@ apalis = { version = "0.5", features = ["redis"] } # Backends available: postgre use apalis::prelude::*; use apalis::redis::RedisStorage; use serde::{Deserialize, Serialize}; -use anyhow::Result; #[derive(Debug, Deserialize, Serialize)] struct Email { to: String, } -impl Job for Email { - const NAME: &'static str = "apalis::Email"; -} - /// A function that will be 
converted into a service. async fn send_email(job: Email, data: Data) -> Result<(), Error> { /// execute job diff --git a/benches/storages.rs b/benches/storages.rs index 45b28741..1b099284 100644 --- a/benches/storages.rs +++ b/benches/storages.rs @@ -77,11 +77,6 @@ macro_rules! define_bench { #[derive(Serialize, Deserialize, Debug)] struct TestJob; - -impl Job for TestJob { - const NAME: &'static str = "TestJob"; -} - #[derive(Debug, Default, Clone)] struct Counter(Arc); @@ -148,11 +143,11 @@ define_bench!("postgres", { PostgresStorage::new(pool) }); -// define_bench!("mysql", { -// let pool = MySqlPool::connect(env!("MYSQL_URL")).await.unwrap(); -// let _ = MysqlStorage::setup(&pool).await.unwrap(); -// MysqlStorage::new(pool) -// }); +define_bench!("mysql", { + let pool = MySqlPool::connect(env!("MYSQL_URL")).await.unwrap(); + let _ = MysqlStorage::setup(&pool).await.unwrap(); + MysqlStorage::new(pool) +}); criterion_group!(benches, sqlite_in_memory, redis, postgres); criterion_main!(benches); diff --git a/packages/apalis-cron/README.md b/packages/apalis-cron/README.md index ee1158b8..d9d0e0c4 100644 --- a/packages/apalis-cron/README.md +++ b/packages/apalis-cron/README.md @@ -15,9 +15,6 @@ use std::str::FromStr; #[derive(Default, Debug, Clone)] struct Reminder; -impl Job for Reminder { - const NAME: &'static str = "reminder::DailyReminder"; -} async fn send_reminder(job: Reminder, ctx: JobContext) { // Do reminder stuff } diff --git a/packages/apalis-sql/src/mysql.rs b/packages/apalis-sql/src/mysql.rs index c81c7ac5..faf67673 100644 --- a/packages/apalis-sql/src/mysql.rs +++ b/packages/apalis-sql/src/mysql.rs @@ -539,7 +539,7 @@ mod tests { async fn consume_one(storage: &MysqlStorage, worker_id: &WorkerId) -> Request { let storage = storage.clone(); - let mut stream = storage.stream_jobs(worker_id, std::time::Duration::from_secs(10), 1); + let mut stream = storage.stream_jobs(worker_id, std::time::Duration::from_secs(10), 1, &Config::default()); stream .next() .await From fcce6594ce664def868c1460e593f5034dde458a Mon Sep 17 00:00:00 2001 From: geofmureithi Date: Wed, 26 Jun 2024 22:07:21 +0300 Subject: [PATCH 06/59] lint: cargo fmt --- packages/apalis-sql/src/mysql.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/packages/apalis-sql/src/mysql.rs b/packages/apalis-sql/src/mysql.rs index faf67673..d894b59a 100644 --- a/packages/apalis-sql/src/mysql.rs +++ b/packages/apalis-sql/src/mysql.rs @@ -539,7 +539,12 @@ mod tests { async fn consume_one(storage: &MysqlStorage, worker_id: &WorkerId) -> Request { let storage = storage.clone(); - let mut stream = storage.stream_jobs(worker_id, std::time::Duration::from_secs(10), 1, &Config::default()); + let mut stream = storage.stream_jobs( + worker_id, + std::time::Duration::from_secs(10), + 1, + &Config::default(), + ); stream .next() .await From 009f5771cb4d8ddb003c7cc50178f8de66239e40 Mon Sep 17 00:00:00 2001 From: geofmureithi Date: Wed, 26 Jun 2024 22:10:43 +0300 Subject: [PATCH 07/59] fix: remove Job impl --- src/lib.rs | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index f458686e..06e24206 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -25,11 +25,7 @@ //! struct Email { //! to: String, //! } -//! -//! impl Job for Email { -//! const NAME: &'static str = "apalis::Email"; -//! } -//! +//! //! async fn send_email(job: Email, data: Data) -> Result<(), Error> { //! Ok(()) //! 
} From 7bbde7f564780447116aaef699f570aed586cba7 Mon Sep 17 00:00:00 2001 From: geofmureithi Date: Wed, 26 Jun 2024 22:19:11 +0300 Subject: [PATCH 08/59] lint: cargo fmt --- src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/lib.rs b/src/lib.rs index 06e24206..b8464ebe 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -25,7 +25,7 @@ //! struct Email { //! to: String, //! } -//! +//! //! async fn send_email(job: Email, data: Data) -> Result<(), Error> { //! Ok(()) //! } From b0af55c3a3cbce27d7209c16c6067110f1f2ef5f Mon Sep 17 00:00:00 2001 From: geofmureithi Date: Wed, 26 Jun 2024 22:31:14 +0300 Subject: [PATCH 09/59] bench: improve polling --- benches/storages.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/benches/storages.rs b/benches/storages.rs index 1b099284..64a067c8 100644 --- a/benches/storages.rs +++ b/benches/storages.rs @@ -51,7 +51,7 @@ macro_rules! define_bench { for _i in 0..s { let _ = s1.push(TestJob).await; } - while s1.len().await.unwrap_or(-1) != 0 { + while counter.0.load(Ordering::Relaxed) != s && s1.len().await.unwrap_or(-1) != 0 { interval.tick().await; } counter.0.store(0, Ordering::Relaxed); From 2fec565a9ebfc355db45aa83cca8f0af66fce4e2 Mon Sep 17 00:00:00 2001 From: Geoffrey Mureithi <95377562+geofmureithi@users.noreply.github.com> Date: Thu, 27 Jun 2024 22:35:00 +0300 Subject: [PATCH 10/59] fix: introduce namespace and codec config (#339) * fix: introduce namespace and codec config * fix: missing apis --- Cargo.toml | 2 +- README.md | 4 +- benches/storages.rs | 9 +-- examples/actix-web/src/main.rs | 3 +- examples/axum/src/main.rs | 3 +- examples/prometheus/src/main.rs | 3 +- examples/redis-with-msg-pack/Cargo.toml | 20 ++++++ examples/redis-with-msg-pack/src/main.rs | 66 +++++++++++++++++++ examples/redis/src/main.rs | 5 +- examples/sentry/src/main.rs | 4 +- examples/tracing/src/main.rs | 4 +- .../apalis-core/src/codec/message_pack.rs | 0 packages/apalis-redis/src/lib.rs | 4 +- packages/apalis-redis/src/storage.rs | 47 ++++++++----- src/lib.rs | 4 +- 15 files changed, 139 insertions(+), 39 deletions(-) create mode 100644 examples/redis-with-msg-pack/Cargo.toml create mode 100644 examples/redis-with-msg-pack/src/main.rs delete mode 100644 packages/apalis-core/src/codec/message_pack.rs diff --git a/Cargo.toml b/Cargo.toml index 9a346828..aaf6f19d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -148,7 +148,7 @@ members = [ "examples/tracing", # "examples/rest-api", "examples/async-std-runtime", - "examples/basics", + "examples/basics", "examples/redis-with-msg-pack", ] diff --git a/README.md b/README.md index daf20977..08acd4fe 100644 --- a/README.md +++ b/README.md @@ -66,7 +66,7 @@ apalis = { version = "0.5", features = ["redis"] } # Backends available: postgre ```rust use apalis::prelude::*; -use apalis::redis::RedisStorage; +use apalis::redis::{RedisStorage, Config}; use serde::{Deserialize, Serialize}; #[derive(Debug, Deserialize, Serialize)] @@ -85,7 +85,7 @@ async fn main() -> Result<()> { std::env::set_var("RUST_LOG", "debug"); env_logger::init(); let redis_url = std::env::var("REDIS_URL").expect("Missing env variable REDIS_URL"); - let storage = RedisStorage::new(redis).await?; + let storage = RedisStorage::new(redis, Config::default()).await?; Monitor::new() .register_with_count(2, { WorkerBuilder::new(format!("email-worker")) diff --git a/benches/storages.rs b/benches/storages.rs index 64a067c8..b80f38fd 100644 --- a/benches/storages.rs +++ b/benches/storages.rs @@ -6,6 +6,7 @@ use apalis::{ postgres::{PgPool, 
PostgresStorage}, sqlite::{SqlitePool, SqliteStorage}, }; +use apalis_redis::Config; use criterion::*; use futures::Future; use paste::paste; @@ -26,7 +27,7 @@ macro_rules! define_bench { group.bench_with_input(BenchmarkId::new("consume", size), &size, |b, &s| { b.to_async(Runtime::new().unwrap()) .iter_custom(|iters| async move { - let mut interval = tokio::time::interval(Duration::from_millis(100)); + let mut interval = tokio::time::interval(Duration::from_millis(150)); let storage = { $setup }; let mut s1 = storage.clone(); let counter = Counter::default(); @@ -51,7 +52,7 @@ macro_rules! define_bench { for _i in 0..s { let _ = s1.push(TestJob).await; } - while counter.0.load(Ordering::Relaxed) != s && s1.len().await.unwrap_or(-1) != 0 { + while (counter.0.load(Ordering::Relaxed) != s) || (s1.len().await.unwrap_or(-1) != 0) { interval.tick().await; } counter.0.store(0, Ordering::Relaxed); @@ -133,7 +134,7 @@ define_bench!("sqlite_in_memory", { define_bench!("redis", { let conn = apalis::redis::connect(env!("REDIS_URL")).await.unwrap(); - let redis = RedisStorage::new(conn); + let redis = RedisStorage::new(conn, Config::default()); redis }); @@ -149,5 +150,5 @@ define_bench!("mysql", { MysqlStorage::new(pool) }); -criterion_group!(benches, sqlite_in_memory, redis, postgres); +criterion_group!(benches, sqlite_in_memory); criterion_main!(benches); diff --git a/examples/actix-web/src/main.rs b/examples/actix-web/src/main.rs index 41e42be5..d8317105 100644 --- a/examples/actix-web/src/main.rs +++ b/examples/actix-web/src/main.rs @@ -2,6 +2,7 @@ use actix_web::rt::signal; use actix_web::{web, App, HttpResponse, HttpServer}; use anyhow::Result; use apalis::prelude::*; +use apalis::redis::Config; use apalis::utils::TokioExecutor; use apalis::{layers::tracing::TraceLayer, redis::RedisStorage}; use futures::future; @@ -27,7 +28,7 @@ async fn main() -> Result<()> { env_logger::init(); let conn = apalis::redis::connect("redis://127.0.0.1/").await?; - let storage = RedisStorage::new(conn); + let storage = RedisStorage::new(conn, Config::default()); let data = web::Data::new(storage.clone()); let http = async { HttpServer::new(move || { diff --git a/examples/axum/src/main.rs b/examples/axum/src/main.rs index 590cd32b..e9bfb3b5 100644 --- a/examples/axum/src/main.rs +++ b/examples/axum/src/main.rs @@ -5,6 +5,7 @@ //! ``` use anyhow::Result; use apalis::prelude::*; +use apalis::redis::Config; use apalis::{layers::tracing::TraceLayer, redis::RedisStorage}; use axum::{ extract::Form, @@ -56,7 +57,7 @@ async fn main() -> Result<()> { .with(tracing_subscriber::fmt::layer()) .init(); let conn = apalis::redis::connect("redis://127.0.0.1/").await?; - let storage = RedisStorage::new(conn); + let storage = RedisStorage::new(conn, Config::default()); // build our application with some routes let app = Router::new() .route("/", get(show_form).post(add_new_job::)) diff --git a/examples/prometheus/src/main.rs b/examples/prometheus/src/main.rs index 4f680cd1..0d01f426 100644 --- a/examples/prometheus/src/main.rs +++ b/examples/prometheus/src/main.rs @@ -5,6 +5,7 @@ //! 
``` use anyhow::Result; use apalis::prelude::*; +use apalis::redis::Config; use apalis::{layers::prometheus::PrometheusLayer, redis::RedisStorage}; use axum::{ extract::Form, @@ -30,7 +31,7 @@ async fn main() -> Result<()> { .with(tracing_subscriber::fmt::layer()) .init(); let conn = apalis::redis::connect("redis://127.0.0.1/").await?; - let storage = RedisStorage::new(conn); + let storage = RedisStorage::new(conn, Config::default()); // build our application with some routes let recorder_handle = setup_metrics_recorder(); let app = Router::new() diff --git a/examples/redis-with-msg-pack/Cargo.toml b/examples/redis-with-msg-pack/Cargo.toml new file mode 100644 index 00000000..f459387c --- /dev/null +++ b/examples/redis-with-msg-pack/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "redis-with-msg-pack" +version = "0.1.0" +edition = "2021" + +[dependencies] +anyhow = "1" +tokio = { version = "1", features = ["full"] } +apalis = { path = "../../", features = ["redis", "timeout"] } +serde = "1" +env_logger = "0.10" +tracing-subscriber = "0.3.11" +chrono = { version = "0.4", default-features = false, features = ["clock"] } +email-service = { path = "../email-service" } +rmp-serde = "1.3" + + +[dependencies.tracing] +default-features = false +version = "0.1" diff --git a/examples/redis-with-msg-pack/src/main.rs b/examples/redis-with-msg-pack/src/main.rs new file mode 100644 index 00000000..fc096c62 --- /dev/null +++ b/examples/redis-with-msg-pack/src/main.rs @@ -0,0 +1,66 @@ +use std::time::Duration; + +use anyhow::Result; +use apalis::prelude::*; +use apalis::redis::RedisStorage; + +use email_service::{send_email, Email}; +use serde::{de::DeserializeOwned, Serialize}; +use tracing::info; + +struct MessagePack; + +impl Codec> for MessagePack { + type Error = Error; + fn encode(&self, input: &T) -> Result, Self::Error> { + rmp_serde::to_vec(input).map_err(|e| Error::SourceError(Box::new(e))) + } + + fn decode(&self, compact: &Vec) -> Result { + rmp_serde::from_slice(compact).map_err(|e| Error::SourceError(Box::new(e))) + } +} + +#[tokio::main] +async fn main() -> Result<()> { + std::env::set_var("RUST_LOG", "debug"); + + tracing_subscriber::fmt::init(); + + let conn = apalis::redis::connect("redis://127.0.0.1/").await?; + let config = apalis::redis::Config::default() + .set_namespace("apalis::redis-with-msg-pack") + .set_max_retries(5); + let storage = RedisStorage::new_with_codec(conn, config, MessagePack); + // This can be in another part of the program + produce_jobs(storage.clone()).await?; + + let worker = WorkerBuilder::new("rango-tango") + .with_storage(storage) + .build_fn(send_email); + + Monitor::::new() + .register_with_count(2, worker) + .shutdown_timeout(Duration::from_millis(5000)) + .run_with_signal(async { + tokio::signal::ctrl_c().await?; + info!("Monitor starting shutdown"); + Ok(()) + }) + .await?; + info!("Monitor shutdown complete"); + Ok(()) +} + +async fn produce_jobs(mut storage: RedisStorage) -> Result<()> { + for index in 0..10 { + storage + .push(Email { + to: index.to_string(), + text: "Test background job from apalis".to_string(), + subject: "Background email job".to_string(), + }) + .await?; + } + Ok(()) +} diff --git a/examples/redis/src/main.rs b/examples/redis/src/main.rs index d9b80e7f..20b1018c 100644 --- a/examples/redis/src/main.rs +++ b/examples/redis/src/main.rs @@ -5,8 +5,8 @@ use std::{ }; use anyhow::Result; -use apalis::prelude::*; use apalis::redis::RedisStorage; +use apalis::{prelude::*, redis::Config}; use email_service::{send_email, Email}; use 
tracing::{error, info}; @@ -41,8 +41,7 @@ async fn main() -> Result<()> { tracing_subscriber::fmt::init(); let conn = apalis::redis::connect("redis://127.0.0.1/").await?; - let config = apalis::redis::Config::default(); - let storage = RedisStorage::new_with_config(conn, config); + let storage = RedisStorage::new(conn, Config::default()); // This can be in another part of the program produce_jobs(storage.clone()).await?; diff --git a/examples/sentry/src/main.rs b/examples/sentry/src/main.rs index 376507a6..6b1d0cec 100644 --- a/examples/sentry/src/main.rs +++ b/examples/sentry/src/main.rs @@ -9,7 +9,7 @@ use anyhow::Result; use apalis::{ layers::{sentry::SentryLayer, tracing::TraceLayer}, prelude::*, - redis::RedisStorage, + redis::{Config, RedisStorage}, }; use email_service::Email; use tokio::time::sleep; @@ -130,7 +130,7 @@ async fn main() -> Result<()> { .init(); let conn = apalis::redis::connect(redis_url).await?; - let storage = RedisStorage::new(conn); + let storage = RedisStorage::new(conn, Config::default()); //This can be in another part of the program produce_jobs(storage.clone()).await?; diff --git a/examples/tracing/src/main.rs b/examples/tracing/src/main.rs index 64323b5d..f84e7220 100644 --- a/examples/tracing/src/main.rs +++ b/examples/tracing/src/main.rs @@ -8,7 +8,7 @@ use tracing_subscriber::prelude::*; use apalis::{ layers::tracing::TraceLayer, prelude::{Monitor, Storage, WorkerBuilder, WorkerFactoryFn}, - redis::RedisStorage, + redis::{Config, RedisStorage}, utils::TokioExecutor, }; @@ -66,7 +66,7 @@ async fn main() -> Result<()> { let conn = apalis::redis::connect(redis_url) .await .expect("Could not connect to RedisStorage"); - let storage = RedisStorage::new(conn); + let storage = RedisStorage::new(conn, Config::default()); //This can be in another part of the program produce_jobs(storage.clone()).await?; diff --git a/packages/apalis-core/src/codec/message_pack.rs b/packages/apalis-core/src/codec/message_pack.rs deleted file mode 100644 index e69de29b..00000000 diff --git a/packages/apalis-redis/src/lib.rs b/packages/apalis-redis/src/lib.rs index 78151282..8d40e653 100644 --- a/packages/apalis-redis/src/lib.rs +++ b/packages/apalis-redis/src/lib.rs @@ -8,13 +8,13 @@ //! apalis storage using Redis as a backend //! ```rust,no_run //! use apalis::prelude::*; -//! use apalis::redis::RedisStorage; +//! use apalis::redis::{RedisStorage, Config}; //! use email_service::send_email; //! //! #[tokio::main] //! async fn main() { //! let conn = apalis::redis::connect("redis://127.0.0.1/").await.unwrap(); -//! let storage = RedisStorage::new(conn); +//! let storage = RedisStorage::new(conn, Config::default()); //! Monitor::::new() //! .register( //! 
WorkerBuilder::new("tasty-pear") diff --git a/packages/apalis-redis/src/storage.rs b/packages/apalis-redis/src/storage.rs index 0b45d23c..a8099792 100644 --- a/packages/apalis-redis/src/storage.rs +++ b/packages/apalis-redis/src/storage.rs @@ -180,34 +180,45 @@ impl Config { &self.enqueue_scheduled } + /// get the namespace + pub fn get_namespace(&self) -> &String { + &self.namespace + } + /// get the fetch interval - pub fn set_fetch_interval(&mut self, fetch_interval: Duration) { + pub fn set_fetch_interval(mut self, fetch_interval: Duration) -> Self { self.fetch_interval = fetch_interval; + self } /// set the buffer setting - pub fn set_buffer_size(&mut self, buffer_size: usize) { + pub fn set_buffer_size(mut self, buffer_size: usize) -> Self { self.buffer_size = buffer_size; + self } /// set the max-retries setting - pub fn set_max_retries(&mut self, max_retries: usize) { + pub fn set_max_retries(mut self, max_retries: usize) -> Self { self.max_retries = max_retries; + self } /// set the keep-alive setting - pub fn set_keep_alive(&mut self, keep_alive: Duration) { + pub fn set_keep_alive(mut self, keep_alive: Duration) -> Self { self.keep_alive = keep_alive; + self } /// get the enqueued setting - pub fn set_enqueue_scheduled(&mut self, enqueue_scheduled: Duration) { + pub fn set_enqueue_scheduled(mut self, enqueue_scheduled: Duration) -> Self { self.enqueue_scheduled = enqueue_scheduled; + self } /// set the namespace for the Storage - pub fn set_namespace(&mut self, namespace: String) { - self.namespace = namespace; + pub fn set_namespace(mut self, namespace: &str) -> Self { + self.namespace = namespace.to_owned(); + self } /// Returns the Redis key for the list of active jobs associated with the queue. @@ -332,19 +343,21 @@ impl Clone for RedisStorage { } impl RedisStorage { - /// Start a new connection - pub fn new(conn: ConnectionManager) -> Self { - Self::new_with_config(conn, Config::default()) - } - /// Start a new connection providing custom config - pub fn new_with_config(conn: ConnectionManager, config: Config) -> Self { + pub fn new(conn: ConnectionManager, config: Config) -> Self { + Self::new_with_codec(conn, config, JsonCodec) + } + /// Start a new connection providing custom config and a codec + pub fn new_with_codec(conn: ConnectionManager, config: Config, codec: C) -> Self + where + C: Codec, Vec, Error = apalis_core::error::Error> + Sync + Send + 'static, + { RedisStorage { conn, job_type: PhantomData, controller: Controller::new(), config, - codec: Arc::new(Box::new(JsonCodec)), + codec: Arc::new(Box::new(codec)), scripts: RedisScript { ack_job: redis::Script::new(include_str!("../lua/ack_job.lua")), push_job: redis::Script::new(include_str!("../lua/push_job.lua")), @@ -884,7 +897,7 @@ mod tests { // (different runtimes are created for each test), // we don't share the storage and tests must be run sequentially. 
let conn = connect(redis_url).await.unwrap(); - let storage = RedisStorage::new(conn); + let storage = RedisStorage::new(conn, Config::default()); storage } @@ -898,8 +911,6 @@ mod tests { .expect("failed to Flushdb"); } - struct DummyService {} - fn example_email() -> Email { Email { subject: "Test Subject".to_string(), @@ -1021,7 +1032,7 @@ mod tests { let worker_id = register_worker_at(&mut storage).await; let _job = consume_one(&mut storage, &worker_id).await; - let result = storage + storage .reenqueue_orphaned(5, 300) .await .expect("failed to reenqueue_orphaned"); diff --git a/src/lib.rs b/src/lib.rs index b8464ebe..d432ea3c 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -19,7 +19,7 @@ //! ```rust, no_run //! use apalis::prelude::*; //! use serde::{Deserialize, Serialize}; -//! use apalis::redis::RedisStorage; +//! use apalis::redis::{RedisStorage, Config}; //! //! #[derive(Debug, Deserialize, Serialize)] //! struct Email { @@ -34,7 +34,7 @@ //! async fn main() { //! let redis = std::env::var("REDIS_URL").expect("Missing REDIS_URL env variable"); //! let conn = apalis::redis::connect(redis).await.unwrap(); -//! let storage = RedisStorage::new(conn); +//! let storage = RedisStorage::new(conn, Config::default()); //! Monitor::::new() //! .register_with_count(2, { //! WorkerBuilder::new(&format!("quick-sand")) From bcbb015af140d6d03c0ad220f684b17a4d8a2be4 Mon Sep 17 00:00:00 2001 From: Geoffrey Mureithi <95377562+geofmureithi@users.noreply.github.com> Date: Tue, 2 Jul 2024 22:34:44 +0300 Subject: [PATCH 11/59] Version: 0.6.0-alpha.1 (#342) * api: for redis and sqlite * Version: 0.6.0-alpha.1 Changelog: - Redis storage doesnt require pool to be clone. Allows use of deadpool-redis among others. - Namespace is picked by default for `new` methods. * fix: docs and tests * lint: cargo clippy and fmt * postgres: add a listener example --- Cargo.toml | 2 +- benches/storages.rs | 2 +- examples/actix-web/src/main.rs | 3 +- examples/axum/src/main.rs | 3 +- examples/postgres/src/main.rs | 20 +- examples/prometheus/src/main.rs | 3 +- examples/redis-deadpool/Cargo.toml | 21 ++ examples/redis-deadpool/src/main.rs | 57 ++++ examples/redis/src/main.rs | 4 +- examples/sentry/src/main.rs | 4 +- examples/tracing/src/main.rs | 4 +- packages/apalis-core/src/builder.rs | 9 +- packages/apalis-core/src/layers.rs | 29 +- packages/apalis-core/src/lib.rs | 60 +++- packages/apalis-core/src/memory.rs | 9 +- packages/apalis-core/src/poller/mod.rs | 24 +- packages/apalis-core/src/request.rs | 9 +- packages/apalis-core/src/storage/mod.rs | 11 +- packages/apalis-core/src/worker/stream.rs | 1 - packages/apalis-redis/src/lib.rs | 2 +- packages/apalis-redis/src/storage.rs | 360 +++++++++++----------- packages/apalis-sql/src/context.rs | 4 +- packages/apalis-sql/src/lib.rs | 5 + packages/apalis-sql/src/mysql.rs | 34 +- packages/apalis-sql/src/postgres.rs | 257 +++++++-------- packages/apalis-sql/src/sqlite.rs | 98 +++--- src/lib.rs | 2 +- 27 files changed, 610 insertions(+), 427 deletions(-) create mode 100644 examples/redis-deadpool/Cargo.toml create mode 100644 examples/redis-deadpool/src/main.rs diff --git a/Cargo.toml b/Cargo.toml index aaf6f19d..35e2f887 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -148,7 +148,7 @@ members = [ "examples/tracing", # "examples/rest-api", "examples/async-std-runtime", - "examples/basics", "examples/redis-with-msg-pack", + "examples/basics", "examples/redis-with-msg-pack", "examples/redis-deadpool", ] diff --git a/benches/storages.rs b/benches/storages.rs index b80f38fd..2e601bb4 100644 --- 
a/benches/storages.rs +++ b/benches/storages.rs @@ -134,7 +134,7 @@ define_bench!("sqlite_in_memory", { define_bench!("redis", { let conn = apalis::redis::connect(env!("REDIS_URL")).await.unwrap(); - let redis = RedisStorage::new(conn, Config::default()); + let redis = RedisStorage::new(conn); redis }); diff --git a/examples/actix-web/src/main.rs b/examples/actix-web/src/main.rs index d8317105..41e42be5 100644 --- a/examples/actix-web/src/main.rs +++ b/examples/actix-web/src/main.rs @@ -2,7 +2,6 @@ use actix_web::rt::signal; use actix_web::{web, App, HttpResponse, HttpServer}; use anyhow::Result; use apalis::prelude::*; -use apalis::redis::Config; use apalis::utils::TokioExecutor; use apalis::{layers::tracing::TraceLayer, redis::RedisStorage}; use futures::future; @@ -28,7 +27,7 @@ async fn main() -> Result<()> { env_logger::init(); let conn = apalis::redis::connect("redis://127.0.0.1/").await?; - let storage = RedisStorage::new(conn, Config::default()); + let storage = RedisStorage::new(conn); let data = web::Data::new(storage.clone()); let http = async { HttpServer::new(move || { diff --git a/examples/axum/src/main.rs b/examples/axum/src/main.rs index e9bfb3b5..590cd32b 100644 --- a/examples/axum/src/main.rs +++ b/examples/axum/src/main.rs @@ -5,7 +5,6 @@ //! ``` use anyhow::Result; use apalis::prelude::*; -use apalis::redis::Config; use apalis::{layers::tracing::TraceLayer, redis::RedisStorage}; use axum::{ extract::Form, @@ -57,7 +56,7 @@ async fn main() -> Result<()> { .with(tracing_subscriber::fmt::layer()) .init(); let conn = apalis::redis::connect("redis://127.0.0.1/").await?; - let storage = RedisStorage::new(conn, Config::default()); + let storage = RedisStorage::new(conn); // build our application with some routes let app = Router::new() .route("/", get(show_form).post(add_new_job::)) diff --git a/examples/postgres/src/main.rs b/examples/postgres/src/main.rs index 88650dc8..11d235cc 100644 --- a/examples/postgres/src/main.rs +++ b/examples/postgres/src/main.rs @@ -1,15 +1,13 @@ use anyhow::Result; use apalis::layers::retry::RetryPolicy; -use apalis::postgres::PgPool; +use apalis::postgres::{PgListen, PgPool}; use apalis::prelude::*; use apalis::{layers::tracing::TraceLayer, postgres::PostgresStorage}; use email_service::{send_email, Email}; use tower::retry::RetryLayer; use tracing::{debug, info}; -async fn produce_jobs(storage: &PostgresStorage) -> Result<()> { - // The programmatic way - let mut storage = storage.clone(); +async fn produce_jobs(storage: &mut PostgresStorage) -> Result<()> { for index in 0..10 { storage .push(Email { @@ -35,15 +33,23 @@ async fn main() -> Result<()> { .await .expect("unable to run migrations for postgres"); - let pg = PostgresStorage::new(pool); - produce_jobs(&pg).await?; + let mut pg = PostgresStorage::new(pool.clone()); + produce_jobs(&mut pg).await?; + + let mut listener = PgListen::new(pool).await?; + + listener.subscribe_with(&mut pg); + + tokio::spawn(async move { + listener.listen().await.unwrap(); + }); Monitor::::new() .register_with_count(4, { WorkerBuilder::new("tasty-orange") .layer(TraceLayer::new()) .layer(RetryLayer::new(RetryPolicy::retries(5))) - .with_storage(pg.clone()) + .with_storage(pg) .build_fn(send_email) }) .on_event(|e| debug!("{e:?}")) diff --git a/examples/prometheus/src/main.rs b/examples/prometheus/src/main.rs index 0d01f426..4f680cd1 100644 --- a/examples/prometheus/src/main.rs +++ b/examples/prometheus/src/main.rs @@ -5,7 +5,6 @@ //! 
``` use anyhow::Result; use apalis::prelude::*; -use apalis::redis::Config; use apalis::{layers::prometheus::PrometheusLayer, redis::RedisStorage}; use axum::{ extract::Form, @@ -31,7 +30,7 @@ async fn main() -> Result<()> { .with(tracing_subscriber::fmt::layer()) .init(); let conn = apalis::redis::connect("redis://127.0.0.1/").await?; - let storage = RedisStorage::new(conn, Config::default()); + let storage = RedisStorage::new(conn); // build our application with some routes let recorder_handle = setup_metrics_recorder(); let app = Router::new() diff --git a/examples/redis-deadpool/Cargo.toml b/examples/redis-deadpool/Cargo.toml new file mode 100644 index 00000000..f0d3424d --- /dev/null +++ b/examples/redis-deadpool/Cargo.toml @@ -0,0 +1,21 @@ +[package] +name = "redis-deadpool" +version = "0.1.0" +edition = "2021" + +[dependencies] +deadpool-redis = { version = "0.15.1" } +anyhow = "1" +tokio = { version = "1", features = ["full"] } +apalis = { path = "../../", features = ["redis", "timeout"] } +serde = "1" +env_logger = "0.10" +tracing-subscriber = "0.3.11" +chrono = { version = "0.4", default-features = false, features = ["clock"] } +email-service = { path = "../email-service" } +rmp-serde = "1.3" + + +[dependencies.tracing] +default-features = false +version = "0.1" diff --git a/examples/redis-deadpool/src/main.rs b/examples/redis-deadpool/src/main.rs new file mode 100644 index 00000000..c98ce20b --- /dev/null +++ b/examples/redis-deadpool/src/main.rs @@ -0,0 +1,57 @@ +use std::time::Duration; + +use anyhow::Result; +use apalis::prelude::*; +use apalis::redis::RedisStorage; + +use deadpool_redis::{Config, Connection, Runtime}; +use email_service::{send_email, Email}; +use tracing::info; + +#[tokio::main] +async fn main() -> Result<()> { + std::env::set_var("RUST_LOG", "debug"); + + tracing_subscriber::fmt::init(); + + let config = apalis::redis::Config::default() + .set_namespace("apalis::redis-dead-pool") + .set_max_retries(5); + + let cfg = Config::from_url("redis://127.0.0.1/"); + let pool = cfg.create_pool(Some(Runtime::Tokio1)).unwrap(); + let conn = pool.get().await.unwrap(); + let mut storage = RedisStorage::new_with_config(conn, config); + // This can be in another part of the program + produce_jobs(&mut storage).await?; + + let worker = WorkerBuilder::new("rango-tango") + .with_storage(storage) + .data(pool) + .build_fn(send_email); + + Monitor::::new() + .register_with_count(2, worker) + .shutdown_timeout(Duration::from_millis(5000)) + .run_with_signal(async { + tokio::signal::ctrl_c().await?; + info!("Monitor starting shutdown"); + Ok(()) + }) + .await?; + info!("Monitor shutdown complete"); + Ok(()) +} + +async fn produce_jobs(storage: &mut RedisStorage) -> Result<()> { + for index in 0..10 { + storage + .push(Email { + to: index.to_string(), + text: "Test background job from apalis".to_string(), + subject: "Background email job".to_string(), + }) + .await?; + } + Ok(()) +} diff --git a/examples/redis/src/main.rs b/examples/redis/src/main.rs index 20b1018c..87a35ea1 100644 --- a/examples/redis/src/main.rs +++ b/examples/redis/src/main.rs @@ -5,8 +5,8 @@ use std::{ }; use anyhow::Result; +use apalis::prelude::*; use apalis::redis::RedisStorage; -use apalis::{prelude::*, redis::Config}; use email_service::{send_email, Email}; use tracing::{error, info}; @@ -41,7 +41,7 @@ async fn main() -> Result<()> { tracing_subscriber::fmt::init(); let conn = apalis::redis::connect("redis://127.0.0.1/").await?; - let storage = RedisStorage::new(conn, Config::default()); + let storage = 
RedisStorage::new(conn); // This can be in another part of the program produce_jobs(storage.clone()).await?; diff --git a/examples/sentry/src/main.rs b/examples/sentry/src/main.rs index 6b1d0cec..376507a6 100644 --- a/examples/sentry/src/main.rs +++ b/examples/sentry/src/main.rs @@ -9,7 +9,7 @@ use anyhow::Result; use apalis::{ layers::{sentry::SentryLayer, tracing::TraceLayer}, prelude::*, - redis::{Config, RedisStorage}, + redis::RedisStorage, }; use email_service::Email; use tokio::time::sleep; @@ -130,7 +130,7 @@ async fn main() -> Result<()> { .init(); let conn = apalis::redis::connect(redis_url).await?; - let storage = RedisStorage::new(conn, Config::default()); + let storage = RedisStorage::new(conn); //This can be in another part of the program produce_jobs(storage.clone()).await?; diff --git a/examples/tracing/src/main.rs b/examples/tracing/src/main.rs index f84e7220..64323b5d 100644 --- a/examples/tracing/src/main.rs +++ b/examples/tracing/src/main.rs @@ -8,7 +8,7 @@ use tracing_subscriber::prelude::*; use apalis::{ layers::tracing::TraceLayer, prelude::{Monitor, Storage, WorkerBuilder, WorkerFactoryFn}, - redis::{Config, RedisStorage}, + redis::RedisStorage, utils::TokioExecutor, }; @@ -66,7 +66,7 @@ async fn main() -> Result<()> { let conn = apalis::redis::connect(redis_url) .await .expect("Could not connect to RedisStorage"); - let storage = RedisStorage::new(conn, Config::default()); + let storage = RedisStorage::new(conn); //This can be in another part of the program produce_jobs(storage.clone()).await?; diff --git a/packages/apalis-core/src/builder.rs b/packages/apalis-core/src/builder.rs index 3bd05b19..2449eef9 100644 --- a/packages/apalis-core/src/builder.rs +++ b/packages/apalis-core/src/builder.rs @@ -167,8 +167,9 @@ where S::Future: Send, S::Response: 'static, - P::Layer: Layer, - M: Layer<>::Service>, + M: Layer, + // P::Layer: Layer, + // M: Layer<>::Service>, { type Source = P; @@ -176,9 +177,9 @@ where /// Build a worker, given a tower service fn build(self, service: S) -> Worker> { let worker_id = self.id; - let common_layer = self.source.common_layer(worker_id.clone()); + // let common_layer = self.source.common_layer(worker_id.clone()); let poller = self.source; - let middleware = self.layer.layer(common_layer); + let middleware = self.layer; let service = middleware.service(service); Worker::new(worker_id, Ready::new(service, poller)) diff --git a/packages/apalis-core/src/layers.rs b/packages/apalis-core/src/layers.rs index 59108edf..16355bfd 100644 --- a/packages/apalis-core/src/layers.rs +++ b/packages/apalis-core/src/layers.rs @@ -1,10 +1,13 @@ +use futures::channel::mpsc::{SendError, Sender}; +use futures::SinkExt; use std::marker::PhantomData; use std::{fmt, sync::Arc}; -pub use tower::{layer::layer_fn, util::BoxCloneService, Layer, Service, ServiceBuilder}; - -use futures::{future::BoxFuture, Future, FutureExt}; +pub use tower::{ + layer::layer_fn, layer::util::Identity, util::BoxCloneService, Layer, Service, ServiceBuilder, +}; use crate::{request::Request, worker::WorkerId}; +use futures::{future::BoxFuture, Future, FutureExt}; /// A generic layer that has been stripped off types. 
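The layers.rs hunk that follows reworks acknowledgement: `Ack::ack` now takes `&mut self`, so an acknowledger can own mutable state (a counter, a channel, a connection) instead of cloning per call. A speculative sketch of an implementor against the new signature; the diff rendering here strips generic parameters, so the `Ack<J>` job-type parameter and its bounds are assumptions inferred from `AckLayer`:

```rust
use std::convert::Infallible;
use std::future::{ready, Future};

use apalis_core::layers::Ack;
use apalis_core::task::task_id::TaskId;
use apalis_core::worker::WorkerId;

/// A toy acknowledger that just counts completions; the count is
/// reachable precisely because `ack` now receives `&mut self`.
#[derive(Clone, Debug, Default)]
struct CountingAck {
    acked: usize,
}

impl<J> Ack<J> for CountingAck {
    type Acknowledger = TaskId;
    type Error = Infallible;

    fn ack(
        &mut self,
        worker_id: &WorkerId,
        task_id: &TaskId,
    ) -> impl Future<Output = Result<(), Infallible>> + Send {
        self.acked += 1;
        println!("{worker_id} acked {task_id} ({} so far)", self.acked);
        ready(Ok(()))
    }
}
```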
/// This is returned by a [crate::Backend] and can be used to customize the middleware of the service consuming tasks @@ -158,12 +161,28 @@ pub trait Ack { type Error: std::error::Error; /// Acknowledges successful processing of the given request fn ack( - &self, + &mut self, worker_id: &WorkerId, data: &Self::Acknowledger, ) -> impl Future> + Send; } +/// A generic stream that emits (worker_id, task_id) +#[derive(Debug)] +pub struct AckStream(pub Sender<(WorkerId, A)>); + +impl Ack for AckStream { + type Acknowledger = A; + type Error = SendError; + fn ack( + &mut self, + worker_id: &WorkerId, + data: &Self::Acknowledger, + ) -> impl Future> + Send { + self.0.send((worker_id.clone(), data.clone())).boxed() + } +} + /// A layer that acknowledges a job completed successfully #[derive(Debug)] pub struct AckLayer, J> { @@ -244,7 +263,7 @@ where } fn call(&mut self, request: Request) -> Self::Future { - let ack = self.ack.clone(); + let mut ack = self.ack.clone(); let worker_id = self.worker_id.clone(); let data = request.get::<>::Acknowledger>().cloned(); diff --git a/packages/apalis-core/src/lib.rs b/packages/apalis-core/src/lib.rs index afbcdb9e..8fb411d9 100644 --- a/packages/apalis-core/src/lib.rs +++ b/packages/apalis-core/src/lib.rs @@ -77,14 +77,8 @@ pub trait Backend { /// Returns the final decoration of layers type Layer; - /// Allows the backend to decorate the service with [Layer] - /// - /// [Layer]: tower::Layer - #[allow(unused)] - fn common_layer(&self, worker: WorkerId) -> Self::Layer; - /// Returns a poller that is ready for streaming - fn poll(self, worker: WorkerId) -> Poller; + fn poll(self, worker: WorkerId) -> Poller; } /// This allows encoding and decoding of requests in different backends @@ -105,6 +99,58 @@ pub async fn sleep(duration: std::time::Duration) { futures_timer::Delay::new(duration).await; } +#[cfg(feature = "sleep")] +/// Interval utilities +pub mod interval { + use std::fmt; + use std::future::Future; + use std::pin::Pin; + use std::task::{Context, Poll}; + use std::time::Duration; + + use futures::future::BoxFuture; + use futures::Stream; + + use crate::sleep; + /// Creates a new stream that yields at a set interval. 
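Before its definition, a usage sketch of this helper: the stream yields `()` every `duration`, which turns periodic housekeeping into a plain loop (the module is gated behind the `sleep` feature, per the cfg above):

```rust
use futures::StreamExt;
use std::time::Duration;

async fn tick_demo() {
    // Take three ticks, roughly 10ms apart, then stop.
    let mut ticks = apalis_core::interval::interval(Duration::from_millis(10)).take(3);
    while let Some(()) = ticks.next().await {
        // periodic work goes here, e.g. a keep-alive ping
    }
}
```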
+ pub fn interval(duration: Duration) -> Interval { + Interval { + timer: Box::pin(sleep(duration)), + interval: duration, + } + } + + /// A stream representing notifications at fixed interval + #[must_use = "streams do nothing unless polled or .awaited"] + pub struct Interval { + timer: BoxFuture<'static, ()>, + interval: Duration, + } + + impl fmt::Debug for Interval { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Interval") + .field("interval", &self.interval) + .field("timer", &"a future represented `apalis_core::sleep`") + .finish() + } + } + + impl Stream for Interval { + type Item = (); + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + match Pin::new(&mut self.timer).poll(cx) { + Poll::Ready(_) => {} + Poll::Pending => return Poll::Pending, + }; + let interval = self.interval; + let _ = std::mem::replace(&mut self.timer, Box::pin(sleep(interval))); + Poll::Ready(Some(())) + } + } +} + #[cfg(test)] #[doc(hidden)] #[derive(Debug, Default, Clone)] diff --git a/packages/apalis-core/src/memory.rs b/packages/apalis-core/src/memory.rs index dd832a52..f4a2da6d 100644 --- a/packages/apalis-core/src/memory.rs +++ b/packages/apalis-core/src/memory.rs @@ -14,7 +14,7 @@ use std::{ sync::Arc, task::{Context, Poll}, }; -use tower::{layer::util::Identity, ServiceBuilder}; +use tower::layer::util::Identity; #[derive(Debug)] /// An example of the basics of a backend @@ -99,17 +99,14 @@ impl Stream for MemoryWrapper { impl Backend> for MemoryStorage { type Stream = BackendStream>>; - type Layer = ServiceBuilder; - - fn common_layer(&self, _worker: WorkerId) -> Self::Layer { - ServiceBuilder::new() - } + type Layer = Identity; fn poll(self, _worker: WorkerId) -> Poller { let stream = self.inner.map(|r| Ok(Some(Request::new(r)))).boxed(); Poller { stream: BackendStream::new(stream, self.controller), heartbeat: Box::pin(async {}), + layer: Identity::new(), } } } diff --git a/packages/apalis-core/src/poller/mod.rs b/packages/apalis-core/src/poller/mod.rs index 4e3855d3..03c17313 100644 --- a/packages/apalis-core/src/poller/mod.rs +++ b/packages/apalis-core/src/poller/mod.rs @@ -3,6 +3,7 @@ use std::{ fmt::{self, Debug}, ops::{Deref, DerefMut}, }; +use tower::layer::util::Identity; /// Util for controlling pollers pub mod controller; @@ -10,29 +11,46 @@ pub mod controller; pub mod stream; /// A poller type that allows fetching from a stream and a heartbeat future that can be used to do periodic tasks -pub struct Poller { +pub struct Poller { pub(crate) stream: S, pub(crate) heartbeat: BoxFuture<'static, ()>, + pub(crate) layer: L, } -impl Poller { +impl Poller { /// Build a new poller pub fn new(stream: S, heartbeat: impl Future + Send + 'static) -> Self { Self { stream, heartbeat: heartbeat.boxed(), + layer: Identity::new(), + } + } + + /// Build a poller with layer + pub fn new_with_layer( + stream: S, + heartbeat: impl Future + Send + 'static, + layer: L, + ) -> Poller { + Poller { + stream, + heartbeat: heartbeat.boxed(), + layer, } } } -impl Debug for Poller +impl Debug for Poller where S: Debug, + L: Debug, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Poller") .field("stream", &self.stream) .field("heartbeat", &"...") + .field("layer", &self.layer) .finish() } } diff --git a/packages/apalis-core/src/request.rs b/packages/apalis-core/src/request.rs index 2468b767..d6478240 100644 --- a/packages/apalis-core/src/request.rs +++ b/packages/apalis-core/src/request.rs @@ -1,6 +1,6 @@ use futures::{future::BoxFuture, 
Stream}; use serde::{Deserialize, Serialize}; -use tower::{layer::util::Identity, ServiceBuilder}; +use tower::layer::util::Identity; use std::{fmt::Debug, pin::Pin}; @@ -64,16 +64,13 @@ pub type RequestStream = BoxStream<'static, Result, Error>>; impl Backend> for RequestStream> { type Stream = Self; - type Layer = ServiceBuilder; - - fn common_layer(&self, _worker: WorkerId) -> Self::Layer { - ServiceBuilder::new() - } + type Layer = Identity; fn poll(self, _worker: WorkerId) -> Poller { Poller { stream: self, heartbeat: Box::pin(async {}), + layer: Identity::new(), } } } diff --git a/packages/apalis-core/src/storage/mod.rs b/packages/apalis-core/src/storage/mod.rs index a8e3d1e6..5e2aedb3 100644 --- a/packages/apalis-core/src/storage/mod.rs +++ b/packages/apalis-core/src/storage/mod.rs @@ -8,7 +8,6 @@ use crate::{request::Request, Backend}; pub type StorageStream = BoxStream<'static, Result>, E>>; /// Represents a [Storage] that can persist a request. -/// The underlying type must implement [Job] pub trait Storage: Backend> { /// The type of job that can be persisted type Job; @@ -33,17 +32,17 @@ pub trait Storage: Backend> { ) -> impl Future> + Send; /// Return the number of pending jobs from the queue - fn len(&self) -> impl Future> + Send; + fn len(&mut self) -> impl Future> + Send; /// Fetch a job given an id fn fetch_by_id( - &self, + &mut self, job_id: &Self::Identifier, ) -> impl Future>, Self::Error>> + Send; /// Update a job details fn update( - &self, + &mut self, job: Request, ) -> impl Future> + Send; @@ -55,8 +54,8 @@ pub trait Storage: Backend> { ) -> impl Future> + Send; /// Returns true if there is no jobs in the storage - fn is_empty(&self) -> impl Future> + Send; + fn is_empty(&mut self) -> impl Future> + Send; /// Vacuum the storage, removes done and killed jobs - fn vacuum(&self) -> impl Future> + Send; + fn vacuum(&mut self) -> impl Future> + Send; } diff --git a/packages/apalis-core/src/worker/stream.rs b/packages/apalis-core/src/worker/stream.rs index 820e2897..e2a9ec81 100644 --- a/packages/apalis-core/src/worker/stream.rs +++ b/packages/apalis-core/src/worker/stream.rs @@ -4,7 +4,6 @@ use std::task::{Context, Poll}; use super::WorkerNotify; -// Define your struct pub(crate) struct WorkerStream where S: Stream, diff --git a/packages/apalis-redis/src/lib.rs b/packages/apalis-redis/src/lib.rs index 8d40e653..3c3c96cf 100644 --- a/packages/apalis-redis/src/lib.rs +++ b/packages/apalis-redis/src/lib.rs @@ -14,7 +14,7 @@ //! #[tokio::main] //! async fn main() { //! let conn = apalis::redis::connect("redis://127.0.0.1/").await.unwrap(); -//! let storage = RedisStorage::new(conn, Config::default()); +//! let storage = RedisStorage::new(conn); //! Monitor::::new() //! .register( //! 
WorkerBuilder::new("tasty-pear") diff --git a/packages/apalis-redis/src/storage.rs b/packages/apalis-redis/src/storage.rs index a8099792..700b4638 100644 --- a/packages/apalis-redis/src/storage.rs +++ b/packages/apalis-redis/src/storage.rs @@ -1,7 +1,6 @@ use apalis_core::codec::json::JsonCodec; use apalis_core::data::Extensions; -use apalis_core::error::Error; -use apalis_core::layers::{Ack, AckLayer}; +use apalis_core::layers::{AckLayer, AckStream}; use apalis_core::poller::controller::Controller; use apalis_core::poller::stream::BackendStream; use apalis_core::poller::Poller; @@ -12,16 +11,19 @@ use apalis_core::task::namespace::Namespace; use apalis_core::task::task_id::TaskId; use apalis_core::worker::WorkerId; use apalis_core::{Backend, Codec}; -use async_stream::try_stream; use chrono::Utc; -use futures::{FutureExt, TryFutureExt, TryStreamExt}; +use futures::channel::mpsc; +use futures::{select, FutureExt, SinkExt, StreamExt, TryFutureExt}; use log::*; +use redis::aio::ConnectionLike; use redis::ErrorKind; use redis::{aio::ConnectionManager, Client, IntoConnectionInfo, RedisError, Script, Value}; use serde::{de::DeserializeOwned, Deserialize, Serialize}; -use std::fmt; + +use std::any::type_name; use std::num::TryFromIntError; use std::sync::Arc; +use std::{fmt, io}; use std::{marker::PhantomData, time::Duration}; /// Shorthand to create a client and connect @@ -133,7 +135,7 @@ struct Context { /// Config for a [RedisStorage] #[derive(Clone, Debug)] pub struct Config { - fetch_interval: Duration, + poll_interval: Duration, buffer_size: usize, max_retries: usize, keep_alive: Duration, @@ -144,7 +146,7 @@ pub struct Config { impl Default for Config { fn default() -> Self { Self { - fetch_interval: Duration::from_millis(100), + poll_interval: Duration::from_millis(100), buffer_size: 10, max_retries: 5, keep_alive: Duration::from_secs(30), @@ -155,9 +157,9 @@ impl Default for Config { } impl Config { - /// Get the rate of polling per unit of time - pub fn get_fetch_interval(&self) -> &Duration { - &self.fetch_interval + /// Get the interval of polling + pub fn get_poll_interval(&self) -> &Duration { + &self.poll_interval } /// Get the number of jobs to fetch @@ -185,9 +187,9 @@ impl Config { &self.namespace } - /// get the fetch interval - pub fn set_fetch_interval(mut self, fetch_interval: Duration) -> Self { - self.fetch_interval = fetch_interval; + /// get the poll interval + pub fn set_poll_interval(mut self, poll_interval: Duration) -> Self { + self.poll_interval = poll_interval; self } @@ -309,8 +311,8 @@ pub type RedisCodec = Arc< >; /// Represents a [Storage] that uses Redis for storage. 
-pub struct RedisStorage { - conn: ConnectionManager, +pub struct RedisStorage { + conn: Conn, job_type: PhantomData, scripts: RedisScript, controller: Controller, @@ -318,7 +320,7 @@ pub struct RedisStorage { codec: RedisCodec, } -impl fmt::Debug for RedisStorage { +impl fmt::Debug for RedisStorage { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("RedisStorage") .field("conn", &"ConnectionManager") @@ -329,7 +331,7 @@ } } -impl Clone for RedisStorage { +impl Clone for RedisStorage { fn clone(&self) -> Self { Self { conn: self.conn.clone(), @@ -342,13 +344,23 @@ } } -impl RedisStorage { - /// Start a new connection providing custom config - pub fn new(conn: ConnectionManager, config: Config) -> Self { +impl RedisStorage { + /// Start a new connection + pub fn new(conn: Conn) -> Self { + Self::new_with_codec( + conn, + Config::default().set_namespace(type_name::()), + JsonCodec, + ) + } + + /// Start a connection with a custom config + pub fn new_with_config(conn: Conn, config: Config) -> Self { Self::new_with_codec(conn, config, JsonCodec) } + /// Start a new connection providing custom config and a codec - pub fn new_with_codec(conn: ConnectionManager, config: Config, codec: C) -> Self + pub fn new_with_codec(conn: Conn, config: Config, codec: C) -> Self where C: Codec, Vec, Error = apalis_core::error::Error> + Sync + Send + 'static, { @@ -381,8 +393,8 @@ } /// Get current connection - pub fn get_connection(&self) -> ConnectionManager { - self.conn.clone() + pub fn get_connection(&self) -> &Conn { + &self.conn } /// Get the config used by the storage @@ -396,61 +408,68 @@ } } -impl Backend> - for RedisStorage +impl< + T: Serialize + DeserializeOwned + Sync + Send + Unpin + 'static, + Conn: ConnectionLike + Send + Sync + 'static, + > Backend> for RedisStorage { type Stream = BackendStream>>; - type Layer = AckLayer, T>; - - fn common_layer(&self, worker_id: WorkerId) -> Self::Layer { - AckLayer::new(self.clone(), worker_id) - } + type Layer = AckLayer, T>; - fn poll(self, worker: WorkerId) -> Poller { - let mut storage = self.clone(); + fn poll(mut self, worker: WorkerId) -> Poller { + let (mut tx, rx) = mpsc::channel(self.config.buffer_size); + let (ack_tx, ack_rx) = mpsc::channel(self.config.buffer_size); + let ack = AckStream(ack_tx); + let layer = AckLayer::new(ack, worker.clone()); let controller = self.controller.clone(); let config = self.config.clone(); - let stream: RequestStream> = Box::pin( - self.stream_jobs(&worker, config.fetch_interval, config.buffer_size) - .map_err(|e| Error::SourceError(e.into())), - ); + let stream: RequestStream> = Box::pin(rx); + let heartbeat = async move { + let mut keep_alive_stm = apalis_core::interval::interval(config.keep_alive).fuse(); + + let mut enqueue_scheduled_stm = + apalis_core::interval::interval(config.enqueue_scheduled).fuse(); + + let mut poll_next_stm = apalis_core::interval::interval(config.poll_interval).fuse(); + + // TODO: use .ready_chunks(config.buffer_size) + // TODO: create an ack_jobs.lua + let mut ack_stream = ack_rx.fuse(); - let keep_alive = async move { - loop { - if let Err(e) = storage.keep_alive(&worker).await { - error!("Could not call keep_alive for Worker [{worker}]: {e}") - } - apalis_core::sleep(config.keep_alive).await; - } - } - .boxed(); - let mut storage = self.clone(); - let enqueue_scheduled = async move { loop { - if let Err(e) =
storage.enqueue_scheduled(config.buffer_size).await { - error!("Could not call enqueue_scheduled: {e}") - } - apalis_core::sleep(config.enqueue_scheduled).await; + select! { + _ = keep_alive_stm.next() => { + self.keep_alive(&worker).await.unwrap(); + } + _ = enqueue_scheduled_stm.next() => { + self.enqueue_scheduled(config.buffer_size).await.unwrap(); + } + _ = poll_next_stm.next() => { + let res = self.fetch_next(&worker).await.unwrap(); + for job in res { + tx.send(Ok(Some(job))).await.unwrap(); + } + } + id_to_ack = ack_stream.next() => { + if let Some((worker_id, task_id)) = id_to_ack { + self.ack(&worker_id, &task_id).await.unwrap(); + } + } + }; } - } - .boxed(); - let heartbeat = async move { - futures::join!(enqueue_scheduled, keep_alive); }; - Poller::new(BackendStream::new(stream, controller), heartbeat.boxed()) + Poller::new_with_layer( + BackendStream::new(stream, controller), + heartbeat.boxed(), + layer, + ) } } -impl Ack for RedisStorage { - type Acknowledger = TaskId; - type Error = RedisError; - async fn ack( - &self, - worker_id: &WorkerId, - task_id: &Self::Acknowledger, - ) -> Result<(), RedisError> { - let mut conn = self.conn.clone(); +impl RedisStorage { + /// Ack a job + pub async fn ack(&mut self, worker_id: &WorkerId, task_id: &TaskId) -> Result<(), RedisError> { let ack_job = self.scripts.ack_job.clone(); let inflight_set = format!("{}:{}", self.config.inflight_jobs_set(), worker_id); let done_jobs_set = &self.config.done_jobs_set(); @@ -462,19 +481,17 @@ impl Ack for RedisStorage { .key(done_jobs_set) .arg(task_id.to_string()) .arg(now) - .invoke_async(&mut conn) + .invoke_async(&mut self.conn) .await } } -impl RedisStorage { - fn stream_jobs( - &self, - worker_id: &WorkerId, - interval: Duration, - buffer_size: usize, - ) -> RequestStream> { - let mut conn = self.conn.clone(); +impl< + T: DeserializeOwned + Send + Unpin + Send + Sync + 'static, + Conn: ConnectionLike + Send + Sync + 'static, + > RedisStorage +{ + async fn fetch_next(&mut self, worker_id: &WorkerId) -> Result>, RedisError> { let fetch_jobs = self.scripts.get_jobs.clone(); let consumers_set = self.config.consumers_set(); let active_jobs_list = self.config.active_jobs_list(); @@ -483,65 +500,66 @@ impl RedisStorage let signal_list = self.config.signal_list(); let codec = self.codec.clone(); let namespace = self.config.namespace.clone(); - Box::pin(try_stream! 
{ - loop { - apalis_core::sleep(interval).await; - let result = fetch_jobs - .key(&consumers_set) - .key(&active_jobs_list) - .key(&inflight_set) - .key(&job_data_hash) - .key(&signal_list) - .arg(buffer_size) // No of jobs to fetch - .arg(&inflight_set) - .invoke_async::<_, Vec>(&mut conn).await; - match result { - Ok(jobs) => { - for job in jobs { - let request = deserialize_job(&job).map(|res| codec.decode(res)).transpose()?.map(Into::into).map(|mut req: Request| { - req.insert(Namespace(namespace.clone())); - req - }); - yield request - } - }, - Err(e) => { - warn!("An error occurred during streaming jobs: {e}"); - } - } + let result = fetch_jobs + .key(&consumers_set) + .key(&active_jobs_list) + .key(&inflight_set) + .key(&job_data_hash) + .key(&signal_list) + .arg(self.config.buffer_size) // No of jobs to fetch + .arg(&inflight_set) + .invoke_async::<_, Vec>(&mut self.conn) + .await; + match result { + Ok(jobs) => { + let mut processed = vec![]; + for job in jobs { + let bytes = deserialize_job(&job)?; + let request = codec + .decode(bytes) + .map(Into::into) + .map(|mut req: Request| { + req.insert(Namespace(namespace.clone())); + req + }) + .map_err(|e| build_error(&e.to_string()))?; + processed.push(request) + } + Ok(processed) } - }) + Err(e) => { + warn!("An error occurred during streaming jobs: {e}"); + Err(e) + } + } } } -fn deserialize_job(job: &Value) -> Option<&Vec> { - let job = match job { - job @ Value::Data(_) => Some(job), - Value::Bulk(val) => val.first(), - _ => { - error!( - "Decoding Message Failed: {:?}", - "unknown result type for next message" - ); - None - } - }; +fn build_error(message: &str) -> RedisError { + RedisError::from(io::Error::new(io::ErrorKind::InvalidData, message)) +} +fn deserialize_job(job: &Value) -> Result<&Vec, RedisError> { match job { - Some(Value::Data(v)) => Some(v), - None => None, - _ => { - error!("Decoding Message Failed: {:?}", "Expected Data(&Vec)"); - None - } + Value::Data(bytes) => Ok(bytes), + Value::Bulk(val) => val + .first() + .and_then(|val| { + if let Value::Data(bytes) = val { + Some(bytes) + } else { + None + } + }) + .ok_or(build_error("Value::Bulk: Invalid data returned by storage")), + _ => Err(build_error("unknown result type for next message")), } } -impl RedisStorage { +impl RedisStorage { async fn keep_alive(&mut self, worker_id: &WorkerId) -> Result<(), RedisError> { - let mut conn = self.conn.clone(); let register_consumer = self.scripts.register_consumer.clone(); let inflight_set = format!("{}:{}", self.config.inflight_jobs_set(), worker_id); let consumers_set = self.config.consumers_set(); @@ -552,12 +570,12 @@ impl RedisStorage { .key(consumers_set) .arg(now) .arg(inflight_set) - .invoke_async(&mut conn) + .invoke_async(&mut self.conn) .await } } -impl Storage for RedisStorage +impl Storage for RedisStorage where T: Serialize + DeserializeOwned + Send + 'static + Unpin + Sync, { @@ -566,7 +584,7 @@ where type Identifier = TaskId; async fn push(&mut self, job: Self::Job) -> Result { - let mut conn = self.conn.clone(); + let conn = &mut self.conn; let push_job = self.scripts.push_job.clone(); let job_data_hash = self.config.job_data_hash(); let active_jobs_list = self.config.active_jobs_list(); @@ -586,13 +604,12 @@ where .key(signal_list) .arg(job_id.to_string()) .arg(job) - .invoke_async(&mut conn) + .invoke_async(conn) .await?; Ok(job_id.clone()) } async fn schedule(&mut self, job: Self::Job, on: i64) -> Result { - let mut conn = self.conn.clone(); let schedule_job = self.scripts.schedule_job.clone(); let 
job_data_hash = self.config.job_data_hash(); let scheduled_jobs_set = self.config.scheduled_jobs_set(); @@ -612,51 +629,44 @@ where .arg(job_id.to_string()) .arg(job) .arg(on) - .invoke_async(&mut conn) + .invoke_async(&mut self.conn) .await?; Ok(job_id.clone()) } - async fn len(&self) -> Result { - let mut conn = self.conn.clone(); + async fn len(&mut self) -> Result { let all_jobs: i64 = redis::cmd("HLEN") .arg(&self.config.job_data_hash()) - .query_async(&mut conn) + .query_async(&mut self.conn) .await?; let done_jobs: i64 = redis::cmd("ZCOUNT") .arg(self.config.done_jobs_set()) .arg("-inf") .arg("+inf") - .query_async(&mut conn) + .query_async(&mut self.conn) .await?; Ok(all_jobs - done_jobs) } - async fn fetch_by_id(&self, job_id: &TaskId) -> Result>, RedisError> { - let mut conn = self.conn.clone(); + async fn fetch_by_id( + &mut self, + job_id: &TaskId, + ) -> Result>, RedisError> { let data: Value = redis::cmd("HMGET") .arg(&self.config.job_data_hash()) .arg(job_id.to_string()) - .query_async(&mut conn) + .query_async(&mut self.conn) .await?; - let job = deserialize_job(&data); - match job { - None => Err(RedisError::from(( - ErrorKind::ResponseError, - "Invalid data returned by storage", - ))), - Some(bytes) => { - let inner = self - .codec - .decode(bytes) - .map_err(|e| (ErrorKind::IoError, "Decode error", e.to_string()))?; - Ok(Some(inner.into())) - } - } + let bytes = deserialize_job(&data)?; + + let inner = self + .codec + .decode(bytes) + .map_err(|e| (ErrorKind::IoError, "Decode error", e.to_string()))?; + Ok(Some(inner.into())) } - async fn update(&self, job: Request) -> Result<(), RedisError> { + async fn update(&mut self, job: Request) -> Result<(), RedisError> { let job = job.try_into()?; - let mut conn = self.conn.clone(); let bytes = self .codec .encode(&job) @@ -665,13 +675,12 @@ where .arg(&self.config.job_data_hash()) .arg(job.ctx.id.to_string()) .arg(bytes) - .query_async(&mut conn) + .query_async(&mut self.conn) .await?; Ok(()) } async fn reschedule(&mut self, job: Request, wait: Duration) -> Result<(), RedisError> { - let mut conn = self.conn.clone(); let schedule_job = self.scripts.schedule_job.clone(); let job_id = job .get::() @@ -697,13 +706,13 @@ where redis::cmd("SREM") .arg(inflight_set) .arg(job_id.to_string()) - .query_async(&mut conn) + .query_async(&mut self.conn) .await?; redis::cmd("ZADD") .arg(failed_jobs_set) .arg(on) .arg(job_id.to_string()) - .query_async(&mut conn) + .query_async(&mut self.conn) .await?; schedule_job .key(job_data_hash) @@ -711,41 +720,38 @@ where .arg(job_id.to_string()) .arg(job) .arg(on + wait) - .invoke_async(&mut conn) + .invoke_async(&mut self.conn) .await } - async fn is_empty(&self) -> Result { + async fn is_empty(&mut self) -> Result { self.len().map_ok(|res| res == 0).await } - async fn vacuum(&self) -> Result { + async fn vacuum(&mut self) -> Result { let vacuum_script = self.scripts.vacuum.clone(); - let mut conn = self.conn.clone(); - vacuum_script .key(self.config.dead_jobs_set()) .key(self.config.job_data_hash()) - .invoke_async(&mut conn) + .invoke_async(&mut self.conn) .await } } -impl RedisStorage { +impl RedisStorage { /// Attempt to retry a job pub async fn retry(&mut self, worker_id: &WorkerId, task_id: &TaskId) -> Result where T: Send + DeserializeOwned + Serialize + Unpin + Sync + 'static, { - let mut conn = self.conn.clone(); let retry_job = self.scripts.retry_job.clone(); let inflight_set = format!("{}:{}", self.config.inflight_jobs_set(), worker_id); let scheduled_jobs_set = 
self.config.scheduled_jobs_set(); let job_data_hash = self.config.job_data_hash(); - let job_fut = self.fetch_by_id(task_id); let failed_jobs_set = self.config.failed_jobs_set(); - let mut storage = self.clone(); + let job_fut = self.fetch_by_id(task_id); let now: i64 = Utc::now().timestamp(); let res = job_fut.await?; + let conn = &mut self.conn; match res { Some(job) => { let attempt = job.get::().cloned().unwrap_or_default(); @@ -754,9 +760,9 @@ impl RedisStorage { .arg(failed_jobs_set) .arg(now) .arg(task_id.to_string()) - .query_async(&mut conn) + .query_async(conn) .await?; - storage.kill(worker_id, task_id).await?; + self.kill(worker_id, task_id).await?; return Ok(1); } let job = self @@ -771,7 +777,7 @@ impl RedisStorage { .arg(task_id.to_string()) .arg(now) .arg(job) - .invoke_async(&mut conn) + .invoke_async(conn) .await; match res { Ok(count) => Ok(count), @@ -787,7 +793,6 @@ impl RedisStorage { where T: Send + DeserializeOwned + Serialize + Unpin + Sync + 'static, { - let mut conn = self.conn.clone(); let kill_job = self.scripts.kill_job.clone(); let current_worker_id = format!("{}:{}", self.config.inflight_jobs_set(), worker_id); let job_data_hash = self.config.job_data_hash(); @@ -808,7 +813,7 @@ impl RedisStorage { .arg(task_id.to_string()) .arg(now) .arg(data) - .invoke_async(&mut conn) + .invoke_async(&mut self.conn) .await } None => Err(RedisError::from((ErrorKind::ResponseError, "Id not found"))), @@ -838,7 +843,6 @@ impl RedisStorage { /// Re-enqueue some jobs that might be abandoned. pub async fn reenqueue_active(&mut self, job_ids: Vec<&TaskId>) -> Result<(), RedisError> { - let mut conn = self.conn.clone(); let reenqueue_active = self.scripts.reenqueue_active.clone(); let inflight_set = self.config.inflight_jobs_set().to_string(); let active_jobs_list = self.config.active_jobs_list(); @@ -854,7 +858,7 @@ impl RedisStorage { .map(|j| j.to_string()) .collect::>(), ) - .invoke_async(&mut conn) + .invoke_async(&mut self.conn) .await } /// Re-enqueue some jobs that might be orphaned. @@ -886,7 +890,6 @@ impl RedisStorage { #[cfg(test)] mod tests { use email_service::Email; - use futures::StreamExt; use super::*; @@ -897,7 +900,7 @@ mod tests { // (different runtimes are created for each test), // we don't share the storage and tests must be run sequentially. 
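// A minimal sketch of how a test could still pick a custom namespace now that
// `new` defaults the config; `new_with_config` mirrors the sqlite constructor
// shown later in this series, and the `set_namespace` setter name here is an
// assumption for illustration only:
//
// let conn = connect(redis_url).await.unwrap();
// let config = Config::default().set_namespace("apalis::test::email");
// let storage: RedisStorage<Email> = RedisStorage::new_with_config(conn, config);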
let conn = connect(redis_url).await.unwrap(); - let storage = RedisStorage::new(conn, Config::default()); + let storage = RedisStorage::new(conn); storage } @@ -919,14 +922,17 @@ mod tests { } } - async fn consume_one(storage: &RedisStorage, worker_id: &WorkerId) -> Request { - let mut stream = storage.stream_jobs(worker_id, std::time::Duration::from_secs(10), 1); + async fn consume_one( + storage: &mut RedisStorage, + worker_id: &WorkerId, + ) -> Request { + let stream = storage.fetch_next(worker_id); stream - .next() .await .expect("stream is empty") + .first() .expect("failed to poll job") - .expect("no job is pending") + .clone() } async fn register_worker_at(storage: &mut RedisStorage) -> WorkerId { diff --git a/packages/apalis-sql/src/context.rs b/packages/apalis-sql/src/context.rs index 50cc0cf8..5bc1b135 100644 --- a/packages/apalis-sql/src/context.rs +++ b/packages/apalis-sql/src/context.rs @@ -6,7 +6,7 @@ use sqlx::types::chrono::{DateTime, Utc}; use std::{fmt, str::FromStr}; /// The context for a job is represented here -/// Used to provide a context when a job is defined through the [Job] trait +/// Used to provide a context for a job with an sql backend #[derive(Debug, Clone)] pub struct SqlContext { id: TaskId, @@ -127,7 +127,7 @@ impl SqlContext { } } -/// Represents the state of a [Request] +/// Represents the state of a job #[derive(Serialize, Deserialize, Debug, Clone, Hash, PartialEq, std::cmp::Eq)] pub enum State { /// Job is pending diff --git a/packages/apalis-sql/src/lib.rs b/packages/apalis-sql/src/lib.rs index ed611938..e74c7a5a 100644 --- a/packages/apalis-sql/src/lib.rs +++ b/packages/apalis-sql/src/lib.rs @@ -54,6 +54,11 @@ impl Default for Config { } impl Config { + /// Create a new config with a jobs namespace + pub fn new(namespace: &str) -> Self { + Config::default().namespace(namespace) + } + /// Interval between database poll queries /// /// Defaults to 30ms diff --git a/packages/apalis-sql/src/mysql.rs b/packages/apalis-sql/src/mysql.rs index d894b59a..be54ed67 100644 --- a/packages/apalis-sql/src/mysql.rs +++ b/packages/apalis-sql/src/mysql.rs @@ -19,6 +19,7 @@ use serde_json::Value; use sqlx::mysql::MySqlRow; use sqlx::types::chrono::{DateTime, Utc}; use sqlx::{MySql, Pool, Row}; +use std::any::type_name; use std::convert::TryInto; use std::sync::Arc; use std::{fmt, io}; @@ -89,7 +90,7 @@ impl MysqlStorage<()> { impl MysqlStorage { /// Create a new instance from a pool pub fn new(pool: MySqlPool) -> Self { - Self::new_with_config(pool, Config::default()) + Self::new_with_config(pool, Config::new(type_name::())) } /// Create a new instance from a pool and custom config @@ -248,7 +249,7 @@ where } async fn fetch_by_id( - &self, + &mut self, job_id: &TaskId, ) -> Result>, sqlx::Error> { let pool = self.pool.clone(); @@ -272,7 +273,7 @@ where } } - async fn len(&self) -> Result { + async fn len(&mut self) -> Result { let pool = self.pool.clone(); let query = "Select Count(*) as count from jobs where status='Pending'"; @@ -303,7 +304,7 @@ where Ok(()) } - async fn update(&self, job: Request) -> Result<(), sqlx::Error> { + async fn update(&mut self, job: Request) -> Result<(), sqlx::Error> { let pool = self.pool.clone(); let ctx = job .get::() @@ -339,11 +340,11 @@ where Ok(()) } - async fn is_empty(&self) -> Result { + async fn is_empty(&mut self) -> Result { Ok(self.len().await? 
== 0) } - async fn vacuum(&self) -> Result { + async fn vacuum(&mut self) -> Result { let pool = self.pool.clone(); let query = "Delete from jobs where status='Done'"; let record = sqlx::query(query).execute(&pool).await?; @@ -358,11 +359,8 @@ impl Backend, T>; - fn common_layer(&self, worker_id: WorkerId) -> Self::Layer { - AckLayer::new(self.clone(), worker_id) - } - - fn poll(self, worker: WorkerId) -> Poller { + fn poll(self, worker: WorkerId) -> Poller { + let layer = AckLayer::new(self.clone(), worker.clone()); let config = self.config.clone(); let controller = self.controller.clone(); let pool = self.pool.clone(); @@ -409,17 +407,21 @@ impl Backend Ack for MysqlStorage { +impl Ack for MysqlStorage { type Acknowledger = TaskId; type Error = sqlx::Error; async fn ack( - &self, + &mut self, worker_id: &WorkerId, task_id: &Self::Acknowledger, ) -> Result<(), sqlx::Error> { diff --git a/packages/apalis-sql/src/postgres.rs b/packages/apalis-sql/src/postgres.rs index 6de6bef9..77cfe3f8 100644 --- a/packages/apalis-sql/src/postgres.rs +++ b/packages/apalis-sql/src/postgres.rs @@ -53,14 +53,15 @@ use apalis_core::task::namespace::Namespace; use apalis_core::task::task_id::TaskId; use apalis_core::worker::WorkerId; use apalis_core::{Backend, Codec}; -use async_stream::try_stream; -use futures::{FutureExt, Stream}; -use futures::{StreamExt, TryStreamExt}; +use futures::channel::mpsc; +use futures::StreamExt; +use futures::{select, stream, SinkExt}; use log::error; use serde::{de::DeserializeOwned, Serialize}; use sqlx::postgres::PgListener; use sqlx::types::chrono::{DateTime, Utc}; use sqlx::{Pool, Postgres, Row}; +use std::any::type_name; use std::convert::TryInto; use std::sync::Arc; use std::{fmt, io}; @@ -88,6 +89,7 @@ pub struct PostgresStorage { config: Config, controller: Controller, ack_notify: Notify<(WorkerId, TaskId)>, + subscription: Option, } impl Clone for PostgresStorage { @@ -99,6 +101,7 @@ impl Clone for PostgresStorage { config: self.config.clone(), controller: self.controller.clone(), ack_notify: self.ack_notify.clone(), + subscription: self.subscription.clone(), } } } @@ -126,55 +129,77 @@ impl Backend, T>; - fn common_layer(&self, worker_id: WorkerId) -> Self::Layer { - AckLayer::new(self.clone(), worker_id) - } - - fn poll(mut self, worker: WorkerId) -> Poller { + fn poll(mut self, worker: WorkerId) -> Poller { + let layer = AckLayer::new(self.clone(), worker.clone()); + let subscription = self.subscription.clone(); let config = self.config.clone(); let controller = self.controller.clone(); - let stream = self - .stream_jobs(&worker, config.poll_interval, config.buffer_size) - .map_err(|e| Error::SourceError(Box::new(e))); - let stream = BackendStream::new(stream.boxed(), controller); + let (mut tx, rx) = mpsc::channel(self.config.buffer_size); let ack_notify = self.ack_notify.clone(); let pool = self.pool.clone(); - let ack_heartbeat = async move { - while let Some(ids) = ack_notify - .clone() - .ready_chunks(config.buffer_size) - .next() - .await - { - let worker_ids: Vec = ids.iter().map(|c| c.0.to_string()).collect(); - let task_ids: Vec = ids.iter().map(|c| c.1.to_string()).collect(); - - let query = - "UPDATE apalis.jobs SET status = 'Done', done_at = now() WHERE id = ANY($1::text[]) AND lock_by = ANY($2::text[])"; - if let Err(e) = sqlx::query(query) - .bind(task_ids) - .bind(worker_ids) - .execute(&pool) - .await - { - error!("Ack failed: {e}"); + + let heartbeat = async move { + let mut keep_alive_stm = apalis_core::interval::interval(config.keep_alive).fuse(); + 
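+ // These interval and notification streams are fused so the `select!` loop
+ // below can keep polling them without panicking once a branch completes;
+ // when no `PgListen` subscription is attached, `pg_notification` falls back
+ // to an empty stream and fetching is driven solely by `poll_next_stm`.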
let mut ack_stream = ack_notify.clone().ready_chunks(config.buffer_size).fuse(); + + let mut poll_next_stm = apalis_core::interval::interval(config.poll_interval).fuse(); + + let mut pg_notification = subscription + .map(|stm| stm.notify.boxed().fuse()) + .unwrap_or(stream::iter(vec![]).boxed().fuse()); + + async fn fetch_next_batch( + storage: &mut PostgresStorage, + worker: &WorkerId, + tx: &mut mpsc::Sender>, Error>>, + ) { + let res = storage.fetch_next(worker).await.unwrap(); + for job in res { + tx.send(Ok(Some(job))).await.unwrap(); } - apalis_core::sleep(config.poll_interval).await; } - }; - let heartbeat = async move { + loop { - let now: i64 = Utc::now().timestamp(); - if let Err(e) = self.keep_alive_at::(&worker, now).await { - error!("Heartbeat failed: {e}") - } - apalis_core::sleep(config.keep_alive).await; + select! { + _ = keep_alive_stm.next() => { + let now: i64 = Utc::now().timestamp(); + self.keep_alive_at::(&worker, now).await.unwrap(); + } + ids = ack_stream.next() => { + if let Some(ids) = ids { + let worker_ids: Vec = ids.iter().map(|c| c.0.to_string()).collect(); + let task_ids: Vec = ids.iter().map(|c| c.1.to_string()).collect(); + + let query = + "UPDATE apalis.jobs SET status = 'Done', done_at = now() WHERE id = ANY($1::text[]) AND lock_by = ANY($2::text[])"; + if let Err(e) = sqlx::query(query) + .bind(task_ids) + .bind(worker_ids) + .execute(&pool) + .await + { + error!("Ack failed: {e}"); + } + } + } + _ = poll_next_stm.next() => { + fetch_next_batch(&mut self, &worker, &mut tx).await; + } + _ = pg_notification.next() => { + fetch_next_batch(&mut self, &worker, &mut tx).await; + } + + + }; } - } - .boxed(); - Poller::new(stream, async { - futures::join!(heartbeat, ack_heartbeat); - }) + }; + Poller::new_with_layer( + BackendStream::new(rx.boxed(), controller), + async { + futures::join!(heartbeat); + }, + layer, + ) } } @@ -196,7 +221,7 @@ impl PostgresStorage<()> { impl PostgresStorage { /// New Storage from [PgPool] pub fn new(pool: PgPool) -> Self { - Self::new_with_config(pool, Config::default()) + Self::new_with_config(pool, Config::new(type_name::())) } /// New Storage from [PgPool] and custom config pub fn new_with_config(pool: PgPool, config: Config) -> Self { @@ -207,6 +232,7 @@ impl PostgresStorage { config, controller: Controller::new(), ack_notify: Notify::new(), + subscription: None, } } @@ -241,6 +267,16 @@ impl PgListen { }) } + /// Add a new subscription with a storage + pub fn subscribe_with(&mut self, storage: &mut PostgresStorage) { + let sub = PgSubscription { + notify: Notify::new(), + }; + self.subscriptions + .push((storage.config.namespace.to_owned(), sub.clone())); + storage.subscription = Some(sub) + } + /// Add a new subscription pub fn subscribe(&mut self, namespace: &str) -> PgSubscription { let sub = PgSubscription { @@ -266,44 +302,37 @@ impl PgListen { } impl PostgresStorage { - fn stream_jobs( - &self, - worker_id: &WorkerId, - interval: Duration, - buffer_size: usize, - ) -> impl Stream>, sqlx::Error>> { - let pool = self.pool.clone(); - let worker_id = worker_id.clone(); - let codec = self.codec.clone(); - let config = self.config.clone(); - try_stream! 
{ - loop { - // Ideally wait for a job or a tick - apalis_core::sleep(interval).await; - let tx = pool.clone(); - let job_type = &config.namespace; - let fetch_query = "Select * from apalis.get_jobs($1, $2, $3);"; - let jobs: Vec> = sqlx::query_as(fetch_query) - .bind(worker_id.to_string()) - .bind(job_type) - // https://docs.rs/sqlx/latest/sqlx/postgres/types/index.html - .bind(i32::try_from(buffer_size).map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::InvalidInput, e)))?) - .fetch_all(&tx) - .await?; - for job in jobs { - - yield Some(Into::into(SqlRequest { - context: job.context, - req: codec.decode(&job.req).map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::InvalidData, e)))?, - })).map(|mut req: Request| { - req.insert(Namespace(config.namespace.clone())); - req - }) - } - - } - } - .boxed() + async fn fetch_next(&mut self, worker_id: &WorkerId) -> Result>, sqlx::Error> { + let config = &self.config; + let codec = &self.codec; + let job_type = &config.namespace; + let fetch_query = "Select * from apalis.get_jobs($1, $2, $3);"; + let jobs: Vec> = sqlx::query_as(fetch_query) + .bind(worker_id.to_string()) + .bind(job_type) + // https://docs.rs/sqlx/latest/sqlx/postgres/types/index.html + .bind( + i32::try_from(config.buffer_size) + .map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::InvalidInput, e)))?, + ) + .fetch_all(&self.pool) + .await?; + let jobs: Vec<_> = jobs + .into_iter() + .map(|job| { + let req = SqlRequest { + context: job.context, + req: codec + .decode(&job.req) + .map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::InvalidData, e))) + .unwrap(), + }; + let mut req: Request = req.into(); + req.insert(Namespace(config.namespace.clone())); + req + }) + .collect(); + Ok(jobs) } async fn keep_alive_at( @@ -311,7 +340,6 @@ impl PostgresStorage { worker_id: &WorkerId, last_seen: Timestamp, ) -> Result<(), sqlx::Error> { - let pool = self.pool.clone(); let last_seen = DateTime::from_timestamp(last_seen, 0).ok_or(sqlx::Error::Io( io::Error::new(io::ErrorKind::InvalidInput, "Invalid Timestamp"), ))?; @@ -327,7 +355,7 @@ impl PostgresStorage { .bind(storage_name) .bind(std::any::type_name::()) .bind(last_seen) - .execute(&pool) + .execute(&self.pool) .await?; Ok(()) } @@ -353,7 +381,7 @@ where async fn push(&mut self, job: Self::Job) -> Result { let id = TaskId::new(); let query = "INSERT INTO apalis.jobs VALUES ($1, $2, $3, 'Pending', 0, 25, NOW() , NULL, NULL, NULL, NULL)"; - let pool = self.pool.clone(); + let job = self .codec .encode(&job) @@ -363,7 +391,7 @@ where .bind(job) .bind(id.to_string()) .bind(&job_type) - .execute(&pool) + .execute(&self.pool) .await?; Ok(id) } @@ -371,7 +399,7 @@ where async fn schedule(&mut self, job: Self::Job, on: Timestamp) -> Result { let query = "INSERT INTO apalis.jobs VALUES ($1, $2, $3, 'Pending', 0, 25, $4, NULL, NULL, NULL, NULL)"; - let pool = self.pool.clone(); + let id = TaskId::new(); let on = DateTime::from_timestamp(on, 0); let job = self @@ -384,21 +412,19 @@ where .bind(id.to_string()) .bind(job_type) .bind(on) - .execute(&pool) + .execute(&self.pool) .await?; Ok(id) } async fn fetch_by_id( - &self, + &mut self, job_id: &TaskId, ) -> Result>, sqlx::Error> { - let pool = self.pool.clone(); - let fetch_query = "SELECT * FROM apalis.jobs WHERE id = $1"; let res: Option> = sqlx::query_as(fetch_query) .bind(job_id.to_string()) - .fetch_optional(&pool) + .fetch_optional(&self.pool) .await?; match res { None => Ok(None), @@ -414,15 +440,13 @@ where } } - async fn len(&self) -> Result { - let pool = 
self.pool.clone(); + async fn len(&mut self) -> Result { let query = "Select Count(*) as count from apalis.jobs where status='Pending'"; - let record = sqlx::query(query).fetch_one(&pool).await?; + let record = sqlx::query(query).fetch_one(&self.pool).await?; record.try_get("count") } async fn reschedule(&mut self, job: Request, wait: Duration) -> Result<(), sqlx::Error> { - let pool = self.pool.clone(); let ctx = job .get::() .ok_or(sqlx::Error::Io(io::Error::new( @@ -431,7 +455,7 @@ where )))?; let job_id = ctx.id(); let on = Utc::now() + wait; - let mut tx = pool.acquire().await?; + let mut tx = self.pool.acquire().await?; let query = "UPDATE apalis.jobs SET status = 'Pending', done_at = NULL, lock_by = NULL, lock_at = NULL, run_at = $2 WHERE id = $1"; @@ -443,8 +467,7 @@ where Ok(()) } - async fn update(&self, job: Request) -> Result<(), sqlx::Error> { - let pool = self.pool.clone(); + async fn update(&mut self, job: Request) -> Result<(), sqlx::Error> { let ctx = job .get::() .ok_or(sqlx::Error::Io(io::Error::new( @@ -463,7 +486,7 @@ where let lock_at = *ctx.lock_at(); let last_error = ctx.last_error().clone(); - let mut tx = pool.acquire().await?; + let mut tx = self.pool.acquire().await?; let query = "UPDATE apalis.jobs SET status = $1, attempts = $2, done_at = $3, lock_by = $4, lock_at = $5, last_error = $6 WHERE id = $7"; sqlx::query(query) @@ -479,23 +502,22 @@ where Ok(()) } - async fn is_empty(&self) -> Result { + async fn is_empty(&mut self) -> Result { Ok(self.len().await? == 0) } - async fn vacuum(&self) -> Result { - let pool = self.pool.clone(); + async fn vacuum(&mut self) -> Result { let query = "Delete from apalis.jobs where status='Done'"; - let record = sqlx::query(query).execute(&pool).await?; + let record = sqlx::query(query).execute(&self.pool).await?; Ok(record.rows_affected().try_into().unwrap_or_default()) } } -impl Ack for PostgresStorage { +impl Ack for PostgresStorage { type Acknowledger = TaskId; type Error = sqlx::Error; async fn ack( - &self, + &mut self, worker_id: &WorkerId, task_id: &Self::Acknowledger, ) -> Result<(), sqlx::Error> { @@ -514,9 +536,7 @@ impl PostgresStorage { worker_id: &WorkerId, task_id: &TaskId, ) -> Result<(), sqlx::Error> { - let pool = self.pool.clone(); - - let mut tx = pool.acquire().await?; + let mut tx = self.pool.acquire().await?; let query = "UPDATE apalis.jobs SET status = 'Killed', done_at = now() WHERE id = $1 AND lock_by = $2"; sqlx::query(query) @@ -528,15 +548,13 @@ impl PostgresStorage { } /// Puts the job instantly back into the queue - /// Another [Worker] may consume + /// Another Worker may consume pub async fn retry( &mut self, worker_id: &WorkerId, task_id: &TaskId, ) -> Result<(), sqlx::Error> { - let pool = self.pool.clone(); - - let mut tx = pool.acquire().await?; + let mut tx = self.pool.acquire().await?; let query = "UPDATE apalis.jobs SET status = 'Pending', done_at = NULL, lock_by = NULL WHERE id = $1 AND lock_by = $2"; sqlx::query(query) @@ -548,7 +566,7 @@ impl PostgresStorage { } /// Reenqueue jobs that have been abandoned by their workers - pub async fn reenqueue_orphaned(&self, count: i32) -> Result<(), sqlx::Error> { + pub async fn reenqueue_orphaned(&mut self, count: i32) -> Result<(), sqlx::Error> { let job_type = self.config.namespace.clone(); let mut tx = self.pool.acquire().await?; let query = "Update apalis.jobs @@ -624,13 +642,8 @@ mod tests { storage: &mut PostgresStorage, worker_id: &WorkerId, ) -> Request { - let mut stream = storage.stream_jobs(worker_id, 
std::time::Duration::from_secs(10), 1); - stream - .next() - .await - .expect("stream is empty") - .expect("failed to poll job") - .expect("no job is pending") + let mut req = storage.fetch_next(worker_id).await; + req.unwrap()[0].clone() } async fn register_worker_at( diff --git a/packages/apalis-sql/src/sqlite.rs b/packages/apalis-sql/src/sqlite.rs index e8aad0bf..652184b6 100644 --- a/packages/apalis-sql/src/sqlite.rs +++ b/packages/apalis-sql/src/sqlite.rs @@ -18,6 +18,7 @@ use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; use serde::{de::DeserializeOwned, Serialize}; use sqlx::types::chrono::Utc; use sqlx::{Pool, Row, Sqlite}; +use std::any::type_name; use std::convert::TryInto; use std::sync::Arc; use std::{fmt, io}; @@ -26,6 +27,12 @@ use std::{marker::PhantomData, time::Duration}; use crate::from_row::SqlRequest; pub use sqlx::sqlite::SqlitePool; + +/// The codec used to encode Sqlite jobs. +/// +/// Currently uses JSON +pub type SqliteCodec = Arc + Sync + Send + 'static>>; + /// Represents a [Storage] that persists to Sqlite // #[derive(Debug)] pub struct SqliteStorage { pool: Pool, job_type: PhantomData, controller: Controller, config: Config, - codec: Arc + Sync + Send + 'static>>, + codec: SqliteCodec, } impl fmt::Debug for SqliteStorage { @@ -47,16 +54,14 @@ impl fmt::Debug for SqliteStorage { "codec", &"Arc + Sync + Send + 'static>>", ) - // .field("ack_notify", &self.ack_notify) .finish() } } impl Clone for SqliteStorage { fn clone(&self) -> Self { - let pool = self.pool.clone(); SqliteStorage { - pool, + pool: self.pool.clone(), job_type: PhantomData, controller: self.controller.clone(), config: self.config.clone(), @@ -91,9 +96,15 @@ impl SqliteStorage<()> { } impl SqliteStorage { - /// Construct a new Storage from a pool + /// Create a new instance pub fn new(pool: SqlitePool) -> Self { - Self::new_with_config(pool, Config::default()) + Self { + pool, + job_type: PhantomData, + controller: Controller::new(), + config: Config::new(type_name::()), + codec: Arc::new(Box::new(JsonCodec)), + } } /// Create a new instance with a custom config @@ -112,7 +123,6 @@ impl SqliteStorage { worker_id: &WorkerId, last_seen: i64, ) -> Result<(), sqlx::Error> { - let pool = self.pool.clone(); let worker_type = self.config.namespace.clone(); let storage_name = std::any::type_name::(); let query = "INSERT INTO Workers (id, worker_type, storage_name, layers, last_seen) @@ -125,7 +135,7 @@ impl SqliteStorage { .bind(storage_name) .bind(std::any::type_name::()) .bind(last_seen) - .execute(&pool) + .execute(&self.pool) .await?; Ok(()) } @@ -134,10 +144,15 @@ impl SqliteStorage { pub fn pool(&self) -> &Pool { &self.pool } + + /// Expose the codec used + pub fn codec(&self) -> &SqliteCodec { + &self.codec + } } async fn fetch_next( - pool: Pool, + pool: &Pool, worker_id: &WorkerId, id: String, config: &Config, @@ -149,7 +164,7 @@ async fn fetch_next( .bind(worker_id.to_string()) .bind(now) .bind(config.namespace.clone()) - .fetch_optional(&pool) + .fetch_optional(pool) .await?; Ok(job) } @@ -182,7 +197,7 @@ impl SqliteStorage { .fetch_all(&mut *tx) .await?; for id in ids { - let res = fetch_next(pool.clone(), &worker_id, id.0, &config).await?; + let res = fetch_next(&pool, &worker_id, id.0, &config).await?; yield match res { None => None::>, Some(c) => Some( @@ -219,7 +234,6 @@ where async fn push(&mut self, job: Self::Job) -> Result { let id = TaskId::new(); let query = "INSERT INTO Jobs VALUES (?1, ?2, ?3, 'Pending', 0, 25,
strftime('%s','now'), NULL, NULL, NULL, NULL)"; - let pool = self.pool.clone(); let job = self .codec @@ -230,7 +244,7 @@ where .bind(job) .bind(id.to_string()) .bind(job_type.to_string()) - .execute(&pool) + .execute(&self.pool) .await?; Ok(id) } @@ -238,7 +252,7 @@ where async fn schedule(&mut self, job: Self::Job, on: i64) -> Result { let query = "INSERT INTO Jobs VALUES (?1, ?2, ?3, 'Pending', 0, 25, ?4, NULL, NULL, NULL, NULL)"; - let pool = self.pool.clone(); + let id = TaskId::new(); let job = self .codec @@ -250,20 +264,19 @@ where .bind(id.to_string()) .bind(job_type) .bind(on) - .execute(&pool) + .execute(&self.pool) .await?; Ok(id) } async fn fetch_by_id( - &self, + &mut self, job_id: &TaskId, ) -> Result>, Self::Error> { - let pool = self.pool.clone(); let fetch_query = "SELECT * FROM Jobs WHERE id = ?1"; let res: Option> = sqlx::query_as(fetch_query) .bind(job_id.to_string()) - .fetch_optional(&pool) + .fetch_optional(&self.pool) .await?; match res { None => Ok(None), @@ -279,16 +292,13 @@ where } } - async fn len(&self) -> Result { - let pool = self.pool.clone(); - + async fn len(&mut self) -> Result { let query = "Select Count(*) as count from Jobs where status='Pending'"; - let record = sqlx::query(query).fetch_one(&pool).await?; + let record = sqlx::query(query).fetch_one(&self.pool).await?; record.try_get("count") } async fn reschedule(&mut self, job: Request, wait: Duration) -> Result<(), Self::Error> { - let pool = self.pool.clone(); let task_id = job.get::().ok_or(sqlx::Error::Io(io::Error::new( io::ErrorKind::InvalidData, "Missing TaskId", @@ -299,7 +309,7 @@ where .try_into() .map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::InvalidData, e)))?; - let mut tx = pool.acquire().await?; + let mut tx = self.pool.acquire().await?; let query = "UPDATE Jobs SET status = 'Failed', done_at = NULL, lock_by = NULL, lock_at = NULL, run_at = ?2 WHERE id = ?1"; let now: i64 = Utc::now().timestamp(); @@ -313,8 +323,7 @@ where Ok(()) } - async fn update(&self, job: Request) -> Result<(), Self::Error> { - let pool = self.pool.clone(); + async fn update(&mut self, job: Request) -> Result<(), Self::Error> { let ctx = job .get::() .ok_or(sqlx::Error::Io(io::Error::new( @@ -328,7 +337,7 @@ where let lock_at = *ctx.lock_at(); let last_error = ctx.last_error().clone(); let job_id = ctx.id(); - let mut tx = pool.acquire().await?; + let mut tx = self.pool.acquire().await?; let query = "UPDATE Jobs SET status = ?1, attempts = ?2, done_at = ?3, lock_by = ?4, lock_at = ?5, last_error = ?6 WHERE id = ?7"; sqlx::query(query) @@ -349,29 +358,26 @@ where Ok(()) } - async fn is_empty(&self) -> Result { + async fn is_empty(&mut self) -> Result { self.len().map_ok(|c| c == 0).await } - async fn vacuum(&self) -> Result { - let pool = self.pool.clone(); + async fn vacuum(&mut self) -> Result { let query = "Delete from Jobs where status='Done'"; - let record = sqlx::query(query).execute(&pool).await?; + let record = sqlx::query(query).execute(&self.pool).await?; Ok(record.rows_affected().try_into().unwrap_or_default()) } } impl SqliteStorage { /// Puts the job instantly back into the queue - /// Another [Worker] may consume + /// Another Worker may consume pub async fn retry( &mut self, worker_id: &WorkerId, job_id: &TaskId, ) -> Result<(), sqlx::Error> { - let pool = self.pool.clone(); - - let mut tx = pool.acquire().await?; + let mut tx = self.pool.acquire().await?; let query = "UPDATE Jobs SET status = 'Pending', done_at = NULL, lock_by = NULL WHERE id = ?1 AND lock_by = ?2"; sqlx::query(query) 
@@ -384,9 +390,7 @@ impl SqliteStorage { /// Kill a job pub async fn kill(&mut self, worker_id: &WorkerId, job_id: &TaskId) -> Result<(), sqlx::Error> { - let pool = self.pool.clone(); - - let mut tx = pool.begin().await?; + let mut tx = self.pool.begin().await?; let query = "UPDATE Jobs SET status = 'Killed', done_at = strftime('%s','now') WHERE id = ?1 AND lock_by = ?2"; sqlx::query(query) @@ -399,7 +403,7 @@ impl SqliteStorage { } /// Add jobs that failed back to the queue if there are still remaining attempts - pub async fn reenqueue_failed(&self) -> Result<(), sqlx::Error> { + pub async fn reenqueue_failed(&mut self) -> Result<(), sqlx::Error> { let job_type = self.config.namespace.clone(); let mut tx = self.pool.acquire().await?; let query = r#"Update Jobs @@ -448,11 +452,8 @@ impl Backend>>; type Layer = AckLayer, T>; - fn common_layer(&self, worker_id: WorkerId) -> Self::Layer { - AckLayer::new(self.clone(), worker_id) - } - - fn poll(mut self, worker: WorkerId) -> Poller { + fn poll(mut self, worker: WorkerId) -> Poller { + let layer = AckLayer::new(self.clone(), worker.clone()); let config = self.config.clone(); let controller = self.controller.clone(); let stream = self @@ -469,25 +470,24 @@ impl Backend Ack for SqliteStorage { +impl Ack for SqliteStorage { type Acknowledger = TaskId; type Error = sqlx::Error; async fn ack( - &self, + &mut self, worker_id: &WorkerId, task_id: &Self::Acknowledger, ) -> Result<(), sqlx::Error> { - let pool = self.pool.clone(); let query = "UPDATE Jobs SET status = 'Done', done_at = strftime('%s','now') WHERE id = ?1 AND lock_by = ?2"; sqlx::query(query) .bind(task_id.to_string()) .bind(worker_id.to_string()) - .execute(&pool) + .execute(&self.pool) .await?; Ok(()) } diff --git a/src/lib.rs b/src/lib.rs index d432ea3c..ea76136b 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -34,7 +34,7 @@ //! async fn main() { //! let redis = std::env::var("REDIS_URL").expect("Missing REDIS_URL env variable"); //! let conn = apalis::redis::connect(redis).await.unwrap(); -//! let storage = RedisStorage::new(conn, Config::default()); +//! let storage = RedisStorage::new(conn); //! Monitor::::new() //! .register_with_count(2, { //! WorkerBuilder::new(&format!("quick-sand")) From 732553ab731999234b164e7315f12c74a80f5a49 Mon Sep 17 00:00:00 2001 From: Geoffrey Mureithi <95377562+geofmureithi@users.noreply.github.com> Date: Tue, 2 Jul 2024 22:39:27 +0300 Subject: [PATCH 12/59] bump: to v0.6.0-alpha.1 (#343) * api: for redis and sqlite * Version: 0.6.0-alpha.1 Changelog: - Redis storage doesn't require pool to be clone. Allows use of deadpool-redis among others. - Namespace is picked by default for `new` methods.
* fix: docs and tests * lint: cargo clippy and fmt * postgres: add a listener example * bump: to v0.6.0-alpha.1 --- Cargo.toml | 10 +++++----- packages/apalis-core/Cargo.toml | 2 +- packages/apalis-cron/Cargo.toml | 4 ++-- packages/apalis-redis/Cargo.toml | 4 ++-- packages/apalis-sql/Cargo.toml | 4 ++-- 5 files changed, 12 insertions(+), 12 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 35e2f887..225d8e50 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apalis" -version = "0.6.0-alpha.0" +version = "0.6.0-alpha.1" authors = ["Geoffrey Mureithi "] description = "Simple, extensible multithreaded background job processing for Rust" repository = "https://github.com/geofmureithi/apalis" @@ -71,26 +71,26 @@ layers = [ docsrs = ["document-features"] [dependencies.apalis-redis] -version = "0.6.0-alpha.0" +version = "0.6.0-alpha.1" optional = true path = "./packages/apalis-redis" default-features = false [dependencies.apalis-sql] -version = "0.6.0-alpha.0" +version = "0.6.0-alpha.1" features = ["migrate"] optional = true default-features = false path = "./packages/apalis-sql" [dependencies.apalis-core] -version = "0.6.0-alpha.0" +version = "0.6.0-alpha.1" default-features = false path = "./packages/apalis-core" [dependencies.apalis-cron] -version = "0.6.0-alpha.0" +version = "0.6.0-alpha.1" optional = true default-features = false path = "./packages/apalis-cron" diff --git a/packages/apalis-core/Cargo.toml b/packages/apalis-core/Cargo.toml index 5122ee5a..81e08a68 100644 --- a/packages/apalis-core/Cargo.toml +++ b/packages/apalis-core/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apalis-core" -version = "0.6.0-alpha.0" +version = "0.6.0-alpha.1" authors = ["Njuguna Mureithi "] edition = "2021" license = "MIT" diff --git a/packages/apalis-cron/Cargo.toml b/packages/apalis-cron/Cargo.toml index edc847fa..ad0b2e04 100644 --- a/packages/apalis-cron/Cargo.toml +++ b/packages/apalis-cron/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apalis-cron" -version = "0.6.0-alpha.0" +version = "0.6.0-alpha.1" edition = "2021" authors = ["Njuguna Mureithi "] license = "MIT" @@ -9,7 +9,7 @@ description = "A simple yet extensible library for cron-like job scheduling for # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -apalis-core = { path = "../../packages/apalis-core", version = "0.6.0-alpha.0", default-features = false, features = [ +apalis-core = { path = "../../packages/apalis-core", version = "0.6.0-alpha.1", default-features = false, features = [ "sleep", "json", ] } diff --git a/packages/apalis-redis/Cargo.toml b/packages/apalis-redis/Cargo.toml index 688a4d24..8c22fd51 100644 --- a/packages/apalis-redis/Cargo.toml +++ b/packages/apalis-redis/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apalis-redis" -version = "0.6.0-alpha.0" +version = "0.6.0-alpha.1" authors = ["Njuguna Mureithi "] edition = "2021" readme = "../../README.md" @@ -11,7 +11,7 @@ description = "Redis Storage for apalis: use Redis for background jobs and messa # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -apalis-core = { path = "../../packages/apalis-core", version = "0.6.0-alpha.0", default-features = false, features = [ +apalis-core = { path = "../../packages/apalis-core", version = "0.6.0-alpha.1", default-features = false, features = [ "sleep", "json", ] } diff --git a/packages/apalis-sql/Cargo.toml b/packages/apalis-sql/Cargo.toml index 3e5ac1b0..1fbc5b43 100644 --- 
a/packages/apalis-sql/Cargo.toml +++ b/packages/apalis-sql/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apalis-sql" -version = "0.6.0-alpha.0" +version = "0.6.0-alpha.1" authors = ["Njuguna Mureithi "] edition = "2021" readme = "../../README.md" @@ -25,7 +25,7 @@ features = ["chrono"] [dependencies] serde = { version = "1", features = ["derive"] } serde_json = "1" -apalis-core = { path = "../../packages/apalis-core", version = "0.6.0-alpha.0", default-features = false, features = [ +apalis-core = { path = "../../packages/apalis-core", version = "0.6.0-alpha.1", default-features = false, features = [ "sleep", "json", ] } From dd9570cb54bfc6a068b15320191f5139204cc28a Mon Sep 17 00:00:00 2001 From: Geoffrey Mureithi <95377562+geofmureithi@users.noreply.github.com> Date: Thu, 4 Jul 2024 20:53:37 +0300 Subject: [PATCH 13/59] fix: allow cd for prereleases (#349) --- .github/workflows/cd.yaml | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/.github/workflows/cd.yaml b/.github/workflows/cd.yaml index e43e2f33..48e8c39f 100644 --- a/.github/workflows/cd.yaml +++ b/.github/workflows/cd.yaml @@ -31,10 +31,7 @@ jobs: # vX.Y.Z-foo is pre-release version VERSION=${GITHUB_REF#refs/tags/v} VERSION_NUMBER=${VERSION%-*} - PUBLISH_OPTS="--dry-run" - if [[ $VERSION == $VERSION_NUMBER ]]; then - PUBLISH_OPTS="" - fi + PUBLISH_OPTS="" echo VERSION=${VERSION} >> $GITHUB_ENV echo PUBLISH_OPTS=${PUBLISH_OPTS} >> $GITHUB_ENV echo VERSION_NUMBER=${VERSION_NUMBER} >> $GITHUB_ENV From 5861e8467310b1ede31f6fb7553c78fb4fa4e0f4 Mon Sep 17 00:00:00 2001 From: Geoffrey Mureithi <95377562+geofmureithi@users.noreply.github.com> Date: Thu, 4 Jul 2024 21:09:38 +0300 Subject: [PATCH 14/59] Remove `Clone` constraints and buffer the service (#348) * feat: remove the `Clone` requirements for services * test save * fix: get buffered layer working * update: remove clone & update api * fix: tests and api * lint: clippy fixes * lint: cargo fmt --- examples/redis/Cargo.toml | 2 +- examples/redis/src/main.rs | 6 +- packages/apalis-core/src/layers.rs | 45 ++++- packages/apalis-core/src/lib.rs | 3 +- packages/apalis-core/src/monitor/mod.rs | 4 +- .../apalis-core/src/worker/buffer/error.rs | 68 +++++++ .../apalis-core/src/worker/buffer/future.rs | 79 ++++++++ .../apalis-core/src/worker/buffer/message.rs | 16 ++ packages/apalis-core/src/worker/buffer/mod.rs | 5 + .../apalis-core/src/worker/buffer/service.rs | 146 ++++++++++++++ .../apalis-core/src/worker/buffer/worker.rs | 184 ++++++++++++++++++ packages/apalis-core/src/worker/mod.rs | 26 ++- packages/apalis-redis/lua/ack_job.lua | 2 +- packages/apalis-redis/src/storage.rs | 31 +-- packages/apalis-sql/src/mysql.rs | 16 +- packages/apalis-sql/src/postgres.rs | 27 ++- packages/apalis-sql/src/sqlite.rs | 24 +-- 17 files changed, 613 insertions(+), 71 deletions(-) create mode 100644 packages/apalis-core/src/worker/buffer/error.rs create mode 100644 packages/apalis-core/src/worker/buffer/future.rs create mode 100644 packages/apalis-core/src/worker/buffer/message.rs create mode 100644 packages/apalis-core/src/worker/buffer/mod.rs create mode 100644 packages/apalis-core/src/worker/buffer/service.rs create mode 100644 packages/apalis-core/src/worker/buffer/worker.rs diff --git a/examples/redis/Cargo.toml b/examples/redis/Cargo.toml index 2f8a9be5..27ff00f9 100644 --- a/examples/redis/Cargo.toml +++ b/examples/redis/Cargo.toml @@ -8,7 +8,7 @@ license = "MIT OR Apache-2.0" [dependencies] anyhow = "1" tokio = { version = "1", features = ["full"] } -apalis = { path = "../../", 
features = ["redis", "timeout"]} +apalis = { path = "../../", features = ["redis", "timeout", "limit"]} serde = "1" env_logger = "0.10" tracing-subscriber = "0.3.11" diff --git a/examples/redis/src/main.rs b/examples/redis/src/main.rs index 87a35ea1..35b498d5 100644 --- a/examples/redis/src/main.rs +++ b/examples/redis/src/main.rs @@ -6,13 +6,13 @@ use std::{ use anyhow::Result; use apalis::prelude::*; -use apalis::redis::RedisStorage; +use apalis::{layers::limit::RateLimitLayer, redis::RedisStorage}; use email_service::{send_email, Email}; use tracing::{error, info}; async fn produce_jobs(mut storage: RedisStorage) -> Result<()> { - for index in 0..1 { + for index in 0..10 { storage .push(Email { to: index.to_string(), @@ -48,6 +48,7 @@ async fn main() -> Result<()> { let worker = WorkerBuilder::new("rango-tango") .chain(|svc| svc.timeout(Duration::from_millis(500))) .data(Count::default()) + .layer(RateLimitLayer::new(5, Duration::from_secs(1))) .with_storage(storage) .build_fn(send_email); @@ -71,6 +72,7 @@ async fn main() -> Result<()> { }) .shutdown_timeout(Duration::from_millis(5000)) .run_with_signal(async { + info!("Monitor started"); tokio::signal::ctrl_c().await?; info!("Monitor starting shutdown"); Ok(()) diff --git a/packages/apalis-core/src/layers.rs b/packages/apalis-core/src/layers.rs index 16355bfd..d561c685 100644 --- a/packages/apalis-core/src/layers.rs +++ b/packages/apalis-core/src/layers.rs @@ -162,24 +162,43 @@ pub trait Ack { /// Acknowledges successful processing of the given request fn ack( &mut self, - worker_id: &WorkerId, - data: &Self::Acknowledger, + response: AckResponse, ) -> impl Future> + Send; } +/// ACK response +#[derive(Debug, Clone)] +pub struct AckResponse { + /// The worker id + pub worker: WorkerId, + /// The acknowledger + pub acknowledger: A, + /// The stringified result + pub result: String, +} + +impl AckResponse { + /// Output a json for the response + pub fn to_json(&self) -> String { + format!( + r#"{{"worker": "{}", "acknowledger": "{}", "result": "{}"}}"#, + self.worker, self.acknowledger, self.result + ) + } +} + /// A generic stream that emits (worker_id, task_id) #[derive(Debug)] -pub struct AckStream(pub Sender<(WorkerId, A)>); +pub struct AckStream(pub Sender>); impl Ack for AckStream { type Acknowledger = A; type Error = SendError; fn ack( &mut self, - worker_id: &WorkerId, - data: &Self::Acknowledger, + response: AckResponse, ) -> impl Future> + Send { - self.0.send((worker_id.clone(), data.clone())).boxed() + self.0.send(response).boxed() } } @@ -248,7 +267,7 @@ where >>::Future: std::marker::Send + 'static, A: Ack + Send + 'static + Clone + Send + Sync, J: 'static, - >>::Response: std::marker::Send, + >>::Response: std::marker::Send + fmt::Debug + Sync, >::Acknowledger: Sync + Send + Clone, { type Response = SV::Response; @@ -266,12 +285,18 @@ where let mut ack = self.ack.clone(); let worker_id = self.worker_id.clone(); let data = request.get::<>::Acknowledger>().cloned(); - let fut = self.service.call(request); let fut_with_ack = async move { let res = fut.await; - if let Some(data) = data { - if let Err(_e) = ack.ack(&worker_id, &data).await { + if let Some(task_id) = data { + if let Err(_e) = ack + .ack(AckResponse { + worker: worker_id, + acknowledger: task_id, + result: format!("{res:?}"), + }) + .await + { // tracing::warn!("Acknowledgement Failed: {}", e); // try get monitor, and emit } diff --git a/packages/apalis-core/src/lib.rs b/packages/apalis-core/src/lib.rs index 8fb411d9..5511ceef 100644 --- 
a/packages/apalis-core/src/lib.rs +++ b/packages/apalis-core/src/lib.rs @@ -145,7 +145,8 @@ pub mod interval { Poll::Pending => return Poll::Pending, }; let interval = self.interval; - let _ = std::mem::replace(&mut self.timer, Box::pin(sleep(interval))); + let fut = std::mem::replace(&mut self.timer, Box::pin(sleep(interval))); + drop(fut); Poll::Ready(Some(())) } } diff --git a/packages/apalis-core/src/monitor/mod.rs b/packages/apalis-core/src/monitor/mod.rs index 393f58f8..efec9eb9 100644 --- a/packages/apalis-core/src/monitor/mod.rs +++ b/packages/apalis-core/src/monitor/mod.rs @@ -80,7 +80,7 @@ impl Monitor { /// Registers a single instance of a [Worker] pub fn register< J: Send + Sync + 'static, - S: Service> + Send + 'static + Clone, + S: Service> + Send + 'static, P: Backend> + 'static, >( mut self, @@ -109,7 +109,7 @@ impl Monitor { /// The monitor instance, with all workers added to the collection. pub fn register_with_count< J: Send + Sync + 'static, - S: Service> + Send + 'static + Clone, + S: Service> + Send + 'static, P: Backend> + 'static, >( mut self, diff --git a/packages/apalis-core/src/worker/buffer/error.rs b/packages/apalis-core/src/worker/buffer/error.rs new file mode 100644 index 00000000..a1da124d --- /dev/null +++ b/packages/apalis-core/src/worker/buffer/error.rs @@ -0,0 +1,68 @@ +//! Error types for the `Buffer` middleware. + +use std::{fmt, sync::Arc}; +use tower::BoxError; + +/// An error produced by a [`Service`] wrapped by a [`Buffer`] +/// +/// [`Service`]: crate::Service +/// [`Buffer`]: crate::buffer::Buffer +#[derive(Debug)] +pub(crate) struct ServiceError { + inner: Arc, } + +/// An error produced when a buffer's worker closes unexpectedly. +pub(crate) struct Closed { + _p: (), +} + +// ===== impl ServiceError ===== + +impl ServiceError { + pub(crate) fn new(inner: BoxError) -> ServiceError { + let inner = Arc::new(inner); + ServiceError { inner } + } + + // Private to avoid exposing `Clone` trait as part of the public API + pub(crate) fn clone(&self) -> ServiceError { + ServiceError { + inner: self.inner.clone(), + } + } +} + +impl fmt::Display for ServiceError { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(fmt, "buffered service failed: {}", self.inner) + } +} + +impl std::error::Error for ServiceError { + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + Some(&**self.inner) + } +} + +// ===== impl Closed ===== + +impl Closed { + pub(crate) fn new() -> Self { + Closed { _p: () } + } +} + +impl fmt::Debug for Closed { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt.debug_tuple("Closed").finish() + } +} + +impl fmt::Display for Closed { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt.write_str("buffer's worker closed unexpectedly") + } +} + +impl std::error::Error for Closed {} diff --git a/packages/apalis-core/src/worker/buffer/future.rs b/packages/apalis-core/src/worker/buffer/future.rs new file mode 100644 index 00000000..8cf3baea --- /dev/null +++ b/packages/apalis-core/src/worker/buffer/future.rs @@ -0,0 +1,79 @@ +//! Future types for the [`Buffer`] middleware. +//! +//! [`Buffer`]: crate::buffer::Buffer + +use super::{error::Closed, message}; +use futures::ready; +use pin_project_lite::pin_project; +use std::{ + future::Future, + pin::Pin, + task::{Context, Poll}, +}; + +pin_project! { + /// Future that completes when the buffered service eventually services the submitted request.
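+ //
+ // The future moves through three states: it first waits on the oneshot
+ // receiver (`Rx`) for the buffer's worker to hand back the inner service's
+ // response future, then drives that future to completion (`Poll`), while
+ // `Failed` short-circuits with a stored error.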
+ #[derive(Debug)] + pub struct ResponseFuture { + #[pin] + state: ResponseState, + } +} + +pin_project! { + #[project = ResponseStateProj] + #[derive(Debug)] + enum ResponseState { + Failed { + error: Option, + }, + Rx { + #[pin] + rx: message::Rx, + }, + Poll { + #[pin] + fut: T, + }, + } +} + +impl ResponseFuture { + pub(crate) fn new(rx: message::Rx) -> Self { + ResponseFuture { + state: ResponseState::Rx { rx }, + } + } + + pub(crate) fn failed(err: tower::BoxError) -> Self { + ResponseFuture { + state: ResponseState::Failed { error: Some(err) }, + } + } +} + +impl Future for ResponseFuture +where + F: Future>, + E: Into, +{ + type Output = Result; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let mut this = self.project(); + + loop { + match this.state.as_mut().project() { + ResponseStateProj::Failed { error } => { + return Poll::Ready(Err(error.take().expect("polled after error"))); + } + ResponseStateProj::Rx { rx } => match ready!(rx.poll(cx)) { + Ok(Ok(fut)) => this.state.set(ResponseState::Poll { fut }), + Ok(Err(e)) => return Poll::Ready(Err(e.into())), + Err(_) => return Poll::Ready(Err(Closed::new().into())), + }, + ResponseStateProj::Poll { fut } => return fut.poll(cx).map_err(Into::into), + } + } + } +} diff --git a/packages/apalis-core/src/worker/buffer/message.rs b/packages/apalis-core/src/worker/buffer/message.rs new file mode 100644 index 00000000..02863a2d --- /dev/null +++ b/packages/apalis-core/src/worker/buffer/message.rs @@ -0,0 +1,16 @@ +use futures::channel::oneshot; + +use super::error::ServiceError; + +/// Message sent over buffer +#[derive(Debug)] +pub(crate) struct Message { + pub(crate) request: Request, + pub(crate) tx: Tx, +} + +/// Response sender +pub(crate) type Tx = oneshot::Sender>; + +/// Response receiver +pub(crate) type Rx = oneshot::Receiver>; diff --git a/packages/apalis-core/src/worker/buffer/mod.rs b/packages/apalis-core/src/worker/buffer/mod.rs new file mode 100644 index 00000000..c341f07d --- /dev/null +++ b/packages/apalis-core/src/worker/buffer/mod.rs @@ -0,0 +1,5 @@ +pub(crate) mod error; +pub(crate) mod future; +pub(crate) mod message; +pub(crate) mod service; +pub(crate) mod worker; diff --git a/packages/apalis-core/src/worker/buffer/service.rs b/packages/apalis-core/src/worker/buffer/service.rs new file mode 100644 index 00000000..a176764c --- /dev/null +++ b/packages/apalis-core/src/worker/buffer/service.rs @@ -0,0 +1,146 @@ +use super::{ + future::ResponseFuture, + message::Message, + worker::{Handle, Worker}, +}; + +use futures::channel::{mpsc, oneshot}; +use futures::task::AtomicWaker; +use std::sync::Arc; +use std::{ + future::Future, + task::{Context, Poll}, +}; +use tower::Service; + +/// Adds an mpsc buffer in front of an inner service. +/// +/// See the module documentation for more details. +#[derive(Debug)] +pub struct Buffer { + tx: PollSender>, + handle: Handle, +} + +impl Buffer +where + F: 'static, +{ + /// Creates a new [`Buffer`] wrapping `service`, but returns the background worker. + /// + /// This is useful if you do not want to spawn directly onto the runtime + /// but instead want to use your own executor. This will return the [`Buffer`] and + /// the background `Worker` that you can then spawn. 
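+ ///
+ /// A minimal usage sketch; the service, request and executor names are
+ /// illustrative assumptions, and `ready` comes from `tower::ServiceExt`:
+ ///
+ /// ```ignore
+ /// let (buffered, worker) = Buffer::pair(my_service, 32);
+ /// executor.spawn(worker);
+ /// let response = buffered.ready().await?.call(request).await?;
+ /// ```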
+ pub fn pair(service: S, bound: usize) -> (Self, Worker) + where + S: Service + Send + 'static, + F: Send, + S::Error: Into + Send + Sync, + Req: Send + 'static, + { + let (tx, rx) = mpsc::channel(bound); + let (handle, worker) = Worker::new(service, rx); + let buffer = Self { + tx: PollSender::new(tx), + handle, + }; + (buffer, worker) + } + + fn get_worker_error(&self) -> tower::BoxError { + self.handle.get_error_on_closed() + } +} + +impl Service for Buffer +where + F: Future> + Send + 'static, + E: Into, + Req: Send + 'static, +{ + type Response = Rsp; + type Error = tower::BoxError; + type Future = ResponseFuture; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + // First, check if the worker is still alive. + if self.tx.is_closed() { + // If the inner service has errored, then we error here. + return Poll::Ready(Err(self.get_worker_error())); + } + + // Poll the sender to acquire a permit. + self.tx + .poll_reserve(cx) + .map_err(|_| self.get_worker_error()) + } + + fn call(&mut self, request: Req) -> Self::Future { + let (tx, rx) = oneshot::channel(); + match self.tx.send_item(Message { request, tx }) { + Ok(_) => ResponseFuture::new(rx), + Err(_) => ResponseFuture::failed(self.get_worker_error()), + } + } +} + +impl Clone for Buffer +where + Req: Send + 'static, + F: Send + 'static, +{ + fn clone(&self) -> Self { + Self { + handle: self.handle.clone(), + tx: self.tx.clone(), + } + } +} + +// PollSender implementation using futures and async-channel +#[derive(Debug)] +struct PollSender { + tx: mpsc::Sender, + waker: Arc, +} + +impl PollSender { + fn new(tx: mpsc::Sender) -> Self { + Self { + tx, + waker: Arc::new(AtomicWaker::new()), + } + } + + fn poll_reserve(&mut self, cx: &mut Context<'_>) -> Poll> { + if self.tx.is_closed() { + return Poll::Ready(Err(())); + } + + self.waker.register(cx.waker()); + + self.tx.poll_ready(cx).map(|res| match res { + Ok(_) => Ok(()), + Err(_) => Err(()), + }) + } + + fn send_item(&mut self, item: T) -> Result<(), ()> { + if self.tx.is_closed() { + return Err(()); + } + + self.tx.try_send(item).map_err(|_| ()) + } + + fn is_closed(&self) -> bool { + self.tx.is_closed() + } + + fn clone(&self) -> Self { + Self { + tx: self.tx.clone(), + waker: self.waker.clone(), + } + } +} diff --git a/packages/apalis-core/src/worker/buffer/worker.rs b/packages/apalis-core/src/worker/buffer/worker.rs new file mode 100644 index 00000000..1ace6f26 --- /dev/null +++ b/packages/apalis-core/src/worker/buffer/worker.rs @@ -0,0 +1,184 @@ +use super::{ + error::{Closed, ServiceError}, + message::Message, +}; +use futures::{channel::mpsc, ready, Stream}; +use std::sync::{Arc, Mutex}; +use std::{ + future::Future, + pin::Pin, + task::{Context, Poll}, +}; + +use tower::Service; + +pin_project_lite::pin_project! { + #[derive(Debug)] + pub struct Worker + where + T: Service, + { + current_message: Option>, + rx: mpsc::Receiver>, + service: T, + finish: bool, + failed: Option, + handle: Handle, + } +} + +/// Get the error out +#[derive(Debug)] +pub(crate) struct Handle { + inner: Arc>>, +} + +impl Worker +where + T: Service, + T::Error: Into, +{ + pub(crate) fn new( + service: T, + rx: mpsc::Receiver>, + ) -> (Handle, Worker) { + let handle = Handle { + inner: Arc::new(Mutex::new(None)), + }; + + let worker = Worker { + current_message: None, + finish: false, + failed: None, + rx, + service, + handle: handle.clone(), + }; + + (handle, worker) + } + + /// Return the next queued Message that hasn't been canceled. 
+ /// + /// If a `Message` is returned, the `bool` is true if this is the first time we received this + /// message, and false otherwise (i.e., we tried to forward it to the backing service before). + #[allow(clippy::type_complexity)] + fn poll_next_msg( + &mut self, + cx: &mut Context<'_>, + ) -> Poll, bool)>> { + if self.finish { + // We've already received None and are shutting down + return Poll::Ready(None); + } + + // tracing::trace!("worker polling for next message"); + if let Some(msg) = self.current_message.take() { + // If the oneshot sender is closed, then the receiver is dropped, + // and nobody cares about the response. If this is the case, we + // should continue to the next request. + if !msg.tx.is_canceled() { + // tracing::trace!("resuming buffered request"); + return Poll::Ready(Some((msg, false))); + } + + // tracing::trace!("dropping cancelled buffered request"); + } + + // Get the next request + while let Some(msg) = ready!(Pin::new(&mut self.rx).poll_next(cx)) { + if !msg.tx.is_canceled() { + // tracing::trace!("processing new request"); + return Poll::Ready(Some((msg, true))); + } + // Otherwise, request is canceled, so pop the next one. + // tracing::trace!("dropping cancelled request"); + } + + Poll::Ready(None) + } + + fn failed(&mut self, error: tower::BoxError) { + let error = ServiceError::new(error); + + let mut inner = self.handle.inner.lock().unwrap(); + + if inner.is_some() { + return; + } + + *inner = Some(error.clone()); + drop(inner); + + self.rx.close(); + self.failed = Some(error); + } +} + +impl Future for Worker +where + T: Service, + T::Error: Into, +{ + type Output = (); + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + if self.finish { + return Poll::Ready(()); + } + + loop { + match ready!(self.poll_next_msg(cx)) { + Some((msg, _)) => { + if let Some(ref failed) = self.failed { + let _ = msg.tx.send(Err(failed.clone())); + continue; + } + match self.service.poll_ready(cx) { + Poll::Ready(Ok(())) => { + let response = self.service.call(msg.request); + let _ = msg.tx.send(Ok(response)); + } + Poll::Pending => { + self.current_message = Some(msg); + return Poll::Pending; + } + Poll::Ready(Err(e)) => { + let error = e.into(); + self.failed(error); + let _ = msg.tx.send(Err(self + .failed + .as_ref() + .expect("Worker::failed did not set self.failed?") + .clone())); + } + } + } + None => { + // No more requests _ever_. + self.finish = true; + return Poll::Ready(()); + } + } + } + } +} + +impl Handle { + pub(crate) fn get_error_on_closed(&self) -> tower::BoxError { + self.inner + .lock() + .unwrap() + .as_ref() + .map(|svc_err| svc_err.clone().into()) + .unwrap_or_else(|| Closed::new().into()) + } +} + +impl Clone for Handle { + fn clone(&self) -> Handle { + Handle { + inner: self.inner.clone(), + } + } +} diff --git a/packages/apalis-core/src/worker/mod.rs b/packages/apalis-core/src/worker/mod.rs index c92bef55..82539c8e 100644 --- a/packages/apalis-core/src/worker/mod.rs +++ b/packages/apalis-core/src/worker/mod.rs @@ -23,7 +23,11 @@ use std::task::{Context as TaskCtx, Poll, Waker}; use thiserror::Error; use tower::{Service, ServiceBuilder, ServiceExt}; +mod buffer; mod stream; + pub use buffer::service::Buffer; + // By default a worker starts 3 futures, one for polling, one for worker stream and the other for consuming.
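// With `Buffer` in place, a single service instance (which may be `!Clone`)
// is owned by one background worker future and shared across all worker
// instances through an mpsc channel; this is what allows the `Clone` bound
// on services to be dropped throughout this patch.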
const WORKER_FUTURES: usize = 3; @@ -217,7 +221,7 @@ impl Worker> { /// Start a worker with a custom executor pub fn with_executor(self, executor: E) -> Worker> where - S: Service> + Send + 'static + Clone, + S: Service> + Send + 'static, P: Backend> + 'static, J: Send + 'static + Sync, S::Future: Send, @@ -237,7 +241,7 @@ impl Worker> { .shared(); Self::build_worker_instance( WorkerId::new(self.id.name()), - service.clone(), + service, executor.clone(), notifier.clone(), polling.clone(), @@ -249,7 +253,7 @@ impl Worker> { /// Run as a monitored worker pub fn with_monitor(self, monitor: &Monitor) -> Worker> where - S: Service> + Send + 'static + Clone, + S: Service> + Send + 'static, P: Backend> + 'static, J: Send + 'static + Sync, S::Future: Send, @@ -270,7 +274,7 @@ impl Worker> { .shared(); Self::build_worker_instance( WorkerId::new(self.id.name()), - service.clone(), + service, executor.clone(), notifier.clone(), polling.clone(), @@ -286,7 +290,7 @@ impl Worker> { monitor: &Monitor, ) -> Vec>> where - S: Service> + Send + 'static + Clone, + S: Service> + Send + 'static, P: Backend> + 'static, J: Send + 'static + Sync, S::Future: Send, @@ -297,6 +301,7 @@ impl Worker> { { let notifier = Notify::new(); let service = self.state.service; + let (service, poll_worker) = Buffer::pair(service, instances); let backend = self.state.backend; let executor = monitor.executor().clone(); let context = monitor.context().clone(); @@ -307,6 +312,8 @@ impl Worker> { .shared(); let mut workers = Vec::new(); + executor.spawn(poll_worker); + for instance in 0..instances { workers.push(Self::build_worker_instance( WorkerId::new_with_instance(self.id.name(), instance), @@ -329,7 +336,7 @@ impl Worker> { executor: E, ) -> Vec>> where - S: Service> + Send + 'static + Clone, + S: Service> + Send + 'static, P: Backend> + 'static, J: Send + 'static + Sync, S::Future: Send, @@ -342,13 +349,14 @@ impl Worker> { let worker_id = self.id.clone(); let notifier = Notify::new(); let service = self.state.service; + let (service, poll_worker) = Buffer::pair(service, instances); let backend = self.state.backend; let poller = backend.poll(worker_id.clone()); let polling = poller.heartbeat.shared(); let worker_stream = WorkerStream::new(poller.stream, notifier.clone()) .into_future() .shared(); - + executor.spawn(poll_worker); let mut workers = Vec::new(); for instance in 0..instances { workers.push(Self::build_worker_instance( @@ -374,7 +382,7 @@ impl Worker> { context: Option, ) -> Worker> where - LS: Service> + Send + 'static + Clone, + LS: Service> + Send + 'static, LS::Future: Send + 'static, LS::Response: 'static, LS::Error: Send + Sync + Into + 'static, @@ -409,7 +417,7 @@ impl Worker> { worker: Worker>, notifier: WorkerNotify>, Error>>, ) where - LS: Service> + Send + 'static + Clone, + LS: Service> + Send + 'static, LS::Future: Send + 'static, LS::Response: 'static, LS::Error: Send + Sync + Into + 'static, diff --git a/packages/apalis-redis/lua/ack_job.lua b/packages/apalis-redis/lua/ack_job.lua index d451baed..a51b6a64 100644 --- a/packages/apalis-redis/lua/ack_job.lua +++ b/packages/apalis-redis/lua/ack_job.lua @@ -4,7 +4,7 @@ -- ARGV[1]: the job ID -- ARGV[2]: the current time --- Returns: nil +-- Returns: bool -- Remove the job from this consumer's inflight set local removed = redis.call("srem", KEYS[1], ARGV[1]) diff --git a/packages/apalis-redis/src/storage.rs b/packages/apalis-redis/src/storage.rs index 700b4638..c5f85a35 100644 --- a/packages/apalis-redis/src/storage.rs +++ 
b/packages/apalis-redis/src/storage.rs @@ -1,6 +1,6 @@ use apalis_core::codec::json::JsonCodec; use apalis_core::data::Extensions; -use apalis_core::layers::{AckLayer, AckStream}; +use apalis_core::layers::{Ack, AckLayer, AckResponse, AckStream}; use apalis_core::poller::controller::Controller; use apalis_core::poller::stream::BackendStream; use apalis_core::poller::Poller; @@ -19,11 +19,11 @@ use redis::aio::ConnectionLike; use redis::ErrorKind; use redis::{aio::ConnectionManager, Client, IntoConnectionInfo, RedisError, Script, Value}; use serde::{de::DeserializeOwned, Deserialize, Serialize}; - use std::any::type_name; +use std::fmt::{self, Debug}; +use std::io; use std::num::TryFromIntError; use std::sync::Arc; -use std::{fmt, io}; use std::{marker::PhantomData, time::Duration}; /// Shorthand to create a client and connect @@ -452,8 +452,8 @@ impl< } } id_to_ack = ack_stream.next() => { - if let Some((worker_id, task_id)) = id_to_ack { - self.ack(&worker_id, &task_id).await.unwrap(); + if let Some(res) = id_to_ack { + self.ack(res).await.unwrap(); } } }; @@ -467,19 +467,22 @@ impl< } } -impl RedisStorage { - /// Ack a job - pub async fn ack(&mut self, worker_id: &WorkerId, task_id: &TaskId) -> Result<(), RedisError> { +impl Ack + for RedisStorage +{ + type Acknowledger = TaskId; + type Error = RedisError; + async fn ack(&mut self, res: AckResponse) -> Result<(), RedisError> { let ack_job = self.scripts.ack_job.clone(); - let inflight_set = format!("{}:{}", self.config.inflight_jobs_set(), worker_id); + let inflight_set = format!("{}:{}", self.config.inflight_jobs_set(), res.worker); let done_jobs_set = &self.config.done_jobs_set(); - let now: i64 = Utc::now().timestamp(); + let now: i64 = res.acknowledger.inner().timestamp_ms().try_into().unwrap(); ack_job .key(inflight_set) .key(done_jobs_set) - .arg(task_id.to_string()) + .arg(res.acknowledger.to_string()) .arg(now) .invoke_async(&mut self.conn) .await @@ -984,7 +987,11 @@ mod tests { let job_id = &job.get::().unwrap().id; storage - .ack(&worker_id, &job_id) + .ack(AckResponse { + acknowledger: job_id.clone(), + result: "Success".to_string(), + worker: worker_id.clone(), + }) .await .expect("failed to acknowledge the job"); diff --git a/packages/apalis-sql/src/mysql.rs b/packages/apalis-sql/src/mysql.rs index be54ed67..2abaa17b 100644 --- a/packages/apalis-sql/src/mysql.rs +++ b/packages/apalis-sql/src/mysql.rs @@ -1,6 +1,6 @@ use apalis_core::codec::json::JsonCodec; use apalis_core::error::Error; -use apalis_core::layers::{Ack, AckLayer}; +use apalis_core::layers::{Ack, AckLayer, AckResponse}; use apalis_core::notify::Notify; use apalis_core::poller::controller::Controller; use apalis_core::poller::stream::BackendStream; @@ -420,13 +420,9 @@ impl Backend Ack for MysqlStorage { type Acknowledger = TaskId; type Error = sqlx::Error; - async fn ack( - &mut self, - worker_id: &WorkerId, - task_id: &Self::Acknowledger, - ) -> Result<(), sqlx::Error> { + async fn ack(&mut self, response: AckResponse) -> Result<(), sqlx::Error> { self.ack_notify - .notify((worker_id.clone(), task_id.clone())) + .notify((response.worker.clone(), response.acknowledger.clone())) .map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::BrokenPipe, e)))?; Ok(()) @@ -631,7 +627,11 @@ mod tests { let job_id = ctx.id(); storage - .ack(&worker_id, job_id) + .ack(AckResponse { + acknowledger: job_id.clone(), + result: "Success".to_string(), + worker: worker_id.clone(), + }) .await .expect("failed to acknowledge the job"); diff --git 
a/packages/apalis-sql/src/postgres.rs b/packages/apalis-sql/src/postgres.rs index 77cfe3f8..c8f258cc 100644 --- a/packages/apalis-sql/src/postgres.rs +++ b/packages/apalis-sql/src/postgres.rs @@ -42,7 +42,7 @@ use crate::context::SqlContext; use crate::Config; use apalis_core::codec::json::JsonCodec; use apalis_core::error::Error; -use apalis_core::layers::{Ack, AckLayer}; +use apalis_core::layers::{Ack, AckLayer, AckResponse}; use apalis_core::notify::Notify; use apalis_core::poller::controller::Controller; use apalis_core::poller::stream::BackendStream; @@ -63,6 +63,7 @@ use sqlx::types::chrono::{DateTime, Utc}; use sqlx::{Pool, Postgres, Row}; use std::any::type_name; use std::convert::TryInto; +use std::fmt::Debug; use std::sync::Arc; use std::{fmt, io}; use std::{marker::PhantomData, time::Duration}; @@ -88,7 +89,7 @@ pub struct PostgresStorage { >, config: Config, controller: Controller, - ack_notify: Notify<(WorkerId, TaskId)>, + ack_notify: Notify>, subscription: Option, } @@ -137,7 +138,6 @@ impl Backend Backend { if let Some(ids) = ids { - let worker_ids: Vec = ids.iter().map(|c| c.0.to_string()).collect(); - let task_ids: Vec = ids.iter().map(|c| c.1.to_string()).collect(); + let worker_ids: Vec = ids.iter().map(|c| c.worker.to_string()).collect(); + let task_ids: Vec = ids.iter().map(|c| c.acknowledger.to_string()).collect(); let query = "UPDATE apalis.jobs SET status = 'Done', done_at = now() WHERE id = ANY($1::text[]) AND lock_by = ANY($2::text[])"; @@ -516,13 +516,9 @@ where impl Ack for PostgresStorage { type Acknowledger = TaskId; type Error = sqlx::Error; - async fn ack( - &mut self, - worker_id: &WorkerId, - task_id: &Self::Acknowledger, - ) -> Result<(), sqlx::Error> { + async fn ack(&mut self, res: AckResponse) -> Result<(), sqlx::Error> { self.ack_notify - .notify((worker_id.clone(), task_id.clone())) + .notify(res) .map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::Interrupted, e)))?; Ok(()) @@ -589,7 +585,6 @@ mod tests { use super::*; use email_service::Email; - use futures::StreamExt; use sqlx::types::chrono::Utc; /// migrate DB and return a storage instance. 
@@ -642,7 +637,7 @@ mod tests { storage: &mut PostgresStorage, worker_id: &WorkerId, ) -> Request { - let mut req = storage.fetch_next(worker_id).await; + let req = storage.fetch_next(worker_id).await; req.unwrap()[0].clone() } @@ -703,7 +698,11 @@ mod tests { let job_id = ctx.id(); storage - .ack(&worker_id, job_id) + .ack(AckResponse { + acknowledger: job_id.clone(), + result: "Success".to_string(), + worker: worker_id.clone(), + }) .await .expect("failed to acknowledge the job"); diff --git a/packages/apalis-sql/src/sqlite.rs b/packages/apalis-sql/src/sqlite.rs index 652184b6..3e31a64d 100644 --- a/packages/apalis-sql/src/sqlite.rs +++ b/packages/apalis-sql/src/sqlite.rs @@ -3,7 +3,7 @@ use crate::Config; use apalis_core::codec::json::JsonCodec; use apalis_core::error::Error; -use apalis_core::layers::{Ack, AckLayer}; +use apalis_core::layers::{Ack, AckLayer, AckResponse}; use apalis_core::poller::controller::Controller; use apalis_core::poller::stream::BackendStream; use apalis_core::poller::Poller; @@ -477,17 +477,15 @@ impl Backend Ack for SqliteStorage { type Acknowledger = TaskId; type Error = sqlx::Error; - async fn ack( - &mut self, - worker_id: &WorkerId, - task_id: &Self::Acknowledger, - ) -> Result<(), sqlx::Error> { + async fn ack(&mut self, res: AckResponse) -> Result<(), sqlx::Error> { + let pool = self.pool.clone(); let query = - "UPDATE Jobs SET status = 'Done', done_at = strftime('%s','now') WHERE id = ?1 AND lock_by = ?2"; + "UPDATE Jobs SET status = 'Done', done_at = strftime('%s','now'), last_error = ?3 WHERE id = ?1 AND lock_by = ?2"; sqlx::query(query) - .bind(task_id.to_string()) - .bind(worker_id.to_string()) - .execute(&self.pool) + .bind(res.acknowledger.to_string()) + .bind(res.worker.to_string()) + .bind(res.result) + .execute(&pool) .await?; Ok(()) } @@ -609,7 +607,11 @@ mod tests { let job_id = ctx.id(); storage - .ack(&worker_id, job_id) + .ack(AckResponse { + acknowledger: job_id.clone(), + result: "Success".to_string(), + worker: worker_id.clone(), + }) .await .expect("failed to acknowledge the job"); From 7b953fdad4ee2faa1d0c8e468827046f9199c8a8 Mon Sep 17 00:00:00 2001 From: Geoffrey Mureithi <95377562+geofmureithi@users.noreply.github.com> Date: Thu, 4 Jul 2024 21:12:22 +0300 Subject: [PATCH 15/59] bump: to 0.6.0-rc.1 (#350) --- Cargo.toml | 10 +++++----- packages/apalis-core/Cargo.toml | 2 +- packages/apalis-cron/Cargo.toml | 4 ++-- packages/apalis-redis/Cargo.toml | 4 ++-- packages/apalis-sql/Cargo.toml | 4 ++-- 5 files changed, 12 insertions(+), 12 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 225d8e50..efa9b433 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apalis" -version = "0.6.0-alpha.1" +version = "0.6.0-rc.1" authors = ["Geoffrey Mureithi "] description = "Simple, extensible multithreaded background job processing for Rust" repository = "https://github.com/geofmureithi/apalis" @@ -71,26 +71,26 @@ layers = [ docsrs = ["document-features"] [dependencies.apalis-redis] -version = "0.6.0-alpha.1" +version = "0.6.0-rc.1" optional = true path = "./packages/apalis-redis" default-features = false [dependencies.apalis-sql] -version = "0.6.0-alpha.1" +version = "0.6.0-rc.1" features = ["migrate"] optional = true default-features = false path = "./packages/apalis-sql" [dependencies.apalis-core] -version = "0.6.0-alpha.1" +version = "0.6.0-rc.1" default-features = false path = "./packages/apalis-core" [dependencies.apalis-cron] -version = "0.6.0-alpha.1" +version = "0.6.0-rc.1" optional = true default-features = false 
path = "./packages/apalis-cron" diff --git a/packages/apalis-core/Cargo.toml b/packages/apalis-core/Cargo.toml index 81e08a68..a873b9c6 100644 --- a/packages/apalis-core/Cargo.toml +++ b/packages/apalis-core/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apalis-core" -version = "0.6.0-alpha.1" +version = "0.6.0-rc.1" authors = ["Njuguna Mureithi "] edition = "2021" license = "MIT" diff --git a/packages/apalis-cron/Cargo.toml b/packages/apalis-cron/Cargo.toml index ad0b2e04..199c2f4d 100644 --- a/packages/apalis-cron/Cargo.toml +++ b/packages/apalis-cron/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apalis-cron" -version = "0.6.0-alpha.1" +version = "0.6.0-rc.1" edition = "2021" authors = ["Njuguna Mureithi "] license = "MIT" @@ -9,7 +9,7 @@ description = "A simple yet extensible library for cron-like job scheduling for # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -apalis-core = { path = "../../packages/apalis-core", version = "0.6.0-alpha.1", default-features = false, features = [ +apalis-core = { path = "../../packages/apalis-core", version = "0.6.0-rc.1", default-features = false, features = [ "sleep", "json", ] } diff --git a/packages/apalis-redis/Cargo.toml b/packages/apalis-redis/Cargo.toml index 8c22fd51..2f581eda 100644 --- a/packages/apalis-redis/Cargo.toml +++ b/packages/apalis-redis/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apalis-redis" -version = "0.6.0-alpha.1" +version = "0.6.0-rc.1" authors = ["Njuguna Mureithi "] edition = "2021" readme = "../../README.md" @@ -11,7 +11,7 @@ description = "Redis Storage for apalis: use Redis for background jobs and messa # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -apalis-core = { path = "../../packages/apalis-core", version = "0.6.0-alpha.1", default-features = false, features = [ +apalis-core = { path = "../../packages/apalis-core", version = "0.6.0-rc.1", default-features = false, features = [ "sleep", "json", ] } diff --git a/packages/apalis-sql/Cargo.toml b/packages/apalis-sql/Cargo.toml index 1fbc5b43..5d5af803 100644 --- a/packages/apalis-sql/Cargo.toml +++ b/packages/apalis-sql/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apalis-sql" -version = "0.6.0-alpha.1" +version = "0.6.0-rc.1" authors = ["Njuguna Mureithi "] edition = "2021" readme = "../../README.md" @@ -25,7 +25,7 @@ features = ["chrono"] [dependencies] serde = { version = "1", features = ["derive"] } serde_json = "1" -apalis-core = { path = "../../packages/apalis-core", version = "0.6.0-alpha.1", default-features = false, features = [ +apalis-core = { path = "../../packages/apalis-core", version = "0.6.0-rc.1", default-features = false, features = [ "sleep", "json", ] } From 2fc018d76c6ee3a7846cc724ea2529118a13b458 Mon Sep 17 00:00:00 2001 From: Geoffrey Mureithi <95377562+geofmureithi@users.noreply.github.com> Date: Tue, 9 Jul 2024 21:48:27 +0300 Subject: [PATCH 16/59] feat: add rsmq example (#353) --- Cargo.toml | 2 +- examples/redis-mq-example/Cargo.toml | 24 ++++ examples/redis-mq-example/src/main.rs | 179 ++++++++++++++++++++++++++ packages/apalis-core/src/memory.rs | 13 +- packages/apalis-core/src/mq/mod.rs | 6 +- packages/apalis-redis/src/storage.rs | 8 +- src/layers/tracing/make_span.rs | 2 +- 7 files changed, 218 insertions(+), 16 deletions(-) create mode 100644 examples/redis-mq-example/Cargo.toml create mode 100644 examples/redis-mq-example/src/main.rs diff --git a/Cargo.toml b/Cargo.toml index efa9b433..f09fdde1 100644 --- 
a/Cargo.toml +++ b/Cargo.toml @@ -148,7 +148,7 @@ members = [ "examples/tracing", # "examples/rest-api", "examples/async-std-runtime", - "examples/basics", "examples/redis-with-msg-pack", "examples/redis-deadpool", + "examples/basics", "examples/redis-with-msg-pack", "examples/redis-deadpool", "examples/redis-mq-example", ] diff --git a/examples/redis-mq-example/Cargo.toml b/examples/redis-mq-example/Cargo.toml new file mode 100644 index 00000000..2caaee9d --- /dev/null +++ b/examples/redis-mq-example/Cargo.toml @@ -0,0 +1,24 @@ +[package] +name = "redis-mq-example" +version = "0.1.0" +edition = "2021" + +[dependencies] +apalis = { path = "../../", features = ["redis"]} +apalis-core = { path = "../../packages/apalis-core", features = ["json"] } +rsmq_async = "11.1.0" +anyhow = "1" +tokio = { version = "1", features = ["full"] } +serde = "1" +env_logger = "0.10" +tracing-subscriber = "0.3.11" +chrono = { version = "0.4", default-features = false, features = ["clock"] } +email-service = { path = "../email-service" } +rmp-serde = "1.3" +tower = "0.4" +futures = "0.3" + + +[dependencies.tracing] +default-features = false +version = "0.1" diff --git a/examples/redis-mq-example/src/main.rs b/examples/redis-mq-example/src/main.rs new file mode 100644 index 00000000..754f9cb3 --- /dev/null +++ b/examples/redis-mq-example/src/main.rs @@ -0,0 +1,179 @@ +use std::{marker::PhantomData, sync::Arc, time::Duration}; + +use anyhow::Result; +use apalis::{ + layers::tracing::TraceLayer, + prelude::*, + redis::{self, Config, RedisCodec, RedisJob}, +}; + +use apalis_core::{ + codec::json::JsonCodec, + layers::{Ack, AckLayer}, +}; +use email_service::{send_email, Email}; +use futures::{channel::mpsc, SinkExt}; +use rsmq_async::{Rsmq, RsmqConnection, RsmqError}; +use tokio::time::sleep; +use tower::layer::util::Identity; +use tracing::{error, info}; + +struct RedisMq { + conn: Rsmq, + msg_type: PhantomData, + config: redis::Config, + codec: RedisCodec, +} + +// Manually implement Clone for RedisMq +impl Clone for RedisMq { + fn clone(&self) -> Self { + RedisMq { + conn: self.conn.clone(), + msg_type: PhantomData, + config: self.config.clone(), + codec: self.codec.clone(), + } + } +} + +impl Backend> for RedisMq { + type Stream = RequestStream>; + + type Layer = AckLayer; + + fn poll(mut self, worker_id: WorkerId) -> Poller { + let (mut tx, rx) = mpsc::channel(self.config.get_buffer_size()); + let stream: RequestStream> = Box::pin(rx); + let layer = AckLayer::new(self.clone(), worker_id); + let heartbeat = async move { + loop { + sleep(*self.config.get_poll_interval()).await; + let msg: Option> = self + .conn + .receive_message("email", None) + .await + .unwrap() + .map(|r| { + let mut req: Request<_> = self.codec.decode(&r.message).unwrap().into(); + req.insert(r.id); + req + }); + tx.send(Ok(msg)).await.unwrap(); + } + }; + Poller::new_with_layer(stream, heartbeat, layer) + } +} + +impl Ack for RedisMq { + type Acknowledger = String; + + type Error = RsmqError; + + async fn ack( + &mut self, + worker_id: &WorkerId, + data: &Self::Acknowledger, + ) -> Result<(), Self::Error> { + println!("Attempting to ACK {}", data); + self.conn.delete_message("email", data).await?; + Ok(()) + } +} + +impl MessageQueue for RedisMq { + type Error = RsmqError; + + async fn enqueue(&mut self, message: Message) -> Result<(), Self::Error> { + let bytes = self + .codec + .encode(&RedisJob { + ctx: Default::default(), + job: message, + }) + .unwrap(); + self.conn.send_message("email", bytes, None).await?; + Ok(()) + } + + async fn 
dequeue(&mut self) -> Result, Self::Error> { + let codec = self.codec.clone(); + Ok(self.conn.receive_message("email", None).await?.map(|r| { + let req: Request = codec.decode(&r.message).unwrap().into(); + req.take() + })) + } + + async fn size(&mut self) -> Result { + self.conn + .get_queue_attributes("email") + .await? + .msgs + .try_into() + .map_err(|_| RsmqError::InvalidFormat("Could not convert to usize".to_owned())) + } +} + +async fn produce_jobs(mq: &mut RedisMq) -> Result<()> { + for index in 0..1 { + mq.enqueue(Email { + to: index.to_string(), + text: "Test background job from apalis".to_string(), + subject: "Background email job".to_string(), + }) + .await?; + } + Ok(()) +} + +#[tokio::main] +async fn main() -> Result<()> { + std::env::set_var("RUST_LOG", "debug"); + + tracing_subscriber::fmt::init(); + + let mut conn = rsmq_async::Rsmq::new(Default::default()).await?; + let _ = conn.create_queue("email", None, None, None).await; + let mut mq = RedisMq { + conn, + msg_type: PhantomData, + codec: RedisCodec::new(Box::new(JsonCodec)), + config: Config::default(), + }; + // This can be in another part of the program + // produce_jobs(&mut mq).await?; + + let worker = WorkerBuilder::new("rango-tango") + .layer(TraceLayer::new()) + .with_mq(mq) + .build_fn(send_email); + + Monitor::::new() + .register_with_count(2, worker) + .on_event(|e| { + let worker_id = e.id(); + match e.inner() { + Event::Start => { + info!("Worker [{worker_id}] started"); + } + Event::Error(e) => { + error!("Worker [{worker_id}] encountered an error: {e}"); + } + + Event::Exit => { + info!("Worker [{worker_id}] exited"); + } + _ => {} + } + }) + .shutdown_timeout(Duration::from_millis(5000)) + .run_with_signal(async { + tokio::signal::ctrl_c().await?; + info!("Monitor starting shutdown"); + Ok(()) + }) + .await?; + info!("Monitor shutdown complete"); + Ok(()) +} diff --git a/packages/apalis-core/src/memory.rs b/packages/apalis-core/src/memory.rs index f4a2da6d..558b0e30 100644 --- a/packages/apalis-core/src/memory.rs +++ b/packages/apalis-core/src/memory.rs @@ -113,17 +113,16 @@ impl Backend> for MemoryStorage { impl MessageQueue for MemoryStorage { type Error = (); - async fn enqueue(&self, message: Message) -> Result<(), Self::Error> { - self.inner.sender.clone().try_send(message).unwrap(); + async fn enqueue(&mut self, message: Message) -> Result<(), Self::Error> { + self.inner.sender.try_send(message).unwrap(); Ok(()) } - async fn dequeue(&self) -> Result, ()> { - Err(()) - // self.inner.receiver.lock().await.next().await + async fn dequeue(&mut self) -> Result, ()> { + Ok(self.inner.receiver.lock().await.next().await) } - async fn size(&self) -> Result { - Ok(self.inner.clone().count().await) + async fn size(&mut self) -> Result { + Ok(self.inner.receiver.lock().await.size_hint().0) } } diff --git a/packages/apalis-core/src/mq/mod.rs b/packages/apalis-core/src/mq/mod.rs index 0183294b..a01d7482 100644 --- a/packages/apalis-core/src/mq/mod.rs +++ b/packages/apalis-core/src/mq/mod.rs @@ -12,12 +12,12 @@ pub trait MessageQueue: Backend> { type Error; /// Enqueues a message to the queue. - fn enqueue(&self, message: Message) -> impl Future> + Send; + fn enqueue(&mut self, message: Message) -> impl Future> + Send; /// Attempts to dequeue a message from the queue. /// Returns `None` if the queue is empty. - fn dequeue(&self) -> impl Future, Self::Error>> + Send; + fn dequeue(&mut self) -> impl Future, Self::Error>> + Send; /// Returns the current size of the queue. 
-    fn size(&self) -> impl Future<Output = Result<usize, Self::Error>> + Send;
+    fn size(&mut self) -> impl Future<Output = Result<usize, Self::Error>> + Send;
 }
diff --git a/packages/apalis-redis/src/storage.rs b/packages/apalis-redis/src/storage.rs
index c5f85a35..ad96440d 100644
--- a/packages/apalis-redis/src/storage.rs
+++ b/packages/apalis-redis/src/storage.rs
@@ -94,8 +94,8 @@ struct RedisScript {
 /// The actual structure of a Redis job
 #[derive(Clone, Debug, Serialize, Deserialize)]
 pub struct RedisJob<J> {
-    ctx: Context,
-    job: J,
+    pub ctx: Context,
+    pub job: J,
 }
 
 impl<J> From<RedisJob<J>> for Request<J> {
@@ -126,8 +126,8 @@ impl<J> TryFrom<Request<J>> for RedisJob<J> {
     }
 }
 
-#[derive(Clone, Debug, Serialize, Deserialize)]
-struct Context {
+#[derive(Clone, Debug, Serialize, Deserialize, Default)]
+pub struct Context {
     id: TaskId,
     attempts: usize,
 }
diff --git a/src/layers/tracing/make_span.rs b/src/layers/tracing/make_span.rs
index 58698caf..4de8f651 100644
--- a/src/layers/tracing/make_span.rs
+++ b/src/layers/tracing/make_span.rs
@@ -73,7 +73,7 @@ impl<B> MakeSpan<B> for DefaultMakeSpan {
             tracing::span!(
                 parent: span,
                 $level,
-                "job",
+                "task",
             )
         };
     }

From 8bc289953930b180ac6a1218c729b304ccdcdc3a Mon Sep 17 00:00:00 2001
From: Geoffrey Mureithi <95377562+geofmureithi@users.noreply.github.com>
Date: Tue, 9 Jul 2024 23:02:33 +0300
Subject: [PATCH 17/59] Fix: load layer from poller (#354)

* fix: backend layers were not loaded
* fix: handle clone
---
 examples/redis/src/main.rs              |  7 +++--
 packages/apalis-core/src/layers.rs      |  2 +-
 packages/apalis-core/src/monitor/mod.rs | 16 ++++++++++-
 packages/apalis-core/src/worker/mod.rs  | 38 +++++++++++++++++++++++--
 4 files changed, 56 insertions(+), 7 deletions(-)

diff --git a/examples/redis/src/main.rs b/examples/redis/src/main.rs
index 35b498d5..d3da324c 100644
--- a/examples/redis/src/main.rs
+++ b/examples/redis/src/main.rs
@@ -5,8 +5,8 @@ use std::{
 };
 
 use anyhow::Result;
-use apalis::prelude::*;
 use apalis::{layers::limit::RateLimitLayer, redis::RedisStorage};
+use apalis::{layers::TimeoutLayer, prelude::*};
 use email_service::{send_email, Email};
 use tracing::{error, info};
 
@@ -46,9 +46,10 @@ async fn main() -> Result<()> {
     produce_jobs(storage.clone()).await?;
 
     let worker = WorkerBuilder::new("rango-tango")
-        .chain(|svc| svc.timeout(Duration::from_millis(500)))
-        .data(Count::default())
+        .chain(|svc| svc.map_err(|e| Error::Failed(e)))
         .layer(RateLimitLayer::new(5, Duration::from_secs(1)))
+        .layer(TimeoutLayer::new(Duration::from_millis(500)))
+        .data(Count::default())
         .with_storage(storage)
         .build_fn(send_email);
 
diff --git a/packages/apalis-core/src/layers.rs b/packages/apalis-core/src/layers.rs
index d561c685..dc480cde 100644
--- a/packages/apalis-core/src/layers.rs
+++ b/packages/apalis-core/src/layers.rs
@@ -188,7 +188,7 @@ impl<A> AckResponse<A> {
 }
 
 /// A generic stream that emits (worker_id, task_id)
-#[derive(Debug)]
+#[derive(Debug, Clone)]
 pub struct AckStream<A>(pub Sender<AckResponse<A>>);
 
 impl<J, A> Ack<J> for AckStream<A> {
diff --git a/packages/apalis-core/src/monitor/mod.rs b/packages/apalis-core/src/monitor/mod.rs
index efec9eb9..c4837e99 100644
--- a/packages/apalis-core/src/monitor/mod.rs
+++ b/packages/apalis-core/src/monitor/mod.rs
@@ -5,7 +5,7 @@ use std::{
 };
 
 use futures::{future::BoxFuture, Future, FutureExt};
-use tower::Service;
+use tower::{Layer, Service};
 
 mod shutdown;
 
 use crate::{
@@ -91,6 +91,13 @@ impl<E: Executor + Clone + Send + 'static + Sync> Monitor<E> {
         S::Response: 'static,
         S::Error: Send + Sync + 'static + Into<BoxDynError>,
        <P as Backend<Request<J>>>::Stream: Unpin + Send + 'static,
+        P::Layer: Layer<S>,
+        <<P as Backend<Request<J>>>::Layer as Layer<S>>::Service: Service<Request<J>>,
+        <<P as Backend<Request<J>>>::Layer as Layer<S>>::Service: Send,
+        <<<P as Backend<Request<J>>>::Layer as Layer<S>>::Service as Service<Request<J>>>::Future:
+            Send,
+        <<<P as Backend<Request<J>>>::Layer as Layer<S>>::Service as Service<Request<J>>>::Error:
+            Send + std::error::Error + Sync,
     {
         let workers = worker.with_monitor_instances(count, &self);
         self.workers.extend(workers);
diff --git a/packages/apalis-core/src/worker/mod.rs b/packages/apalis-core/src/worker/mod.rs
index 82539c8e..3f3fa599 100644
--- a/packages/apalis-core/src/worker/mod.rs
+++ b/packages/apalis-core/src/worker/mod.rs
@@ -21,7 +21,7 @@ use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
 use std::sync::{Arc, Mutex};
 use std::task::{Context as TaskCtx, Poll, Waker};
 use thiserror::Error;
-use tower::{Service, ServiceBuilder, ServiceExt};
+use tower::{Layer, Service, ServiceBuilder, ServiceExt};
 
 mod buffer;
 mod stream;
 
@@ -230,12 +230,21 @@ impl<S, P> Worker<Ready<S, P>> {
         S::Error: Send + Sync + 'static + Into<BoxDynError>,
        <P as Backend<Request<J>>>::Stream: Unpin + Send + 'static,
         E: Executor + Clone + Send + 'static + Sync,
+        P::Layer: Layer<S>,
+        <<P as Backend<Request<J>>>::Layer as Layer<S>>::Service: Service<Request<J>>,
+        <<P as Backend<Request<J>>>::Layer as Layer<S>>::Service: Send,
+        <<<P as Backend<Request<J>>>::Layer as Layer<S>>::Service as Service<Request<J>>>::Future:
+            Send,
+        <<<P as Backend<Request<J>>>::Layer as Layer<S>>::Service as Service<Request<J>>>::Error:
+            Send + std::error::Error + Sync,
     {
         let notifier = Notify::new();
         let service = self.state.service;
         let backend = self.state.backend;
         let poller = backend.poll(self.id.clone());
         let polling = poller.heartbeat.shared();
+        let default_layer = poller.layer;
+        let service = default_layer.layer(service);
         let worker_stream = WorkerStream::new(poller.stream, notifier.clone())
             .into_future()
             .shared();
@@ -261,6 +270,13 @@ impl<S, P> Worker<Ready<S, P>> {
         S::Error: Send + Sync + 'static + Into<BoxDynError>,
        <P as Backend<Request<J>>>::Stream: Unpin + Send + 'static,
         E: Executor + Clone + Send + 'static + Sync,
+        P::Layer: Layer<S>,
+        <<P as Backend<Request<J>>>::Layer as Layer<S>>::Service: Service<Request<J>>,
+        <<P as Backend<Request<J>>>::Layer as Layer<S>>::Service: Send,
+        <<<P as Backend<Request<J>>>::Layer as Layer<S>>::Service as Service<Request<J>>>::Future:
+            Send,
+        <<<P as Backend<Request<J>>>::Layer as Layer<S>>::Service as Service<Request<J>>>::Error:
+            Send + std::error::Error + Sync,
     {
         let notifier = Notify::new();
         let service = self.state.service;
@@ -268,6 +284,8 @@ impl<S, P> Worker<Ready<S, P>> {
         let executor = monitor.executor().clone();
         let context = monitor.context().clone();
         let poller = backend.poll(self.id.clone());
+        let default_layer = poller.layer;
+        let service = default_layer.layer(service);
         let polling = poller.heartbeat.shared();
         let worker_stream = WorkerStream::new(poller.stream, notifier.clone())
             .into_future()
@@ -298,14 +316,23 @@ impl<S, P> Worker<Ready<S, P>> {
         S::Error: Send + Sync + 'static + Into<BoxDynError>,
        <P as Backend<Request<J>>>::Stream: Unpin + Send + 'static,
         E: Executor + Clone + Send + 'static + Sync,
+        P::Layer: Layer<S>,
+        <<P as Backend<Request<J>>>::Layer as Layer<S>>::Service: Service<Request<J>>,
+        <<P as Backend<Request<J>>>::Layer as Layer<S>>::Service: Send,
+        <<<P as Backend<Request<J>>>::Layer as Layer<S>>::Service as Service<Request<J>>>::Future:
+            Send,
+        <<<P as Backend<Request<J>>>::Layer as Layer<S>>::Service as Service<Request<J>>>::Error:
+            Send + std::error::Error + Sync,
     {
         let notifier = Notify::new();
         let service = self.state.service;
-        let (service, poll_worker) = Buffer::pair(service, instances);
         let backend = self.state.backend;
         let executor = monitor.executor().clone();
         let context = monitor.context().clone();
         let poller = backend.poll(self.id.clone());
+        let default_layer = poller.layer;
+        let service = default_layer.layer(service);
+        let (service, poll_worker) = Buffer::pair(service, instances);
         let polling = poller.heartbeat.shared();
         let worker_stream = WorkerStream::new(poller.stream, notifier.clone())
             .into_future()
@@ -345,6 +372,13 @@ impl<S, P> Worker<Ready<S, P>> {
         S::Error: Send + Sync + 'static + Into<BoxDynError>,
        <P as Backend<Request<J>>>::Stream: Unpin + Send + 'static,
         E: Executor + Clone + Send + 'static + Sync,
+        P::Layer: Layer<S>,
+        <<P as Backend<Request<J>>>::Layer as Layer<S>>::Service: Service<Request<J>>,
+        <<P as Backend<Request<J>>>::Layer as Layer<S>>::Service: Send,
+        <<<P as Backend<Request<J>>>::Layer as Layer<S>>::Service as Service<Request<J>>>::Future:
+            Send,
+        <<<P as Backend<Request<J>>>::Layer as Layer<S>>::Service as Service<Request<J>>>::Error:
+            Send + std::error::Error + Sync,
     {
         let worker_id = self.id.clone();
         let notifier = Notify::new();

From a6998a6855b12bf549fa286cbce70098810ff7aa Mon Sep 17 00:00:00 2001
From: Geoffrey Mureithi <95377562+geofmureithi@users.noreply.github.com>
Date: Tue, 9 Jul 2024 23:18:02 +0300
Subject: [PATCH 18/59] Fix: mq example (#355)

* fix: mq ack
* lint: fmt
---
 benches/storages.rs                   |  1 -
 examples/redis-mq-example/src/main.rs | 16 +++++-----------
 packages/apalis-core/src/mq/mod.rs    |  3 ++-
 3 files changed, 7 insertions(+), 13 deletions(-)

diff --git a/benches/storages.rs b/benches/storages.rs
index 2e601bb4..f0566378 100644
--- a/benches/storages.rs
+++ b/benches/storages.rs
@@ -6,7 +6,6 @@ use apalis::{
     postgres::{PgPool, PostgresStorage},
     sqlite::{SqlitePool, SqliteStorage},
 };
-use apalis_redis::Config;
 use criterion::*;
 use futures::Future;
 use paste::paste;
diff --git a/examples/redis-mq-example/src/main.rs b/examples/redis-mq-example/src/main.rs
index 754f9cb3..04c3e30a 100644
--- a/examples/redis-mq-example/src/main.rs
+++ b/examples/redis-mq-example/src/main.rs
@@ -9,13 +9,12 @@ use apalis::{
 
 use apalis_core::{
     codec::json::JsonCodec,
-    layers::{Ack, AckLayer},
+    layers::{Ack, AckLayer, AckResponse},
 };
 use email_service::{send_email, Email};
 use futures::{channel::mpsc, SinkExt};
 use rsmq_async::{Rsmq, RsmqConnection, RsmqError};
 use tokio::time::sleep;
-use tower::layer::util::Identity;
 use tracing::{error, info};
 
 struct RedisMq<T> {
@@ -71,13 +70,9 @@ impl<T: Send> Ack<T> for RedisMq<T> {
     type Error = RsmqError;
 
-    async fn ack(
-        &mut self,
-        worker_id: &WorkerId,
-        data: &Self::Acknowledger,
-    ) -> Result<(), Self::Error> {
-        println!("Attempting to ACK {}", data);
-        self.conn.delete_message("email", data).await?;
+    async fn ack(&mut self, ack: AckResponse<Self::Acknowledger>) -> Result<(), Self::Error> {
+        println!("Attempting to ACK {}", ack.acknowledger);
+        self.conn.delete_message("email", &ack.acknowledger).await?;
         Ok(())
     }
 }
@@ -141,8 +136,7 @@ async fn main() -> Result<()> {
         codec: RedisCodec::new(Box::new(JsonCodec)),
         config: Config::default(),
     };
-    // This can be in another part of the program
-    // produce_jobs(&mut mq).await?;
+    produce_jobs(&mut mq).await?;
 
     let worker = WorkerBuilder::new("rango-tango")
         .layer(TraceLayer::new())
diff --git a/packages/apalis-core/src/mq/mod.rs b/packages/apalis-core/src/mq/mod.rs
index a01d7482..217695ca 100644
--- a/packages/apalis-core/src/mq/mod.rs
+++ b/packages/apalis-core/src/mq/mod.rs
@@ -12,7 +12,8 @@ pub trait MessageQueue<Message>: Backend<Request<Message>> {
     type Error;
 
     /// Enqueues a message to the queue.
-    fn enqueue(&mut self, message: Message) -> impl Future<Output = Result<(), Self::Error>> + Send;
+    fn enqueue(&mut self, message: Message)
+        -> impl Future<Output = Result<(), Self::Error>> + Send;
 
     /// Attempts to dequeue a message from the queue.
     /// Returns `None` if the queue is empty.
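A minimal sketch of the `MessageQueue` contract that patches 16-18 settle on: `&mut self` receivers and `impl Future` return types. This is an illustration, not code from the patch series: the trait is restated stand-alone (the real apalis-core trait also requires `Backend<Request<Message>>` as a super-trait), `VecDequeMq` is a hypothetical in-memory queue invented for the example, and Rust 1.75+ is assumed for `impl Trait` in trait methods.

```rust
use std::collections::VecDeque;
use std::future::{ready, Future};

// Stand-alone restatement of the trait shape from patches 16-18. The real
// apalis-core trait is `MessageQueue<Message>: Backend<Request<Message>>`;
// the super-trait is omitted here to keep the sketch self-contained.
trait MessageQueue<Message> {
    type Error;

    fn enqueue(&mut self, message: Message)
        -> impl Future<Output = Result<(), Self::Error>> + Send;

    fn dequeue(&mut self) -> impl Future<Output = Result<Option<Message>, Self::Error>> + Send;

    fn size(&mut self) -> impl Future<Output = Result<usize, Self::Error>> + Send;
}

// Hypothetical in-memory queue, used only to illustrate the contract.
struct VecDequeMq<M> {
    inner: VecDeque<M>,
}

impl<M: Send> MessageQueue<M> for VecDequeMq<M> {
    type Error = ();

    fn enqueue(&mut self, message: M) -> impl Future<Output = Result<(), Self::Error>> + Send {
        // `&mut self` allows direct mutation; no channel clone or lock is needed.
        self.inner.push_back(message);
        ready(Ok(()))
    }

    fn dequeue(&mut self) -> impl Future<Output = Result<Option<M>, Self::Error>> + Send {
        ready(Ok(self.inner.pop_front()))
    }

    fn size(&mut self) -> impl Future<Output = Result<usize, Self::Error>> + Send {
        ready(Ok(self.inner.len()))
    }
}

fn main() {
    // `futures` (0.3) is used only to drive the example futures to completion.
    futures::executor::block_on(async {
        let mut q = VecDequeMq { inner: VecDeque::new() };
        q.enqueue(42u32).await.unwrap();
        assert_eq!(q.size().await.unwrap(), 1);
        assert_eq!(q.dequeue().await.unwrap(), Some(42));
    });
}
```

The switch to `&mut self` is what lets the `memory.rs` hunk above drop the `self.inner.sender.clone()` workaround, and it matches how the `RedisMq` example threads a single owned connection through `enqueue`, `dequeue`, and `ack`.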
From adf401ef651cb2413c57d99cd922bc6ec20b7b41 Mon Sep 17 00:00:00 2001 From: Geoffrey Mureithi <95377562+geofmureithi@users.noreply.github.com> Date: Wed, 10 Jul 2024 00:09:57 +0300 Subject: [PATCH 19/59] fix: handle unwraps in storages (#356) * fix: handle unwraps in storages * fix: ensure no unwrap --- examples/redis-mq-example/src/main.rs | 2 +- packages/apalis-redis/src/storage.rs | 36 +++++++++++++++++++++------ packages/apalis-sql/src/postgres.rs | 35 ++++++++++++++++++++------ 3 files changed, 56 insertions(+), 17 deletions(-) diff --git a/examples/redis-mq-example/src/main.rs b/examples/redis-mq-example/src/main.rs index 04c3e30a..2be18b1b 100644 --- a/examples/redis-mq-example/src/main.rs +++ b/examples/redis-mq-example/src/main.rs @@ -1,4 +1,4 @@ -use std::{marker::PhantomData, sync::Arc, time::Duration}; +use std::{marker::PhantomData, time::Duration}; use anyhow::Result; use apalis::{ diff --git a/packages/apalis-redis/src/storage.rs b/packages/apalis-redis/src/storage.rs index ad96440d..31a76c1f 100644 --- a/packages/apalis-redis/src/storage.rs +++ b/packages/apalis-redis/src/storage.rs @@ -94,7 +94,9 @@ struct RedisScript { /// The actual structure of a Redis job #[derive(Clone, Debug, Serialize, Deserialize)] pub struct RedisJob { + /// The job context pub ctx: Context, + /// The inner job pub job: J, } @@ -433,27 +435,45 @@ impl< let mut poll_next_stm = apalis_core::interval::interval(config.poll_interval).fuse(); - // TODO: use .ready_chunks(config.buffer_size) - // TODO: create a ack_jobs.loa let mut ack_stream = ack_rx.fuse(); + if let Err(e) = self.keep_alive(&worker).await { + error!("RegistrationError: {}", e); + } + loop { select! { _ = keep_alive_stm.next() => { - self.keep_alive(&worker).await.unwrap(); + if let Err(e) = self.keep_alive(&worker).await { + error!("KeepAliveError: {}", e); + } } _ = enqueue_scheduled_stm.next() => { - self.enqueue_scheduled(config.buffer_size).await.unwrap(); + if let Err(e) = self.enqueue_scheduled(config.buffer_size).await { + error!("EnqueueScheduledError: {}", e); + } } _ = poll_next_stm.next() => { - let res = self.fetch_next(&worker).await.unwrap(); - for job in res { - tx.send(Ok(Some(job))).await.unwrap(); + let res = self.fetch_next(&worker).await; + match res { + Err(e) => { + error!("PollNextError: {}", e); + } + Ok(res) => { + for job in res { + if let Err(e) = tx.send(Ok(Some(job))).await { + error!("EnqueueError: {}", e); + } + } + } } + } id_to_ack = ack_stream.next() => { if let Some(res) = id_to_ack { - self.ack(res).await.unwrap(); + if let Err(e) = self.ack(res).await { + error!("AckError: {}", e); + } } } }; diff --git a/packages/apalis-sql/src/postgres.rs b/packages/apalis-sql/src/postgres.rs index c8f258cc..72a67a26 100644 --- a/packages/apalis-sql/src/postgres.rs +++ b/packages/apalis-sql/src/postgres.rs @@ -152,18 +152,32 @@ impl Backend, worker: &WorkerId, tx: &mut mpsc::Sender>, Error>>, - ) { - let res = storage.fetch_next(worker).await.unwrap(); + ) -> Result<(), Error> { + let res = storage + .fetch_next(worker) + .await + .map_err(|e| Error::Failed(Box::new(e)))?; for job in res { - tx.send(Ok(Some(job))).await.unwrap(); + tx.send(Ok(Some(job))) + .await + .map_err(|e| Error::Failed(Box::new(e)))?; } + Ok(()) + } + + if let Err(e) = self + .keep_alive_at::(&worker, Utc::now().timestamp()) + .await + { + error!("KeepAliveError: {}", e); } loop { select! 
{ _ = keep_alive_stm.next() => { - let now: i64 = Utc::now().timestamp(); - self.keep_alive_at::(&worker, now).await.unwrap(); + if let Err(e) = self.keep_alive_at::(&worker, Utc::now().timestamp()).await { + error!("KeepAliveError: {}", e); + } } ids = ack_stream.next() => { if let Some(ids) = ids { @@ -178,15 +192,20 @@ impl Backend { - fetch_next_batch(&mut self, &worker, &mut tx).await; + if let Err(e) = fetch_next_batch(&mut self, &worker, &mut tx).await { + error!("FetchNextError: {e}"); + + } } _ = pg_notification.next() => { - fetch_next_batch(&mut self, &worker, &mut tx).await; + if let Err(e) = fetch_next_batch(&mut self, &worker, &mut tx).await { + error!("PgNotificationError: {e}"); + } } From d5496ff3a3c33f36363bf8dc9434b2e0e467f1fd Mon Sep 17 00:00:00 2001 From: Geoffrey Mureithi <95377562+geofmureithi@users.noreply.github.com> Date: Wed, 10 Jul 2024 07:00:22 +0300 Subject: [PATCH 20/59] fix: better apalis deps allowing tree shaking for backends (#357) * fix: better apalis deps allowing tree shaking for backends * fix: remove backend features in the root crate --- Cargo.toml | 57 +++++------------------- README.md | 16 +++---- benches/storages.rs | 4 +- examples/actix-web/Cargo.toml | 3 +- examples/actix-web/src/main.rs | 5 ++- examples/async-std-runtime/Cargo.toml | 2 +- examples/async-std-runtime/src/main.rs | 3 +- examples/axum/Cargo.toml | 5 ++- examples/axum/src/main.rs | 5 ++- examples/basics/Cargo.toml | 3 +- examples/basics/src/main.rs | 7 +-- examples/mysql/Cargo.toml | 2 +- examples/mysql/src/main.rs | 5 ++- examples/postgres/Cargo.toml | 3 +- examples/postgres/src/main.rs | 4 +- examples/prometheus/Cargo.toml | 3 +- examples/prometheus/src/main.rs | 5 ++- examples/redis-deadpool/Cargo.toml | 3 +- examples/redis-deadpool/src/main.rs | 6 +-- examples/redis-mq-example/Cargo.toml | 3 +- examples/redis-mq-example/src/main.rs | 10 ++--- examples/redis-with-msg-pack/Cargo.toml | 3 +- examples/redis-with-msg-pack/src/main.rs | 8 ++-- examples/redis/Cargo.toml | 3 +- examples/redis/src/main.rs | 5 ++- examples/sentry/Cargo.toml | 3 +- examples/sentry/src/main.rs | 10 ++--- examples/sqlite/Cargo.toml | 8 +--- examples/sqlite/src/main.rs | 4 +- examples/tracing/Cargo.toml | 5 ++- examples/tracing/src/main.rs | 15 +++---- packages/apalis-core/src/builder.rs | 15 ++----- packages/apalis-core/src/monitor/mod.rs | 4 +- packages/apalis-core/src/worker/mod.rs | 2 +- packages/apalis-cron/README.md | 43 +++++++++++------- packages/apalis-cron/src/lib.rs | 13 ++---- packages/apalis-redis/Cargo.toml | 2 +- packages/apalis-redis/src/lib.rs | 4 +- packages/apalis-redis/src/storage.rs | 2 +- src/lib.rs | 40 +---------------- 40 files changed, 135 insertions(+), 208 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index f09fdde1..431d740d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -17,16 +17,6 @@ bench = false [features] default = ["tracing", "tokio-comp"] -## Include redis storage -redis = ["apalis-redis"] -## Include Postgres storage -postgres = ["apalis-sql/postgres"] -## Include SQlite storage -sqlite = ["apalis-sql/sqlite"] -## Include MySql storage -mysql = ["apalis-sql/mysql"] -## Include Cron functionality -cron = ["apalis-cron"] ## Support Tracing 👀 tracing = ["dep:tracing", "dep:tracing-futures"] @@ -44,19 +34,9 @@ limit = ["tower/limit"] ## Support filtering jobs based on a predicate filter = ["tower/filter"] ## Compatibility with async-std and smol runtimes -async-std-comp = [ - "apalis-sql?/async-std-comp", - "apalis-redis?/async-std-comp", - "apalis-cron?/async-std-comp", - 
"async-std", -] +async-std-comp = ["async-std"] ## Compatibility with tokio and actix runtimes -tokio-comp = [ - "apalis-sql?/tokio-comp", - "apalis-redis?/tokio-comp", - "apalis-cron?/tokio-comp", - "tokio", -] +tokio-comp = ["tokio"] layers = [ "sentry", @@ -70,30 +50,11 @@ layers = [ docsrs = ["document-features"] -[dependencies.apalis-redis] -version = "0.6.0-rc.1" -optional = true -path = "./packages/apalis-redis" -default-features = false - -[dependencies.apalis-sql] - -version = "0.6.0-rc.1" -features = ["migrate"] -optional = true -default-features = false -path = "./packages/apalis-sql" - [dependencies.apalis-core] version = "0.6.0-rc.1" default-features = false path = "./packages/apalis-core" -[dependencies.apalis-cron] -version = "0.6.0-rc.1" -optional = true -default-features = false -path = "./packages/apalis-cron" [dependencies.document-features] version = "0.2" @@ -112,11 +73,12 @@ pprof = { version = "0.13", features = ["flamegraph"] } paste = "1.0.14" serde = "1" tokio = { version = "1", features = ["macros", "rt-multi-thread"] } -apalis = { path = ".", features = ["redis", "sqlite", "postgres", "mysql"] } +apalis-redis = { path = "./packages/apalis-redis" } redis = { version = "0.25.3", default-features = false, features = [ - "script", - "aio", - "connection-manager", + "tokio-comp", + "script", + "aio", + "connection-manager", ] } [dev-dependencies.sqlx] @@ -148,7 +110,10 @@ members = [ "examples/tracing", # "examples/rest-api", "examples/async-std-runtime", - "examples/basics", "examples/redis-with-msg-pack", "examples/redis-deadpool", "examples/redis-mq-example", + "examples/basics", + "examples/redis-with-msg-pack", + "examples/redis-deadpool", + "examples/redis-mq-example", ] diff --git a/README.md b/README.md index 08acd4fe..2ecc975e 100644 --- a/README.md +++ b/README.md @@ -59,14 +59,16 @@ To get started, just add to Cargo.toml ```toml [dependencies] -apalis = { version = "0.5", features = ["redis"] } # Backends available: postgres, sqlite, mysql, amqp +apalis = { version = "0.6" } +apalis-redis = { version = "0.6" } +# apalis-sql = { version = "0.6", features = ["postgres"] } # or mysql, sqlite ``` ## Usage ```rust use apalis::prelude::*; -use apalis::redis::{RedisStorage, Config}; +use apalis_redis::{RedisStorage, Config}; use serde::{Deserialize, Serialize}; #[derive(Debug, Deserialize, Serialize)] @@ -81,11 +83,12 @@ async fn send_email(job: Email, data: Data) -> Result<(), Error> { } #[tokio::main] -async fn main() -> Result<()> { +async fn main() -> { std::env::set_var("RUST_LOG", "debug"); env_logger::init(); let redis_url = std::env::var("REDIS_URL").expect("Missing env variable REDIS_URL"); - let storage = RedisStorage::new(redis, Config::default()).await?; + let conn = apalis_redis::connect(redis_url).await.expect("Could not connect"); + let storage = RedisStorage::new(conn); Monitor::new() .register_with_count(2, { WorkerBuilder::new(format!("email-worker")) @@ -117,11 +120,6 @@ async fn produce_route_jobs(storage: &RedisStorage) -> Result<()> { ## Feature flags - _tracing_ (enabled by default) — Support Tracing 👀 -- _redis_ — Include redis storage -- _postgres_ — Include Postgres storage -- _sqlite_ — Include SQlite storage -- _mysql_ — Include MySql storage -- _cron_ — Include cron job processing - _sentry_ — Support for Sentry exception and performance monitoring - _prometheus_ — Support Prometheus metrics - _retry_ — Support direct retrying jobs diff --git a/benches/storages.rs b/benches/storages.rs index f0566378..721a3f0f 100644 --- 
a/benches/storages.rs +++ b/benches/storages.rs @@ -1,11 +1,11 @@ use apalis::prelude::*; -use apalis::redis::RedisStorage; use apalis::{ mysql::{MySqlPool, MysqlStorage}, postgres::{PgPool, PostgresStorage}, sqlite::{SqlitePool, SqliteStorage}, }; +use apalis_redis::RedisStorage; use criterion::*; use futures::Future; use paste::paste; @@ -132,7 +132,7 @@ define_bench!("sqlite_in_memory", { }); define_bench!("redis", { - let conn = apalis::redis::connect(env!("REDIS_URL")).await.unwrap(); + let conn = apalis_redis::connect(env!("REDIS_URL")).await.unwrap(); let redis = RedisStorage::new(conn); redis }); diff --git a/examples/actix-web/Cargo.toml b/examples/actix-web/Cargo.toml index 43093e80..7f755b65 100644 --- a/examples/actix-web/Cargo.toml +++ b/examples/actix-web/Cargo.toml @@ -7,7 +7,8 @@ license = "MIT OR Apache-2.0" [dependencies] anyhow = "1" -apalis = { path = "../../", features = ["redis"] } +apalis = { path = "../../" } +apalis-redis = { path = "../../packages/apalis-redis" } serde = "1" env_logger = "0.10" actix-web = "4" diff --git a/examples/actix-web/src/main.rs b/examples/actix-web/src/main.rs index 41e42be5..1efb1716 100644 --- a/examples/actix-web/src/main.rs +++ b/examples/actix-web/src/main.rs @@ -1,9 +1,10 @@ use actix_web::rt::signal; use actix_web::{web, App, HttpResponse, HttpServer}; use anyhow::Result; +use apalis::layers::tracing::TraceLayer; use apalis::prelude::*; use apalis::utils::TokioExecutor; -use apalis::{layers::tracing::TraceLayer, redis::RedisStorage}; +use apalis_redis::RedisStorage; use futures::future; use email_service::{send_email, Email}; @@ -26,7 +27,7 @@ async fn main() -> Result<()> { std::env::set_var("RUST_LOG", "debug"); env_logger::init(); - let conn = apalis::redis::connect("redis://127.0.0.1/").await?; + let conn = apalis_redis::connect("redis://127.0.0.1/").await?; let storage = RedisStorage::new(conn); let data = web::Data::new(storage.clone()); let http = async { diff --git a/examples/async-std-runtime/Cargo.toml b/examples/async-std-runtime/Cargo.toml index b3645e4f..18701e17 100644 --- a/examples/async-std-runtime/Cargo.toml +++ b/examples/async-std-runtime/Cargo.toml @@ -8,11 +8,11 @@ edition = "2021" [dependencies] anyhow = "1" apalis = { path = "../../", default-features = false, features = [ - "cron", "async-std-comp", "tracing", "retry", ] } +apalis-cron = { path = "../../packages/apalis-cron" } apalis-core = { path = "../../packages/apalis-core", default-features = false } async-std = { version = "1.12.0", features = ["attributes"] } serde = "1" diff --git a/examples/async-std-runtime/src/main.rs b/examples/async-std-runtime/src/main.rs index 0fcd9955..f77a8958 100644 --- a/examples/async-std-runtime/src/main.rs +++ b/examples/async-std-runtime/src/main.rs @@ -2,11 +2,10 @@ use std::{future::Future, str::FromStr, time::Duration}; use anyhow::Result; use apalis::{ - cron::{CronStream, Schedule}, layers::{retry::RetryLayer, retry::RetryPolicy, tracing::MakeSpan, tracing::TraceLayer}, prelude::*, }; - +use apalis_cron::{CronStream, Schedule}; use chrono::{DateTime, Utc}; use tracing::{debug, info, Instrument, Level, Span}; diff --git a/examples/axum/Cargo.toml b/examples/axum/Cargo.toml index 95a322d1..5d14a34d 100644 --- a/examples/axum/Cargo.toml +++ b/examples/axum/Cargo.toml @@ -11,6 +11,7 @@ tokio = { version = "1.0", features = ["full"] } tracing = "0.1" tracing-subscriber = { version = "0.3", features = ["env-filter"] } serde = { version = "1.0", features = ["derive"] } -apalis = { path = "../../", features = ["redis"] } 
+apalis = { path = "../../" } +apalis-redis = { path = "../../packages/apalis-redis" } futures = "0.3" -email-service = { path = "../email-service" } \ No newline at end of file +email-service = { path = "../email-service" } diff --git a/examples/axum/src/main.rs b/examples/axum/src/main.rs index 590cd32b..3ef43012 100644 --- a/examples/axum/src/main.rs +++ b/examples/axum/src/main.rs @@ -4,8 +4,9 @@ //! cd examples && cargo run -p axum-example //! ``` use anyhow::Result; +use apalis::layers::tracing::TraceLayer; use apalis::prelude::*; -use apalis::{layers::tracing::TraceLayer, redis::RedisStorage}; +use apalis_redis::RedisStorage; use axum::{ extract::Form, http::StatusCode, @@ -55,7 +56,7 @@ async fn main() -> Result<()> { )) .with(tracing_subscriber::fmt::layer()) .init(); - let conn = apalis::redis::connect("redis://127.0.0.1/").await?; + let conn = apalis_redis::connect("redis://127.0.0.1/").await?; let storage = RedisStorage::new(conn); // build our application with some routes let app = Router::new() diff --git a/examples/basics/Cargo.toml b/examples/basics/Cargo.toml index c6589c25..d77bccbe 100644 --- a/examples/basics/Cargo.toml +++ b/examples/basics/Cargo.toml @@ -8,7 +8,8 @@ license = "MIT OR Apache-2.0" [dependencies] thiserror = "1" tokio = { version = "1", features = ["full"] } -apalis = { path = "../../", features = ["sqlite", "limit", "tokio-comp"] } +apalis = { path = "../../", features = ["limit", "tokio-comp"] } +apalis-sql = { path = "../../packages/apalis-sql" } serde = "1" tracing-subscriber = "0.3.11" email-service = { path = "../email-service" } diff --git a/examples/basics/src/main.rs b/examples/basics/src/main.rs index 341f8f55..8ef6829a 100644 --- a/examples/basics/src/main.rs +++ b/examples/basics/src/main.rs @@ -4,11 +4,8 @@ mod service; use std::time::Duration; -use apalis::{ - layers::tracing::TraceLayer, - prelude::*, - sqlite::{SqlitePool, SqliteStorage}, -}; +use apalis::{layers::tracing::TraceLayer, prelude::*}; +use apalis_sql::sqlite::{SqlitePool, SqliteStorage}; use email_service::Email; use layer::LogLayer; diff --git a/examples/mysql/Cargo.toml b/examples/mysql/Cargo.toml index bbd3a96b..9fcc1298 100644 --- a/examples/mysql/Cargo.toml +++ b/examples/mysql/Cargo.toml @@ -8,10 +8,10 @@ license = "MIT OR Apache-2.0" [dependencies] anyhow = "1" apalis = { path = "../../", features = [ - "mysql", "tokio-comp", "tracing", ], default-features = false } +apalis-sql = { path = "../../packages/apalis-sql", features = ["mysql"] } serde = "1" tracing-subscriber = "0.3.11" chrono = { version = "0.4", default-features = false, features = ["clock"] } diff --git a/examples/mysql/src/main.rs b/examples/mysql/src/main.rs index 9ec2dca3..4b520b5b 100644 --- a/examples/mysql/src/main.rs +++ b/examples/mysql/src/main.rs @@ -1,7 +1,8 @@ use anyhow::Result; -use apalis::mysql::MySqlPool; +use apalis::layers::tracing::TraceLayer; use apalis::prelude::*; -use apalis::{layers::tracing::TraceLayer, mysql::MysqlStorage}; +use apalis_sql::mysql::MySqlPool; +use apalis_sql::mysql::MysqlStorage; use email_service::{send_email, Email}; async fn produce_jobs(storage: &MysqlStorage) -> Result<()> { diff --git a/examples/postgres/Cargo.toml b/examples/postgres/Cargo.toml index bff70949..151c8a43 100644 --- a/examples/postgres/Cargo.toml +++ b/examples/postgres/Cargo.toml @@ -7,7 +7,8 @@ license = "MIT OR Apache-2.0" [dependencies] anyhow = "1" -apalis = { path = "../../", features = ["postgres", "retry"] } +apalis = { path = "../../", features = ["retry"] } +apalis-sql = { path = 
"../../packages/apalis-sql", features = ["postgres"] } serde = "1" tracing-subscriber = "0.3.11" chrono = { version = "0.4", default-features = false, features = ["clock"] } diff --git a/examples/postgres/src/main.rs b/examples/postgres/src/main.rs index 11d235cc..8f06c897 100644 --- a/examples/postgres/src/main.rs +++ b/examples/postgres/src/main.rs @@ -1,8 +1,8 @@ use anyhow::Result; use apalis::layers::retry::RetryPolicy; -use apalis::postgres::{PgListen, PgPool}; +use apalis::layers::tracing::TraceLayer; use apalis::prelude::*; -use apalis::{layers::tracing::TraceLayer, postgres::PostgresStorage}; +use apalis_sql::postgres::{PgListen, PgPool, PostgresStorage}; use email_service::{send_email, Email}; use tower::retry::RetryLayer; use tracing::{debug, info}; diff --git a/examples/prometheus/Cargo.toml b/examples/prometheus/Cargo.toml index 32a16476..16689182 100644 --- a/examples/prometheus/Cargo.toml +++ b/examples/prometheus/Cargo.toml @@ -11,7 +11,8 @@ tokio = { version = "1.0", features = ["full"] } tracing = "0.1" tracing-subscriber = { version = "0.3", features = ["env-filter"] } serde = { version = "1.0", features = ["derive"] } -apalis = { path = "../../", features = ["redis", "prometheus"] } +apalis = { path = "../../", features = ["prometheus"] } +apalis-redis = { path = "../../packages/apalis-redis" } futures = "0.3" metrics = "0.21" metrics-exporter-prometheus = "0.12" diff --git a/examples/prometheus/src/main.rs b/examples/prometheus/src/main.rs index 4f680cd1..8dc14390 100644 --- a/examples/prometheus/src/main.rs +++ b/examples/prometheus/src/main.rs @@ -4,8 +4,9 @@ //! cd examples && cargo run -p prometheus-example //! ``` use anyhow::Result; +use apalis::layers::prometheus::PrometheusLayer; use apalis::prelude::*; -use apalis::{layers::prometheus::PrometheusLayer, redis::RedisStorage}; +use apalis_redis::RedisStorage; use axum::{ extract::Form, http::StatusCode, @@ -29,7 +30,7 @@ async fn main() -> Result<()> { )) .with(tracing_subscriber::fmt::layer()) .init(); - let conn = apalis::redis::connect("redis://127.0.0.1/").await?; + let conn = apalis_redis::connect("redis://127.0.0.1/").await?; let storage = RedisStorage::new(conn); // build our application with some routes let recorder_handle = setup_metrics_recorder(); diff --git a/examples/redis-deadpool/Cargo.toml b/examples/redis-deadpool/Cargo.toml index f0d3424d..6ac893a1 100644 --- a/examples/redis-deadpool/Cargo.toml +++ b/examples/redis-deadpool/Cargo.toml @@ -7,7 +7,8 @@ edition = "2021" deadpool-redis = { version = "0.15.1" } anyhow = "1" tokio = { version = "1", features = ["full"] } -apalis = { path = "../../", features = ["redis", "timeout"] } +apalis = { path = "../../", features = ["timeout"] } +apalis-redis = { path = "../../packages/apalis-redis" } serde = "1" env_logger = "0.10" tracing-subscriber = "0.3.11" diff --git a/examples/redis-deadpool/src/main.rs b/examples/redis-deadpool/src/main.rs index c98ce20b..7dfe928a 100644 --- a/examples/redis-deadpool/src/main.rs +++ b/examples/redis-deadpool/src/main.rs @@ -2,7 +2,7 @@ use std::time::Duration; use anyhow::Result; use apalis::prelude::*; -use apalis::redis::RedisStorage; +use apalis_redis::RedisStorage; use deadpool_redis::{Config, Connection, Runtime}; use email_service::{send_email, Email}; @@ -14,8 +14,8 @@ async fn main() -> Result<()> { tracing_subscriber::fmt::init(); - let config = apalis::redis::Config::default() - .set_namespace("apalis::redis-dead-pool") + let config = apalis_redis::Config::default() + .set_namespace("apalis_redis-dead-pool") 
.set_max_retries(5); let cfg = Config::from_url("redis://127.0.0.1/"); diff --git a/examples/redis-mq-example/Cargo.toml b/examples/redis-mq-example/Cargo.toml index 2caaee9d..d841cb7c 100644 --- a/examples/redis-mq-example/Cargo.toml +++ b/examples/redis-mq-example/Cargo.toml @@ -4,7 +4,8 @@ version = "0.1.0" edition = "2021" [dependencies] -apalis = { path = "../../", features = ["redis"]} +apalis = { path = "../.." } +apalis-redis = { path = "../../packages/apalis-redis" } apalis-core = { path = "../../packages/apalis-core", features = ["json"] } rsmq_async = "11.1.0" anyhow = "1" diff --git a/examples/redis-mq-example/src/main.rs b/examples/redis-mq-example/src/main.rs index 2be18b1b..c7ce72eb 100644 --- a/examples/redis-mq-example/src/main.rs +++ b/examples/redis-mq-example/src/main.rs @@ -1,11 +1,9 @@ use std::{marker::PhantomData, time::Duration}; use anyhow::Result; -use apalis::{ - layers::tracing::TraceLayer, - prelude::*, - redis::{self, Config, RedisCodec, RedisJob}, -}; +use apalis::{layers::tracing::TraceLayer, prelude::*}; + +use apalis_redis::{self, Config, RedisCodec, RedisJob}; use apalis_core::{ codec::json::JsonCodec, @@ -20,7 +18,7 @@ use tracing::{error, info}; struct RedisMq { conn: Rsmq, msg_type: PhantomData, - config: redis::Config, + config: Config, codec: RedisCodec, } diff --git a/examples/redis-with-msg-pack/Cargo.toml b/examples/redis-with-msg-pack/Cargo.toml index f459387c..ec248247 100644 --- a/examples/redis-with-msg-pack/Cargo.toml +++ b/examples/redis-with-msg-pack/Cargo.toml @@ -6,7 +6,8 @@ edition = "2021" [dependencies] anyhow = "1" tokio = { version = "1", features = ["full"] } -apalis = { path = "../../", features = ["redis", "timeout"] } +apalis = { path = "../../", features = ["timeout"] } +apalis-redis = { path = "../../packages/apalis-redis" } serde = "1" env_logger = "0.10" tracing-subscriber = "0.3.11" diff --git a/examples/redis-with-msg-pack/src/main.rs b/examples/redis-with-msg-pack/src/main.rs index fc096c62..86e7ff5b 100644 --- a/examples/redis-with-msg-pack/src/main.rs +++ b/examples/redis-with-msg-pack/src/main.rs @@ -2,7 +2,7 @@ use std::time::Duration; use anyhow::Result; use apalis::prelude::*; -use apalis::redis::RedisStorage; +use apalis_redis::RedisStorage; use email_service::{send_email, Email}; use serde::{de::DeserializeOwned, Serialize}; @@ -27,9 +27,9 @@ async fn main() -> Result<()> { tracing_subscriber::fmt::init(); - let conn = apalis::redis::connect("redis://127.0.0.1/").await?; - let config = apalis::redis::Config::default() - .set_namespace("apalis::redis-with-msg-pack") + let conn = apalis_redis::connect("redis://127.0.0.1/").await?; + let config = apalis_redis::Config::default() + .set_namespace("apalis_redis-with-msg-pack") .set_max_retries(5); let storage = RedisStorage::new_with_codec(conn, config, MessagePack); // This can be in another part of the program diff --git a/examples/redis/Cargo.toml b/examples/redis/Cargo.toml index 27ff00f9..f3549b79 100644 --- a/examples/redis/Cargo.toml +++ b/examples/redis/Cargo.toml @@ -8,7 +8,8 @@ license = "MIT OR Apache-2.0" [dependencies] anyhow = "1" tokio = { version = "1", features = ["full"] } -apalis = { path = "../../", features = ["redis", "timeout", "limit"]} +apalis = { path = "../../", features = ["timeout", "limit"] } +apalis-redis = { path = "../../packages/apalis-redis" } serde = "1" env_logger = "0.10" tracing-subscriber = "0.3.11" diff --git a/examples/redis/src/main.rs b/examples/redis/src/main.rs index d3da324c..89e3c642 100644 --- a/examples/redis/src/main.rs 
+++ b/examples/redis/src/main.rs @@ -5,8 +5,9 @@ use std::{ }; use anyhow::Result; -use apalis::{layers::limit::RateLimitLayer, redis::RedisStorage}; +use apalis::layers::limit::RateLimitLayer; use apalis::{layers::TimeoutLayer, prelude::*}; +use apalis_redis::RedisStorage; use email_service::{send_email, Email}; use tracing::{error, info}; @@ -40,7 +41,7 @@ async fn main() -> Result<()> { tracing_subscriber::fmt::init(); - let conn = apalis::redis::connect("redis://127.0.0.1/").await?; + let conn = apalis_redis::connect("redis://127.0.0.1/").await?; let storage = RedisStorage::new(conn); // This can be in another part of the program produce_jobs(storage.clone()).await?; diff --git a/examples/sentry/Cargo.toml b/examples/sentry/Cargo.toml index 20b1a1a5..6e51b11e 100644 --- a/examples/sentry/Cargo.toml +++ b/examples/sentry/Cargo.toml @@ -7,7 +7,8 @@ license = "MIT OR Apache-2.0" [dependencies] anyhow = "1" -apalis = { path = "../../", features = ["redis", "sentry"] } +apalis = { path = "../../", features = ["sentry"] } +apalis-redis = { path = "../../packages/apalis-redis" } serde = "1" env_logger = "0.10" tracing-subscriber = { version = "0.3.11", features = ["env-filter"] } diff --git a/examples/sentry/src/main.rs b/examples/sentry/src/main.rs index 376507a6..a16b90a9 100644 --- a/examples/sentry/src/main.rs +++ b/examples/sentry/src/main.rs @@ -6,11 +6,9 @@ use std::time::Duration; use tracing_subscriber::prelude::*; use anyhow::Result; -use apalis::{ - layers::{sentry::SentryLayer, tracing::TraceLayer}, - prelude::*, - redis::RedisStorage, -}; +use apalis::layers::tracing::TraceLayer; +use apalis::{layers::sentry::SentryLayer, prelude::*}; +use apalis_redis::RedisStorage; use email_service::Email; use tokio::time::sleep; @@ -129,7 +127,7 @@ async fn main() -> Result<()> { .with(sentry_tracing::layer()) .init(); - let conn = apalis::redis::connect(redis_url).await?; + let conn = apalis_redis::connect(redis_url).await?; let storage = RedisStorage::new(conn); //This can be in another part of the program produce_jobs(storage.clone()).await?; diff --git a/examples/sqlite/Cargo.toml b/examples/sqlite/Cargo.toml index 4ee1ed41..646ab7d8 100644 --- a/examples/sqlite/Cargo.toml +++ b/examples/sqlite/Cargo.toml @@ -8,12 +8,8 @@ license = "MIT OR Apache-2.0" [dependencies] anyhow = "1" tokio = { version = "1", features = ["full"] } -apalis = { path = "../../", features = [ - "sqlite", - "limit", - "tracing", - "tokio-comp", -] } +apalis = { path = "../../", features = ["limit", "tracing", "tokio-comp"] } +apalis-sql = { path = "../../packages/apalis-sql", features = ["sqlite"] } serde = { version = "1", features = ["derive"] } tracing-subscriber = "0.3.11" chrono = { version = "0.4", default-features = false, features = ["clock"] } diff --git a/examples/sqlite/src/main.rs b/examples/sqlite/src/main.rs index 2b086b0e..9bac9683 100644 --- a/examples/sqlite/src/main.rs +++ b/examples/sqlite/src/main.rs @@ -2,9 +2,9 @@ mod job; use anyhow::Result; use apalis::utils::TokioExecutor; -use apalis::{layers::tracing::TraceLayer, prelude::*, sqlite::SqliteStorage}; +use apalis::{layers::tracing::TraceLayer, prelude::*}; +use apalis_sql::sqlite::SqliteStorage; use chrono::Utc; - use email_service::{send_email, Email}; use job::Notification; use sqlx::SqlitePool; diff --git a/examples/tracing/Cargo.toml b/examples/tracing/Cargo.toml index 7872597c..07f5159a 100644 --- a/examples/tracing/Cargo.toml +++ b/examples/tracing/Cargo.toml @@ -7,9 +7,10 @@ license = "MIT OR Apache-2.0" [dependencies] anyhow = "1" 
-apalis = { path = "../../", features = ["redis"] } +apalis = { path = "../../" } +apalis-redis = { path = "../../packages/apalis-redis" } serde = "1" -tokio = { version ="1", features = ["macros"]} +tokio = { version = "1", features = ["macros"] } env_logger = "0.10" tracing-subscriber = { version = "0.3.11", features = ["env-filter", "json"] } chrono = { version = "0.4", default-features = false, features = ["clock"] } diff --git a/examples/tracing/src/main.rs b/examples/tracing/src/main.rs index 64323b5d..8e781054 100644 --- a/examples/tracing/src/main.rs +++ b/examples/tracing/src/main.rs @@ -1,16 +1,15 @@ use anyhow::Result; -use std::error::Error; -use std::fmt; -use std::time::Duration; -use tracing_subscriber::prelude::*; - +use apalis::layers::tracing::TraceLayer; use apalis::{ - layers::tracing::TraceLayer, prelude::{Monitor, Storage, WorkerBuilder, WorkerFactoryFn}, - redis::RedisStorage, utils::TokioExecutor, }; +use apalis_redis::RedisStorage; +use std::error::Error; +use std::fmt; +use std::time::Duration; +use tracing_subscriber::prelude::*; use tokio::time::sleep; @@ -63,7 +62,7 @@ async fn main() -> Result<()> { .with(fmt_layer) .init(); - let conn = apalis::redis::connect(redis_url) + let conn = apalis_redis::connect(redis_url) .await .expect("Could not connect to RedisStorage"); let storage = RedisStorage::new(conn); diff --git a/packages/apalis-core/src/builder.rs b/packages/apalis-core/src/builder.rs index 2449eef9..b462da9f 100644 --- a/packages/apalis-core/src/builder.rs +++ b/packages/apalis-core/src/builder.rs @@ -225,27 +225,20 @@ pub trait WorkerFactoryFn { /// - An async function with an argument of the item being processed plus up-to 16 arguments that are extracted from the request [`Data`] /// /// A function can return: - /// - Unit + /// - () /// - primitive /// - Result /// - impl IntoResponse /// /// ```rust + /// # use apalis_core::layers::extensions::Data; /// #[derive(Debug)] /// struct Email; /// #[derive(Debug)] /// struct PgPool; - /// # struct PgError; /// - /// async fn send_email(email: Email) { - /// // Implementation of the job function - /// // ... - /// } - /// - /// async fn send_email(email: Email, data: Data) -> Result<(), PgError> { - /// // Implementation of the job function? - /// // ... - /// Ok(()) + /// async fn send_email(email: Email, data: Data) { + /// // Implementation of the task function? 
/// } /// ``` /// diff --git a/packages/apalis-core/src/monitor/mod.rs b/packages/apalis-core/src/monitor/mod.rs index c4837e99..5168985e 100644 --- a/packages/apalis-core/src/monitor/mod.rs +++ b/packages/apalis-core/src/monitor/mod.rs @@ -313,7 +313,7 @@ mod tests { #[tokio::test] async fn it_works() { let backend = MemoryStorage::new(); - let handle = backend.clone(); + let mut handle = backend.clone(); tokio::spawn(async move { for i in 0..10 { @@ -339,7 +339,7 @@ mod tests { #[tokio::test] async fn test_monitor_run() { let backend = MemoryStorage::new(); - let handle = backend.clone(); + let mut handle = backend.clone(); tokio::spawn(async move { for i in 0..1000 { diff --git a/packages/apalis-core/src/worker/mod.rs b/packages/apalis-core/src/worker/mod.rs index 3f3fa599..e46866ff 100644 --- a/packages/apalis-core/src/worker/mod.rs +++ b/packages/apalis-core/src/worker/mod.rs @@ -721,7 +721,7 @@ mod tests { #[tokio::test] async fn it_works() { let backend = MemoryStorage::new(); - let handle = backend.clone(); + let mut handle = backend.clone(); tokio::spawn(async move { for i in 0..ITEMS { diff --git a/packages/apalis-cron/README.md b/packages/apalis-cron/README.md index d9d0e0c4..c8a2e741 100644 --- a/packages/apalis-cron/README.md +++ b/packages/apalis-cron/README.md @@ -6,32 +6,41 @@ Since apalis-cron is build on top of apalis which supports tower middleware, you ## Example ```rust -use apalis::prelude::*; -use apalis::layers::{Extension, DefaultRetryPolicy, RetryLayer}; -use apalis::cron::Schedule; +use apalis::layers::retry::RetryLayer; +use apalis::layers::retry::RetryPolicy; use tower::ServiceBuilder; +use apalis_cron::Schedule; use std::str::FromStr; +use apalis::prelude::*; +use apalis_cron::CronStream; +use chrono::{DateTime, Utc}; -#[derive(Default, Debug, Clone)] -struct Reminder; +#[derive(Clone)] +struct FakeService; +impl FakeService { + fn execute(&self, item: Reminder){} +} -async fn send_reminder(job: Reminder, ctx: JobContext) { - // Do reminder stuff +#[derive(Default, Debug, Clone)] +struct Reminder(DateTime); +impl From> for Reminder { + fn from(t: DateTime) -> Self { + Reminder(t) + } +} +async fn send_reminder(job: Reminder, svc: Data) { + svc.execute(job); } #[tokio::main] async fn main() { let schedule = Schedule::from_str("@daily").unwrap(); - - let service = ServiceBuilder::new() - .layer(RetryLayer::new(DefaultRetryPolicy)) - .service(job_fn(send_reminder)); - - let worker = WorkerBuilder::new("daily-cron-worker") - .stream(CronStream::new(schedule).to_stream()) - .build(service); - - Monitor::new() + let worker = WorkerBuilder::new("morning-cereal") + .layer(RetryLayer::new(RetryPolicy::retries(5))) + .data(FakeService) + .stream(CronStream::new(schedule).into_stream()) + .build_fn(send_reminder); + Monitor::::new() .register(worker) .run() .await diff --git a/packages/apalis-cron/src/lib.rs b/packages/apalis-cron/src/lib.rs index 3a16355c..50019456 100644 --- a/packages/apalis-cron/src/lib.rs +++ b/packages/apalis-cron/src/lib.rs @@ -15,17 +15,12 @@ //! ## Example //! //! ```rust,no_run -//! # use apalis_utils::layers::retry::RetryLayer; -//! # use apalis_utils::layers::retry::DefaultRetryPolicy; -//! # use apalis_core::extensions::Data; -//! # use apalis_core::service_fn::service_fn; +//! # use apalis::layers::retry::RetryLayer; +//! # use apalis::layers::retry::RetryPolicy; //! use tower::ServiceBuilder; //! use apalis_cron::Schedule; //! use std::str::FromStr; -//! # use apalis_core::monitor::Monitor; -//! 
# use apalis_core::builder::WorkerBuilder; -//! # use apalis_core::builder::WorkerFactoryFn; -//! # use apalis_utils::TokioExecutor; +//! # use apalis::prelude::*; //! use apalis_cron::CronStream; //! use chrono::{DateTime, Utc}; //! @@ -50,7 +45,7 @@ //! async fn main() { //! let schedule = Schedule::from_str("@daily").unwrap(); //! let worker = WorkerBuilder::new("morning-cereal") -//! .layer(RetryLayer::new(DefaultRetryPolicy)) +//! .layer(RetryLayer::new(RetryPolicy::retries(5))) //! .data(FakeService) //! .stream(CronStream::new(schedule).into_stream()) //! .build_fn(send_reminder); diff --git a/packages/apalis-redis/Cargo.toml b/packages/apalis-redis/Cargo.toml index 2f581eda..500f321a 100644 --- a/packages/apalis-redis/Cargo.toml +++ b/packages/apalis-redis/Cargo.toml @@ -37,7 +37,7 @@ async-trait = "0.1.80" tokio = { version = "1", features = ["macros", "rt-multi-thread"] } email-service = { path = "../../examples/email-service" } apalis = { path = "../../", default-features = false, features = [ - "tokio-comp", "redis" + "tokio-comp", ] } [features] diff --git a/packages/apalis-redis/src/lib.rs b/packages/apalis-redis/src/lib.rs index 3c3c96cf..40246519 100644 --- a/packages/apalis-redis/src/lib.rs +++ b/packages/apalis-redis/src/lib.rs @@ -8,12 +8,12 @@ //! apalis storage using Redis as a backend //! ```rust,no_run //! use apalis::prelude::*; -//! use apalis::redis::{RedisStorage, Config}; +//! use apalis_redis::{RedisStorage, Config}; //! use email_service::send_email; //! //! #[tokio::main] //! async fn main() { -//! let conn = apalis::redis::connect("redis://127.0.0.1/").await.unwrap(); +//! let conn = apalis_redis::connect("redis://127.0.0.1/").await.unwrap(); //! let storage = RedisStorage::new(conn); //! Monitor::::new() //! .register( diff --git a/packages/apalis-redis/src/storage.rs b/packages/apalis-redis/src/storage.rs index 31a76c1f..ea845518 100644 --- a/packages/apalis-redis/src/storage.rs +++ b/packages/apalis-redis/src/storage.rs @@ -153,7 +153,7 @@ impl Default for Config { max_retries: 5, keep_alive: Duration::from_secs(30), enqueue_scheduled: Duration::from_secs(30), - namespace: String::from("apalis::redis"), + namespace: String::from("apalis_redis"), } } } diff --git a/src/lib.rs b/src/lib.rs index ea76136b..27c99abd 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -19,7 +19,7 @@ //! ```rust, no_run //! use apalis::prelude::*; //! use serde::{Deserialize, Serialize}; -//! use apalis::redis::{RedisStorage, Config}; +//! use apalis_redis::{RedisStorage, Config}; //! //! #[derive(Debug, Deserialize, Serialize)] //! struct Email { @@ -33,7 +33,7 @@ //! #[tokio::main] //! async fn main() { //! let redis = std::env::var("REDIS_URL").expect("Missing REDIS_URL env variable"); -//! let conn = apalis::redis::connect(redis).await.unwrap(); +//! let conn = apalis_redis::connect(redis).await.unwrap(); //! let storage = RedisStorage::new(conn); //! Monitor::::new() //! .register_with_count(2, { @@ -62,42 +62,6 @@ //! [`tower-http`]: https://crates.io/crates/tower-http //! [`Layer`]: https://docs.rs/tower/latest/tower/trait.Layer.html //! 
[`Stream`]: https://docs.rs/futures/latest/futures/stream/trait.Stream.html - -/// Include the default Redis storage -#[cfg(feature = "redis")] -#[cfg_attr(docsrs, doc(cfg(feature = "redis")))] -pub mod redis { - pub use apalis_redis::*; -} - -/// Include the default Sqlite storage -#[cfg(feature = "sqlite")] -#[cfg_attr(docsrs, doc(cfg(feature = "sqlite")))] -pub mod sqlite { - pub use apalis_sql::sqlite::*; -} - -/// Include the default Postgres storage -#[cfg(feature = "postgres")] -#[cfg_attr(docsrs, doc(cfg(feature = "postgres")))] -pub mod postgres { - pub use apalis_sql::postgres::*; -} - -/// Include the default MySQL storage -#[cfg(feature = "mysql")] -#[cfg_attr(docsrs, doc(cfg(feature = "mysql")))] -pub mod mysql { - pub use apalis_sql::mysql::*; -} - -/// Include Cron utilities -#[cfg(feature = "cron")] -#[cfg_attr(docsrs, doc(cfg(feature = "cron")))] -pub mod cron { - pub use apalis_cron::*; -} - /// apalis fully supports middleware via [`Layer`](https://docs.rs/tower/latest/tower/trait.Layer.html) pub mod layers; From fc72bc75b286f0214e08a48a076f586f844ba647 Mon Sep 17 00:00:00 2001 From: Geoffrey Mureithi <95377562+geofmureithi@users.noreply.github.com> Date: Wed, 10 Jul 2024 07:29:19 +0300 Subject: [PATCH 21/59] standardize backend for storage and mq (#358) * fix: standardize backend for storage and mq * fix: minor fixes --- README.md | 2 +- benches/storages.rs | 2 +- examples/actix-web/src/main.rs | 2 +- examples/axum/src/main.rs | 2 +- examples/basics/src/main.rs | 2 +- examples/mysql/src/main.rs | 2 +- examples/postgres/src/main.rs | 2 +- examples/prometheus/src/main.rs | 2 +- examples/redis-deadpool/src/main.rs | 2 +- examples/redis-mq-example/src/main.rs | 2 +- examples/redis-with-msg-pack/src/main.rs | 2 +- examples/redis/src/main.rs | 2 +- examples/rest-api/src/main.rs | 8 ++--- examples/sentry/src/main.rs | 2 +- examples/sqlite/src/main.rs | 4 +-- examples/tracing/src/main.rs | 2 +- packages/apalis-core/src/builder.rs | 40 ++---------------------- packages/apalis-core/src/layers.rs | 2 +- packages/apalis-core/src/monitor/mod.rs | 4 +-- packages/apalis-core/src/worker/mod.rs | 6 ++-- packages/apalis-redis/src/lib.rs | 2 +- packages/apalis-sql/src/postgres.rs | 2 +- src/lib.rs | 2 +- 23 files changed, 32 insertions(+), 66 deletions(-) diff --git a/README.md b/README.md index 2ecc975e..663549cc 100644 --- a/README.md +++ b/README.md @@ -93,7 +93,7 @@ async fn main() -> { .register_with_count(2, { WorkerBuilder::new(format!("email-worker")) .data(0usize) - .with_storage(storage) + .backend(storage) .build_fn(send_email) }) .run() diff --git a/benches/storages.rs b/benches/storages.rs index 721a3f0f..bf6a8397 100644 --- a/benches/storages.rs +++ b/benches/storages.rs @@ -37,7 +37,7 @@ macro_rules! 
define_bench { let worker = WorkerBuilder::new(format!("{}-bench", $name)) .data(c) - .source(storage) + .backend(storage) .build_fn(handle_test_job); worker }) diff --git a/examples/actix-web/src/main.rs b/examples/actix-web/src/main.rs index 1efb1716..c816302b 100644 --- a/examples/actix-web/src/main.rs +++ b/examples/actix-web/src/main.rs @@ -45,7 +45,7 @@ async fn main() -> Result<()> { .register_with_count(2, { WorkerBuilder::new("tasty-avocado") .layer(TraceLayer::new()) - .with_storage(storage) + .backend(storage) .build_fn(send_email) }) .run_with_signal(signal::ctrl_c()); diff --git a/examples/axum/src/main.rs b/examples/axum/src/main.rs index 3ef43012..a3d7774a 100644 --- a/examples/axum/src/main.rs +++ b/examples/axum/src/main.rs @@ -77,7 +77,7 @@ async fn main() -> Result<()> { .register_with_count(2, { WorkerBuilder::new("tasty-pear") .layer(TraceLayer::new()) - .with_storage(storage.clone()) + .backend(storage.clone()) .build_fn(send_email) }) .run() diff --git a/examples/basics/src/main.rs b/examples/basics/src/main.rs index 8ef6829a..67a78a33 100644 --- a/examples/basics/src/main.rs +++ b/examples/basics/src/main.rs @@ -101,7 +101,7 @@ async fn main() -> Result<(), std::io::Error> { // Add shared context to all jobs executed by this worker .data(EmailService::new()) .data(ValidEmailCache::new()) - .with_storage(sqlite) + .backend(sqlite) .build_fn(send_email) }) .shutdown_timeout(Duration::from_secs(5)) diff --git a/examples/mysql/src/main.rs b/examples/mysql/src/main.rs index 4b520b5b..139d1714 100644 --- a/examples/mysql/src/main.rs +++ b/examples/mysql/src/main.rs @@ -37,7 +37,7 @@ async fn main() -> Result<()> { .register_with_count(1, { WorkerBuilder::new("tasty-avocado") .layer(TraceLayer::new()) - .with_storage(mysql) + .backend(mysql) .build_fn(send_email) }) .run() diff --git a/examples/postgres/src/main.rs b/examples/postgres/src/main.rs index 8f06c897..cece2db3 100644 --- a/examples/postgres/src/main.rs +++ b/examples/postgres/src/main.rs @@ -49,7 +49,7 @@ async fn main() -> Result<()> { WorkerBuilder::new("tasty-orange") .layer(TraceLayer::new()) .layer(RetryLayer::new(RetryPolicy::retries(5))) - .with_storage(pg) + .backend(pg) .build_fn(send_email) }) .on_event(|e| debug!("{e:?}")) diff --git a/examples/prometheus/src/main.rs b/examples/prometheus/src/main.rs index 8dc14390..160dfc58 100644 --- a/examples/prometheus/src/main.rs +++ b/examples/prometheus/src/main.rs @@ -52,7 +52,7 @@ async fn main() -> Result<()> { .register_with_count(2, { WorkerBuilder::new("tasty-banana") .layer(PrometheusLayer) - .with_storage(storage.clone()) + .backend(storage.clone()) .build_fn(send_email) }) .run() diff --git a/examples/redis-deadpool/src/main.rs b/examples/redis-deadpool/src/main.rs index 7dfe928a..0b538945 100644 --- a/examples/redis-deadpool/src/main.rs +++ b/examples/redis-deadpool/src/main.rs @@ -26,7 +26,7 @@ async fn main() -> Result<()> { produce_jobs(&mut storage).await?; let worker = WorkerBuilder::new("rango-tango") - .with_storage(storage) + .backend(storage) .data(pool) .build_fn(send_email); diff --git a/examples/redis-mq-example/src/main.rs b/examples/redis-mq-example/src/main.rs index c7ce72eb..638cdf5a 100644 --- a/examples/redis-mq-example/src/main.rs +++ b/examples/redis-mq-example/src/main.rs @@ -138,7 +138,7 @@ async fn main() -> Result<()> { let worker = WorkerBuilder::new("rango-tango") .layer(TraceLayer::new()) - .with_mq(mq) + .backend(mq) .build_fn(send_email); Monitor::::new() diff --git a/examples/redis-with-msg-pack/src/main.rs 
b/examples/redis-with-msg-pack/src/main.rs index 86e7ff5b..9a37165a 100644 --- a/examples/redis-with-msg-pack/src/main.rs +++ b/examples/redis-with-msg-pack/src/main.rs @@ -36,7 +36,7 @@ async fn main() -> Result<()> { produce_jobs(storage.clone()).await?; let worker = WorkerBuilder::new("rango-tango") - .with_storage(storage) + .backend(storage) .build_fn(send_email); Monitor::::new() diff --git a/examples/redis/src/main.rs b/examples/redis/src/main.rs index 89e3c642..8fadcd52 100644 --- a/examples/redis/src/main.rs +++ b/examples/redis/src/main.rs @@ -51,7 +51,7 @@ async fn main() -> Result<()> { .layer(RateLimitLayer::new(5, Duration::from_secs(1))) .layer(TimeoutLayer::new(Duration::from_millis(500))) .data(Count::default()) - .with_storage(storage) + .backend(storage) .build_fn(send_email); Monitor::::new() diff --git a/examples/rest-api/src/main.rs b/examples/rest-api/src/main.rs index 79939321..b0b9ca8e 100644 --- a/examples/rest-api/src/main.rs +++ b/examples/rest-api/src/main.rs @@ -318,28 +318,28 @@ async fn main() -> anyhow::Result<()> { WorkerBuilder::new("tasty-apple") .layer(SentryJobLayer) .layer(TraceLayer::new()) - .with_storage(worker_storage.clone()) + .backend(worker_storage.clone()) .build_fn(send_email) }) .register_with_count(4, move |c| { WorkerBuilder::new(format!("tasty-avocado-{c}")) .layer(SentryJobLayer) .layer(TraceLayer::new()) - .with_storage(sqlite_storage.clone()) + .backend(sqlite_storage.clone()) .build_fn(notification_service) }) .register_with_count(2, move |c| { WorkerBuilder::new(format!("tasty-banana-{c}")) .layer(SentryJobLayer) .layer(TraceLayer::new()) - .with_storage(pg_storage.clone()) + .backend(pg_storage.clone()) .build_fn(document_service) }) .register_with_count(2, move |c| { WorkerBuilder::new(format!("tasty-pear-{c}")) .layer(SentryJobLayer::new()) .layer(TraceLayer::new()) - .with_storage(mysql_storage.clone()) + .backend(mysql_storage.clone()) .build_fn(upload_service) }) .run(); diff --git a/examples/sentry/src/main.rs b/examples/sentry/src/main.rs index a16b90a9..39b96353 100644 --- a/examples/sentry/src/main.rs +++ b/examples/sentry/src/main.rs @@ -138,7 +138,7 @@ async fn main() -> Result<()> { .layer(NewSentryLayer::new_from_top()) .layer(SentryLayer::new()) .layer(TraceLayer::new()) - .with_storage(storage.clone()) + .backend(storage.clone()) .build_fn(email_service) }) .run() diff --git a/examples/sqlite/src/main.rs b/examples/sqlite/src/main.rs index 9bac9683..282898b3 100644 --- a/examples/sqlite/src/main.rs +++ b/examples/sqlite/src/main.rs @@ -62,13 +62,13 @@ async fn main() -> Result<()> { .register_with_count(2, { WorkerBuilder::new("tasty-banana") .layer(TraceLayer::new()) - .with_storage(email_storage) + .backend(email_storage) .build_fn(send_email) }) .register_with_count(10, { WorkerBuilder::new("tasty-mango") .layer(TraceLayer::new()) - .with_storage(notification_storage) + .backend(notification_storage) .build_fn(job::notify) }) .run() diff --git a/examples/tracing/src/main.rs b/examples/tracing/src/main.rs index 8e781054..5e778692 100644 --- a/examples/tracing/src/main.rs +++ b/examples/tracing/src/main.rs @@ -73,7 +73,7 @@ async fn main() -> Result<()> { .register( WorkerBuilder::new("tasty-avocado") .chain(|srv| srv.layer(TraceLayer::new())) - .with_storage(storage) + .backend(storage) .build_fn(email_service), ) .run() diff --git a/packages/apalis-core/src/builder.rs b/packages/apalis-core/src/builder.rs index b462da9f..7bcf1b9d 100644 --- a/packages/apalis-core/src/builder.rs +++ 
b/packages/apalis-core/src/builder.rs @@ -9,11 +9,9 @@ use tower::{ use crate::{ error::Error, layers::extensions::Data, - mq::MessageQueue, request::Request, service_fn::service_fn, service_fn::ServiceFn, - storage::Storage, worker::{Ready, Worker, WorkerId}, Backend, }; @@ -55,7 +53,7 @@ impl WorkerBuilder<(), (), Identity, Serv> { } } -impl WorkerBuilder { +impl WorkerBuilder { /// Consume a stream directly pub fn stream>, Error>> + Send + 'static, NJ>( self, @@ -70,36 +68,8 @@ impl WorkerBuilder { } } - /// Set the source to a [Storage] - pub fn with_storage, NJ>( - self, - storage: NS, - ) -> WorkerBuilder { - WorkerBuilder { - request: PhantomData, - layer: self.layer, - source: storage, - id: self.id, - service: self.service, - } - } - - /// Set the source to a [MessageQueue] - pub fn with_mq, NJ>( - self, - message_queue: NS, - ) -> WorkerBuilder { - WorkerBuilder { - request: PhantomData, - layer: self.layer, - source: message_queue, - id: self.id, - service: self.service, - } - } - - /// Set the source to a generic backend that implements only [Backend] - pub fn source>, NJ>( + /// Set the source to a backend that implements [Backend] + pub fn backend>, NJ>( self, backend: NS, ) -> WorkerBuilder { @@ -168,16 +138,12 @@ where S::Response: 'static, M: Layer, - // P::Layer: Layer, - // M: Layer<>::Service>, { type Source = P; type Service = M::Service; - /// Build a worker, given a tower service fn build(self, service: S) -> Worker> { let worker_id = self.id; - // let common_layer = self.source.common_layer(worker_id.clone()); let poller = self.source; let middleware = self.layer; let service = middleware.service(service); diff --git a/packages/apalis-core/src/layers.rs b/packages/apalis-core/src/layers.rs index dc480cde..bdc504bb 100644 --- a/packages/apalis-core/src/layers.rs +++ b/packages/apalis-core/src/layers.rs @@ -91,7 +91,7 @@ pub mod extensions { /// /// let worker = WorkerBuilder::new("tasty-avocado") /// .data(state) - /// .source(MemoryStorage::new()) + /// .backend(MemoryStorage::new()) /// .build(service_fn(email_service)); /// ``` diff --git a/packages/apalis-core/src/monitor/mod.rs b/packages/apalis-core/src/monitor/mod.rs index 5168985e..af5cb8ec 100644 --- a/packages/apalis-core/src/monitor/mod.rs +++ b/packages/apalis-core/src/monitor/mod.rs @@ -325,7 +325,7 @@ mod tests { Ok::<_, io::Error>(request) }); let worker = WorkerBuilder::new("rango-tango") - .source(backend) + .backend(backend) .build(service); let monitor: Monitor = Monitor::new(); let monitor = monitor.register(worker); @@ -351,7 +351,7 @@ mod tests { Ok::<_, io::Error>(request) }); let worker = WorkerBuilder::new("rango-tango") - .source(backend) + .backend(backend) .build(service); let monitor: Monitor = Monitor::new(); let monitor = monitor.on_event(|e| { diff --git a/packages/apalis-core/src/worker/mod.rs b/packages/apalis-core/src/worker/mod.rs index e46866ff..864d4291 100644 --- a/packages/apalis-core/src/worker/mod.rs +++ b/packages/apalis-core/src/worker/mod.rs @@ -720,8 +720,8 @@ mod tests { #[tokio::test] async fn it_works() { - let backend = MemoryStorage::new(); - let mut handle = backend.clone(); + let in_memory = MemoryStorage::new(); + let mut handle = in_memory.clone(); tokio::spawn(async move { for i in 0..ITEMS { @@ -749,7 +749,7 @@ mod tests { let worker = WorkerBuilder::new("rango-tango") // .chain(|svc| svc.timeout(Duration::from_millis(500))) .data(Count::default()) - .source(backend); + .backend(in_memory); let worker = worker.build_fn(task); let worker = 
worker.with_executor(TokioTestExecutor); let w = worker.clone(); diff --git a/packages/apalis-redis/src/lib.rs b/packages/apalis-redis/src/lib.rs index 40246519..86815734 100644 --- a/packages/apalis-redis/src/lib.rs +++ b/packages/apalis-redis/src/lib.rs @@ -18,7 +18,7 @@ //! Monitor::::new() //! .register( //! WorkerBuilder::new("tasty-pear") -//! .source(storage.clone()) +//! .backend(storage.clone()) //! .build_fn(send_email), //! ) //! .run() diff --git a/packages/apalis-sql/src/postgres.rs b/packages/apalis-sql/src/postgres.rs index 72a67a26..4dc73c02 100644 --- a/packages/apalis-sql/src/postgres.rs +++ b/packages/apalis-sql/src/postgres.rs @@ -31,7 +31,7 @@ //! .register_with_count(4, { //! WorkerBuilder::new(&format!("tasty-avocado")) //! .data(0usize) -//! .source(pg) +//! .backend(pg) //! .build_fn(send_email) //! }) //! .run() diff --git a/src/lib.rs b/src/lib.rs index 27c99abd..54892a40 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -39,7 +39,7 @@ //! .register_with_count(2, { //! WorkerBuilder::new(&format!("quick-sand")) //! .data(0usize) -//! .source(storage.clone()) +//! .backend(storage.clone()) //! .build_fn(send_email) //! }) //! .run() From 7159edb72818e1811fcabc460a3d4413832fc9d7 Mon Sep 17 00:00:00 2001 From: Geoffrey Mureithi <95377562+geofmureithi@users.noreply.github.com> Date: Wed, 10 Jul 2024 10:07:23 +0300 Subject: [PATCH 22/59] feat: standardize cron as backend (#359) --- Cargo.toml | 2 +- examples/async-std-runtime/src/main.rs | 2 +- examples/cron/Cargo.toml | 23 +++++++++++++ examples/cron/src/main.rs | 44 +++++++++++++++++++++++++ packages/apalis-core/src/builder.rs | 1 + packages/apalis-core/src/monitor/mod.rs | 2 +- packages/apalis-core/src/worker/mod.rs | 6 ++-- packages/apalis-cron/Cargo.toml | 5 --- packages/apalis-cron/src/lib.rs | 22 ++++++++++++- 9 files changed, 95 insertions(+), 12 deletions(-) create mode 100644 examples/cron/Cargo.toml create mode 100644 examples/cron/src/main.rs diff --git a/Cargo.toml b/Cargo.toml index 431d740d..c1551daf 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -113,7 +113,7 @@ members = [ "examples/basics", "examples/redis-with-msg-pack", "examples/redis-deadpool", - "examples/redis-mq-example", + "examples/redis-mq-example", "examples/cron", ] diff --git a/examples/async-std-runtime/src/main.rs b/examples/async-std-runtime/src/main.rs index f77a8958..58f2afae 100644 --- a/examples/async-std-runtime/src/main.rs +++ b/examples/async-std-runtime/src/main.rs @@ -44,7 +44,7 @@ async fn main() -> Result<()> { let worker = WorkerBuilder::new("daily-cron-worker") .layer(RetryLayer::new(RetryPolicy::retries(5))) .layer(TraceLayer::new().make_span_with(ReminderSpan::new())) - .stream(CronStream::new(schedule).into_stream()) + .backend(CronStream::new(schedule)) .build_fn(send_reminder); Monitor::::new() diff --git a/examples/cron/Cargo.toml b/examples/cron/Cargo.toml new file mode 100644 index 00000000..f187e27c --- /dev/null +++ b/examples/cron/Cargo.toml @@ -0,0 +1,23 @@ +[package] +name = "cron" +version = "0.1.0" +edition = "2021" + +[dependencies] +anyhow = "1" +apalis = { path = "../../", default-features = false, features = [ + "tokio-comp", + "tracing", + "limit", +] } +apalis-cron = { path = "../../packages/apalis-cron" } +tokio = { version = "1", features = ["full"] } +serde = "1" +tracing-subscriber = "0.3.11" +chrono = { version = "0.4", default-features = false, features = ["clock"] } +pin-project-lite = "0.2.9" +tower = { version = "0.4", features = ["load-shed"] } + +[dependencies.tracing] +default-features = false 
+version = "0.1" diff --git a/examples/cron/src/main.rs b/examples/cron/src/main.rs new file mode 100644 index 00000000..4a8fb74c --- /dev/null +++ b/examples/cron/src/main.rs @@ -0,0 +1,44 @@ +use apalis::prelude::*; +use apalis::utils::TokioExecutor; +use apalis_cron::CronStream; +use apalis_cron::Schedule; +use chrono::{DateTime, Utc}; +use std::str::FromStr; +use std::time::Duration; +use tower::limit::RateLimitLayer; +use tower::load_shed::LoadShedLayer; + +#[derive(Clone)] +struct FakeService; +impl FakeService { + fn execute(&self, item: Reminder) { + dbg!(&item.0); + } +} + +#[derive(Default, Debug, Clone)] +struct Reminder(DateTime); +impl From> for Reminder { + fn from(t: DateTime) -> Self { + Reminder(t) + } +} +async fn send_reminder(job: Reminder, svc: Data) { + svc.execute(job); +} + +#[tokio::main] +async fn main() { + let schedule = Schedule::from_str("1/1 * * * * *").unwrap(); + let worker = WorkerBuilder::new("morning-cereal") + .layer(LoadShedLayer::new()) // Important when you have layers that block the service + .layer(RateLimitLayer::new(1, Duration::from_secs(2))) + .data(FakeService) + .backend(CronStream::new(schedule)) + .build_fn(send_reminder); + Monitor::::new() + .register(worker) + .run() + .await + .unwrap(); +} diff --git a/packages/apalis-core/src/builder.rs b/packages/apalis-core/src/builder.rs index 7bcf1b9d..4d837f38 100644 --- a/packages/apalis-core/src/builder.rs +++ b/packages/apalis-core/src/builder.rs @@ -55,6 +55,7 @@ impl WorkerBuilder<(), (), Identity, Serv> { impl WorkerBuilder { /// Consume a stream directly + #[deprecated(since = "0.6.0", note = "Consider using the `.backend`")] pub fn stream>, Error>> + Send + 'static, NJ>( self, stream: NS, diff --git a/packages/apalis-core/src/monitor/mod.rs b/packages/apalis-core/src/monitor/mod.rs index af5cb8ec..aad4dafc 100644 --- a/packages/apalis-core/src/monitor/mod.rs +++ b/packages/apalis-core/src/monitor/mod.rs @@ -97,7 +97,7 @@ impl Monitor { <<
<P as Backend<Request<J>>>::Layer as Layer<S>>::Service as Service<Request<J>>>::Future: Send, <<<P as Backend<Request<J>>>::Layer as Layer<S>>::Service as Service<Request<J>>>::Error: - Send + std::error::Error + Sync, + Send + Into<BoxDynError> + Sync, { self.workers.push(worker.with_monitor(&self)); diff --git a/packages/apalis-core/src/worker/mod.rs b/packages/apalis-core/src/worker/mod.rs index 864d4291..b9c071f0 100644 --- a/packages/apalis-core/src/worker/mod.rs +++ b/packages/apalis-core/src/worker/mod.rs @@ -276,7 +276,7 @@ impl<S, P> Worker<Ready<S, P>> {
<<<P as Backend<Request<J>>>::Layer as Layer<S>>::Service as Service<Request<J>>>::Future: Send, <<<P as Backend<Request<J>>>::Layer as Layer<S>>::Service as Service<Request<J>>>::Error: - Send + std::error::Error + Sync, + Send + Into<BoxDynError> + Sync, { let notifier = Notify::new(); let service = self.state.service; @@ -322,7 +322,7 @@ impl<S, P> Worker<Ready<S, P>> {
<<<P as Backend<Request<J>>>::Layer as Layer<S>>::Service as Service<Request<J>>>::Future: Send, <<<P as Backend<Request<J>>>::Layer as Layer<S>>::Service as Service<Request<J>>>::Error: - Send + std::error::Error + Sync, + Send + Into<BoxDynError> + Sync, { let notifier = Notify::new(); let service = self.state.service; @@ -378,7 +378,7 @@ impl<S, P> Worker<Ready<S, P>> {
<<<P as Backend<Request<J>>>::Layer as Layer<S>>::Service as Service<Request<J>>>::Future: Send, <<<P as Backend<Request<J>>>::Layer as Layer<S>>::Service as Service<Request<J>>>::Error: - Send + std::error::Error + Sync, + Send + Into<BoxDynError> + Sync, { let worker_id = self.id.clone(); let notifier = Notify::new(); diff --git a/packages/apalis-cron/Cargo.toml b/packages/apalis-cron/Cargo.toml index 199c2f4d..8eeeb6aa 100644 --- a/packages/apalis-cron/Cargo.toml +++ b/packages/apalis-cron/Cargo.toml @@ -11,7 +11,6 @@ description = "A simple yet extensible library for cron-like job scheduling for [dependencies] apalis-core = { path = "../../packages/apalis-core", version = "0.6.0-rc.1", default-features = false, features = [ "sleep", - "json", ] } cron = "0.12.1" futures = "0.3.30" @@ -29,10 +28,6 @@ apalis-core = { path = "../../packages/apalis-core" } apalis = { path = "../../", default-features = false, features = ["retry"] } serde = { version = "1.0", features = ["derive"] } -[features] -default = ["tokio-comp"] -async-std-comp = ["async-std"] -tokio-comp = ["tokio/net"] [package.metadata.docs.rs] # defines the configuration attribute `docsrs` diff --git a/packages/apalis-cron/src/lib.rs b/packages/apalis-cron/src/lib.rs index 50019456..024fa9f4 100644 --- a/packages/apalis-cron/src/lib.rs +++ b/packages/apalis-cron/src/lib.rs @@ -58,8 +58,12 @@ //! ``` use apalis_core::data::Extensions; +use apalis_core::layers::Identity; +use apalis_core::poller::Poller; use apalis_core::request::RequestStream; use apalis_core::task::task_id::TaskId; +use apalis_core::worker::WorkerId; +use apalis_core::Backend; use apalis_core::{error::Error, request::Request}; use chrono::{DateTime, TimeZone, Utc}; pub use cron::Schedule; @@ -104,7 +108,7 @@ where Tz::Offset: Send + Sync, { /// Convert to consumable - pub fn into_stream(self) -> RequestStream<Request<J>> { + fn into_stream(self) -> RequestStream<Request<J>> { let timezone = self.timezone.clone(); let stream = async_stream::stream!
{ let mut schedule = self.schedule.upcoming_owned(timezone.clone()); @@ -128,3 +132,19 @@ where Box::pin(stream) } } + +impl Backend> for CronStream +where + J: From> + Send + Sync + 'static, + Tz: TimeZone + Send + Sync + 'static, + Tz::Offset: Send + Sync, +{ + type Stream = RequestStream>; + + type Layer = Identity; + + fn poll(self, _worker: WorkerId) -> Poller { + let stream = self.into_stream(); + Poller::new(stream, async {}) + } +} From 8ae48dcebb6750d8203b1e94b1893c4cfe833088 Mon Sep 17 00:00:00 2001 From: Geoffrey Mureithi <95377562+geofmureithi@users.noreply.github.com> Date: Wed, 10 Jul 2024 10:17:32 +0300 Subject: [PATCH 23/59] fix: remove non-working restapi example (#360) --- Cargo.toml | 4 +- examples/rest-api/Cargo.toml | 19 -- examples/rest-api/README.md | 14 +- examples/rest-api/src/main.rs | 349 ---------------------------------- 4 files changed, 3 insertions(+), 383 deletions(-) delete mode 100644 examples/rest-api/Cargo.toml delete mode 100644 examples/rest-api/src/main.rs diff --git a/Cargo.toml b/Cargo.toml index c1551daf..16815955 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -108,12 +108,12 @@ members = [ "examples/axum", "examples/prometheus", "examples/tracing", - # "examples/rest-api", "examples/async-std-runtime", "examples/basics", "examples/redis-with-msg-pack", "examples/redis-deadpool", - "examples/redis-mq-example", "examples/cron", + "examples/redis-mq-example", + "examples/cron", ] diff --git a/examples/rest-api/Cargo.toml b/examples/rest-api/Cargo.toml deleted file mode 100644 index a6bd2527..00000000 --- a/examples/rest-api/Cargo.toml +++ /dev/null @@ -1,19 +0,0 @@ -[package] -name = "rest-api-example" -version = "0.1.0" -authors = ["Njuguna Mureithi "] -edition = "2018" -license = "MIT OR Apache-2.0" - -[dependencies] -anyhow = "1" -apalis = { path = "../../", features = ["redis", "sqlite", "sentry", "postgres", "mysql", "expose"] } -serde = "1" -tokio = { version = "1", features =["macros", "rt-multi-thread"] } -env_logger = "0.10" -actix-web = "4" -futures = "0.3" -actix-cors = "0.6.1" -serde_json = "1" -chrono = { version = "0.4", default-features = false, features = ["clock"] } -email-service = { path = "../email-service" } diff --git a/examples/rest-api/README.md b/examples/rest-api/README.md index 8a3c220e..ea4f534b 100644 --- a/examples/rest-api/README.md +++ b/examples/rest-api/README.md @@ -2,16 +2,4 @@ ![UI](https://github.com/geofmureithi/apalis-board/raw/master/screenshots/workers.png) -## Backend - -``` -cd examples && cargo run -p rest-api -``` - -## Frontend - -``` -git clone https://github.com/geofmureithi/apalis-board -cd apalis-board -yarn && yarn start:dev -``` +Please see https://github.com/geofmureithi/apalis-board for a working example diff --git a/examples/rest-api/src/main.rs b/examples/rest-api/src/main.rs deleted file mode 100644 index b0b9ca8e..00000000 --- a/examples/rest-api/src/main.rs +++ /dev/null @@ -1,349 +0,0 @@ -use std::collections::HashSet; -use std::time::Duration; - -use actix_cors::Cors; -use actix_web::{web, App, HttpResponse, HttpServer, Scope}; -use apalis::{ - layers::{SentryJobLayer, TraceLayer}, - mysql::MysqlStorage, - postgres::PostgresStorage, - prelude::*, - redis::RedisStorage, - sqlite::SqliteStorage, -}; -use futures::future; -use serde::{de::DeserializeOwned, Deserialize, Serialize}; - -use email_service::{send_email, Email}; - -#[derive(Debug, Deserialize, Serialize)] -struct Notification { - text: String, -} - -impl Job for Notification { - const NAME: &'static str = "sqlite::Notification"; -} - 
-async fn notification_service(notif: Notification) -> Result<(), Error> { - println!("Attempting to send notification {}", notif.text); - tokio::time::sleep(Duration::from_millis(1)).await; - Ok(()) -} - -#[derive(Debug, Deserialize, Serialize)] -struct Document { - text: String, -} - -impl Job for Document { - const NAME: &'static str = "postgres::Document"; -} - -async fn document_service(doc: Document) -> Result<(), Error> { - println!("Attempting to convert {} to pdf", doc.text); - tokio::time::sleep(Duration::from_millis(1)).await; - Ok(()) -} - -#[derive(Debug, Deserialize, Serialize)] -struct Upload { - url: String, -} - -impl Job for Upload { - const NAME: &'static str = "mysql::Upload"; -} - -async fn upload_service(upload: Upload) -> Result<(), Error> { - println!("Attempting to upload {} to cloud", upload.url); - tokio::time::sleep(Duration::from_millis(1)).await; - Ok(()) -} - -#[derive(Serialize)] -struct JobsResult { - jobs: Vec>, - counts: StateCount, -} -#[derive(Deserialize)] -struct Filter { - #[serde(default)] - status: State, - #[serde(default)] - page: i32, -} - -async fn push_job(job: web::Json, storage: web::Data) -> HttpResponse -where - J: Serialize + DeserializeOwned + 'static, - S: Storage, -{ - let storage = &*storage.into_inner(); - let mut storage = storage.clone(); - let res = storage.push(job.into_inner()).await; - match res { - Ok(id) => HttpResponse::Ok().body(format!("Job with ID [{id}] added to queue")), - Err(e) => HttpResponse::InternalServerError().body(format!("{e}")), - } -} - -async fn get_jobs(storage: web::Data, filter: web::Query) -> HttpResponse -where - J: Serialize + DeserializeOwned + 'static, - S: Storage + JobStreamExt + Send, -{ - let storage = &*storage.into_inner(); - let mut storage = storage.clone(); - let counts = storage.counts().await.unwrap(); - let jobs = storage.list_jobs(&filter.status, filter.page).await; - - match jobs { - Ok(jobs) => HttpResponse::Ok().json(JobsResult { jobs, counts }), - Err(e) => HttpResponse::InternalServerError().body(format!("{e}")), - } -} - -async fn get_workers(storage: web::Data) -> HttpResponse -where - J: Serialize + DeserializeOwned + 'static, - S: Storage + JobStreamExt, -{ - let storage = &*storage.into_inner(); - let mut storage = storage.clone(); - let workers = storage.list_workers().await; - match workers { - Ok(workers) => HttpResponse::Ok().json(serde_json::to_value(workers).unwrap()), - Err(e) => HttpResponse::InternalServerError().body(format!("{e}")), - } -} - -async fn get_job(job_id: web::Path, storage: web::Data) -> HttpResponse -where - J: Serialize + DeserializeOwned + 'static, - S: Storage + 'static, -{ - let storage = &*storage.into_inner(); - let storage = storage.clone(); - let res = storage.fetch_by_id(&job_id).await; - match res { - Ok(Some(job)) => HttpResponse::Ok().json(job), - Ok(None) => HttpResponse::NotFound().finish(), - Err(e) => HttpResponse::InternalServerError().body(format!("{e}")), - } -} - -trait StorageRest: Storage { - fn name(&self) -> String; -} - -impl StorageRest for S -where - S: Storage + JobStreamExt + 'static, - J: Serialize + DeserializeOwned + 'static, -{ - fn name(&self) -> String { - J::NAME.to_string() - } -} - -#[derive(Debug, Deserialize, Serialize)] -struct Queue { - name: String, -} - -#[derive(Debug, Deserialize, Serialize)] -struct QueueList { - set: HashSet, -} - -struct StorageApiBuilder { - scope: Scope, - list: QueueList, -} - -impl StorageApiBuilder { - fn add_storage(mut self, storage: S) -> Self - where - J: Serialize + 
DeserializeOwned + 'static, - S: StorageRest + JobStreamExt, - S: Storage, - S: 'static + Send, - { - let name = J::NAME.to_string(); - self.list.set.insert(name); - - Self { - scope: self.scope.service( - Scope::new(J::NAME) - .app_data(web::Data::new(storage)) - .route("", web::get().to(get_jobs::)) // Fetch jobs in queue - .route("/workers", web::get().to(get_workers::)) // Fetch jobs in queue - .route("/job", web::put().to(push_job::)) // Allow add jobs via api - .route("/job/{job_id}", web::get().to(get_job::)), // Allow fetch specific job - ), - list: self.list, - } - } - - fn build(self) -> Scope { - async fn fetch_queues(queues: web::Data) -> HttpResponse { - let mut queue_result = Vec::new(); - for queue in &queues.set { - queue_result.push(Queue { - name: queue.clone(), - }) - } - #[derive(Serialize)] - struct Res { - queues: Vec, - } - - HttpResponse::Ok().json(Res { - queues: queue_result, - }) - } - - self.scope - .app_data(web::Data::new(self.list)) - .route("", web::get().to(fetch_queues)) - } - - fn new() -> Self { - Self { - scope: Scope::new("queues"), - list: QueueList { - set: HashSet::new(), - }, - } - } -} - -async fn produce_redis_jobs(mut storage: RedisStorage) { - for i in 0..10 { - storage - .push(Email { - to: format!("test{i}@example.com"), - text: "Test background job from apalis".to_string(), - subject: "Background email job".to_string(), - }) - .await - .unwrap(); - } -} -async fn produce_sqlite_jobs(mut storage: SqliteStorage) { - for i in 0..100 { - storage - .push(Notification { - text: format!("Notiification: {i}"), - }) - .await - .unwrap(); - } -} - -async fn produce_postgres_jobs(mut storage: PostgresStorage) { - for i in 0..100 { - storage - .push(Document { - text: format!("Document: {i}"), - }) - .await - .unwrap(); - } -} - -async fn produce_mysql_jobs(mut storage: MysqlStorage) { - for i in 0..100 { - storage - .push(Upload { - url: format!("Upload: {i}"), - }) - .await - .unwrap(); - } -} - -#[tokio::main(flavor = "multi_thread", worker_threads = 10)] -async fn main() -> anyhow::Result<()> { - std::env::set_var("RUST_LOG", "debug,sqlx::query=error"); - env_logger::init(); - let database_url = std::env::var("DATABASE_URL").expect("Must specify DATABASE_URL"); - let pg: PostgresStorage = PostgresStorage::connect(database_url).await?; - pg.setup().await.expect("Unable to migrate"); - - let database_url = std::env::var("MYSQL_URL").expect("Must specify MYSQL_URL"); - - let mysql: MysqlStorage = MysqlStorage::connect(database_url).await?; - mysql - .setup() - .await - .expect("unable to run migrations for mysql"); - - let storage = RedisStorage::connect("redis://127.0.0.1/").await?; - - let sqlite = SqliteStorage::connect("sqlite://data.db").await?; - sqlite.setup().await.expect("Unable to migrate"); - - let worker_storage = storage.clone(); - let sqlite_storage = sqlite.clone(); - let pg_storage = pg.clone(); - let mysql_storage = mysql.clone(); - - produce_redis_jobs(storage.clone()).await; - produce_sqlite_jobs(sqlite.clone()).await; - produce_postgres_jobs(pg_storage.clone()).await; - produce_mysql_jobs(mysql.clone()).await; - let http = async { - HttpServer::new(move || { - App::new().wrap(Cors::permissive()).service( - web::scope("/api").service( - StorageApiBuilder::new() - .add_storage(storage.clone()) - .add_storage(sqlite.clone()) - .add_storage(pg.clone()) - .add_storage(mysql.clone()) - .build(), - ), - ) - }) - .bind("127.0.0.1:8000")? 
- .run() - .await?; - Ok(()) - }; - - let worker = Monitor::new() - .register_with_count(1, move |_| { - WorkerBuilder::new("tasty-apple") - .layer(SentryJobLayer) - .layer(TraceLayer::new()) - .backend(worker_storage.clone()) - .build_fn(send_email) - }) - .register_with_count(4, move |c| { - WorkerBuilder::new(format!("tasty-avocado-{c}")) - .layer(SentryJobLayer) - .layer(TraceLayer::new()) - .backend(sqlite_storage.clone()) - .build_fn(notification_service) - }) - .register_with_count(2, move |c| { - WorkerBuilder::new(format!("tasty-banana-{c}")) - .layer(SentryJobLayer) - .layer(TraceLayer::new()) - .backend(pg_storage.clone()) - .build_fn(document_service) - }) - .register_with_count(2, move |c| { - WorkerBuilder::new(format!("tasty-pear-{c}")) - .layer(SentryJobLayer::new()) - .layer(TraceLayer::new()) - .backend(mysql_storage.clone()) - .build_fn(upload_service) - }) - .run(); - future::try_join(http, worker).await?; - - Ok(()) -} From f1daab31aeae88417963fd4ba929730c8583f1c0 Mon Sep 17 00:00:00 2001 From: Geoffrey Mureithi <95377562+geofmureithi@users.noreply.github.com> Date: Wed, 10 Jul 2024 11:02:03 +0300 Subject: [PATCH 24/59] fix: expose the missing apis (#361) --- examples/redis-mq-example/src/main.rs | 5 +-- packages/apalis-core/src/lib.rs | 6 +++ packages/apalis-redis/src/storage.rs | 46 +++++++++++++++++++- packages/apalis-sql/src/from_row.rs | 46 +++++++++++++++++++- packages/apalis-sql/src/mysql.rs | 46 ++++++++++++-------- packages/apalis-sql/src/postgres.rs | 60 +++++++++++++++------------ packages/apalis-sql/src/sqlite.rs | 50 +++++++++++----------- 7 files changed, 182 insertions(+), 77 deletions(-) diff --git a/examples/redis-mq-example/src/main.rs b/examples/redis-mq-example/src/main.rs index 638cdf5a..ec8b8de8 100644 --- a/examples/redis-mq-example/src/main.rs +++ b/examples/redis-mq-example/src/main.rs @@ -81,10 +81,7 @@ impl MessageQueue for RedisMq { async fn enqueue(&mut self, message: Message) -> Result<(), Self::Error> { let bytes = self .codec - .encode(&RedisJob { - ctx: Default::default(), - job: message, - }) + .encode(&RedisJob::new(message, Default::default())) .unwrap(); self.conn.send_message("email", bytes, None).await?; Ok(()) diff --git a/packages/apalis-core/src/lib.rs b/packages/apalis-core/src/lib.rs index 5511ceef..1ab6244c 100644 --- a/packages/apalis-core/src/lib.rs +++ b/packages/apalis-core/src/lib.rs @@ -22,6 +22,8 @@ #![cfg_attr(docsrs, feature(doc_cfg))] //! # apalis-core //! Utilities for building job and message processing tools. +use std::sync::Arc; + use futures::Stream; use poller::Poller; use worker::WorkerId; @@ -93,6 +95,10 @@ pub trait Codec { fn decode(&self, compact: &Compact) -> Result; } +/// A boxed codec +pub type BoxCodec = + Arc + Sync + Send + 'static>>; + /// Sleep utilities #[cfg(feature = "sleep")] pub async fn sleep(duration: std::time::Duration) { diff --git a/packages/apalis-redis/src/storage.rs b/packages/apalis-redis/src/storage.rs index ea845518..008c7f18 100644 --- a/packages/apalis-redis/src/storage.rs +++ b/packages/apalis-redis/src/storage.rs @@ -95,9 +95,51 @@ struct RedisScript { #[derive(Clone, Debug, Serialize, Deserialize)] pub struct RedisJob { /// The job context - pub ctx: Context, + ctx: Context, /// The inner job - pub job: J, + job: J, +} + +impl RedisJob { + /// Creates a new RedisJob. + pub fn new(job: J, ctx: Context) -> Self { + RedisJob { ctx, job } + } + + /// Gets a reference to the context. 
+ pub fn ctx(&self) -> &Context { + &self.ctx + } + + /// Gets a mutable reference to the context. + pub fn ctx_mut(&mut self) -> &mut Context { + &mut self.ctx + } + + /// Sets the context. + pub fn set_ctx(&mut self, ctx: Context) { + self.ctx = ctx; + } + + /// Gets a reference to the job. + pub fn job(&self) -> &J { + &self.job + } + + /// Gets a mutable reference to the job. + pub fn job_mut(&mut self) -> &mut J { + &mut self.job + } + + /// Sets the job. + pub fn set_job(&mut self, job: J) { + self.job = job; + } + + /// Combines context and job into a tuple. + pub fn into_tuple(self) -> (Context, J) { + (self.ctx, self.job) + } } impl From> for Request { diff --git a/packages/apalis-sql/src/from_row.rs b/packages/apalis-sql/src/from_row.rs index b14c675e..00d748f7 100644 --- a/packages/apalis-sql/src/from_row.rs +++ b/packages/apalis-sql/src/from_row.rs @@ -6,8 +6,50 @@ use crate::context::SqlContext; /// Wrapper for [Request] #[derive(Debug, Clone)] pub struct SqlRequest { - pub(crate) req: T, - pub(crate) context: SqlContext, + req: T, + context: SqlContext, +} + +impl SqlRequest { + /// Creates a new SqlRequest. + pub fn new(req: T, context: SqlContext) -> Self { + SqlRequest { req, context } + } + + /// Gets a reference to the request. + pub fn req(&self) -> &T { + &self.req + } + + /// Gets a mutable reference to the request. + pub fn req_mut(&mut self) -> &mut T { + &mut self.req + } + + /// Sets the request. + pub fn set_req(&mut self, req: T) { + self.req = req; + } + + /// Gets a reference to the context. + pub fn context(&self) -> &SqlContext { + &self.context + } + + /// Gets a mutable reference to the context. + pub fn context_mut(&mut self) -> &mut SqlContext { + &mut self.context + } + + /// Sets the context. + pub fn set_context(&mut self, context: SqlContext) { + self.context = context; + } + + /// Combines request and context into a tuple. + pub fn into_tuple(self) -> (T, SqlContext) { + (self.req, self.context) + } } impl From> for Request { diff --git a/packages/apalis-sql/src/mysql.rs b/packages/apalis-sql/src/mysql.rs index 2abaa17b..7a2be327 100644 --- a/packages/apalis-sql/src/mysql.rs +++ b/packages/apalis-sql/src/mysql.rs @@ -10,7 +10,7 @@ use apalis_core::storage::Storage; use apalis_core::task::namespace::Namespace; use apalis_core::task::task_id::TaskId; use apalis_core::worker::WorkerId; -use apalis_core::{Backend, Codec}; +use apalis_core::{Backend, BoxCodec}; use async_stream::try_stream; use futures::{Stream, StreamExt, TryStreamExt}; use log::error; @@ -38,7 +38,7 @@ pub struct MysqlStorage { job_type: PhantomData, controller: Controller, config: Config, - codec: Arc + Sync + Send + 'static>>, + codec: BoxCodec, ack_notify: Notify<(WorkerId, TaskId)>, } @@ -109,6 +109,11 @@ impl MysqlStorage { pub fn pool(&self) -> &Pool { &self.pool } + + /// Expose the codec + pub fn codec(&self) -> &BoxCodec { + &self.codec + } } impl MysqlStorage { @@ -159,13 +164,18 @@ impl MysqlStorage { let jobs: Vec> = query.fetch_all(&pool).await?; for job in jobs { - yield Some(Into::into(SqlRequest { - context: job.context, - req: self.codec.decode(&job.req).map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::InvalidData, e)))? 
- })).map(|mut req: Request| { + yield { + let (req, ctx) = job.into_tuple(); + let req = self + .codec + .decode(&req) + .map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::InvalidData, e))) + .unwrap(); + let req = SqlRequest::new(req, ctx); + let mut req: Request = req.into(); req.insert(Namespace(config.namespace.clone())); - req - }) + Some(req) + } } } } @@ -261,15 +271,17 @@ where .await?; match res { None => Ok(None), - Some(c) => Ok(Some( - SqlRequest { - context: c.context, - req: self.codec.decode(&c.req).map_err(|e| { - sqlx::Error::Io(io::Error::new(io::ErrorKind::InvalidData, e)) - })?, - } - .into(), - )), + Some(job) => Ok(Some({ + let (req, ctx) = job.into_tuple(); + let req = self + .codec + .decode(&req) + .map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::InvalidData, e)))?; + let req = SqlRequest::new(req, ctx); + let mut req: Request = req.into(); + req.insert(Namespace(self.config.namespace.clone())); + req + })), } } diff --git a/packages/apalis-sql/src/postgres.rs b/packages/apalis-sql/src/postgres.rs index 4dc73c02..53f3e32c 100644 --- a/packages/apalis-sql/src/postgres.rs +++ b/packages/apalis-sql/src/postgres.rs @@ -52,7 +52,7 @@ use apalis_core::storage::Storage; use apalis_core::task::namespace::Namespace; use apalis_core::task::task_id::TaskId; use apalis_core::worker::WorkerId; -use apalis_core::{Backend, Codec}; +use apalis_core::{Backend, BoxCodec}; use futures::channel::mpsc; use futures::StreamExt; use futures::{select, stream, SinkExt}; @@ -79,14 +79,7 @@ use crate::from_row::SqlRequest; pub struct PostgresStorage { pool: PgPool, job_type: PhantomData, - codec: Arc< - Box< - dyn Codec - + Sync - + Send - + 'static, - >, - >, + codec: BoxCodec, config: Config, controller: Controller, ack_notify: Notify>, @@ -259,6 +252,16 @@ impl PostgresStorage { pub fn pool(&self) -> &Pool { &self.pool } + + /// Expose the config + pub fn config(&self) -> &Config { + &self.config + } + + /// Expose the codec + pub fn codec(&self) -> &BoxCodec { + &self.codec + } } /// A listener that listens to Postgres notifications @@ -323,7 +326,6 @@ impl PgListen { impl PostgresStorage { async fn fetch_next(&mut self, worker_id: &WorkerId) -> Result>, sqlx::Error> { let config = &self.config; - let codec = &self.codec; let job_type = &config.namespace; let fetch_query = "Select * from apalis.get_jobs($1, $2, $3);"; let jobs: Vec> = sqlx::query_as(fetch_query) @@ -339,15 +341,15 @@ impl PostgresStorage { let jobs: Vec<_> = jobs .into_iter() .map(|job| { - let req = SqlRequest { - context: job.context, - req: codec - .decode(&job.req) - .map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::InvalidData, e))) - .unwrap(), - }; + let (req, ctx) = job.into_tuple(); + let req = self + .codec + .decode(&req) + .map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::InvalidData, e))) + .unwrap(); + let req = SqlRequest::new(req, ctx); let mut req: Request = req.into(); - req.insert(Namespace(config.namespace.clone())); + req.insert(Namespace(self.config.namespace.clone())); req }) .collect(); @@ -445,17 +447,21 @@ where .bind(job_id.to_string()) .fetch_optional(&self.pool) .await?; + match res { None => Ok(None), - Some(c) => Ok(Some( - SqlRequest { - context: c.context, - req: self.codec.decode(&c.req).map_err(|e| { - sqlx::Error::Io(io::Error::new(io::ErrorKind::InvalidData, e)) - })?, - } - .into(), - )), + Some(job) => Ok(Some({ + let (req, ctx) = job.into_tuple(); + let req = self + .codec + .decode(&req) + .map_err(|e| 
sqlx::Error::Io(io::Error::new(io::ErrorKind::InvalidData, e))) + .unwrap(); + let req = SqlRequest::new(req, ctx); + let mut req: Request = req.into(); + req.insert(Namespace(self.config.namespace.clone())); + req + })), } } diff --git a/packages/apalis-sql/src/sqlite.rs b/packages/apalis-sql/src/sqlite.rs index 3e31a64d..6a3e25dd 100644 --- a/packages/apalis-sql/src/sqlite.rs +++ b/packages/apalis-sql/src/sqlite.rs @@ -12,7 +12,7 @@ use apalis_core::storage::Storage; use apalis_core::task::namespace::Namespace; use apalis_core::task::task_id::TaskId; use apalis_core::worker::WorkerId; -use apalis_core::{Backend, Codec}; +use apalis_core::{Backend, BoxCodec}; use async_stream::try_stream; use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; use serde::{de::DeserializeOwned, Serialize}; @@ -31,7 +31,7 @@ pub use sqlx::sqlite::SqlitePool; /// The code used to encode Sqlite jobs. /// /// Currently uses JSON -pub type SqliteCodec = Arc + Sync + Send + 'static>>; +pub type SqliteCodec = BoxCodec; /// Represents a [Storage] that persists to Sqlite // #[derive(Debug)] @@ -200,22 +200,19 @@ impl SqliteStorage { let res = fetch_next(&pool, &worker_id, id.0, &config).await?; yield match res { None => None::>, - Some(c) => Some( - SqlRequest { - context: c.context, - req: codec.decode(&c.req).map_err(|e| { - sqlx::Error::Io(io::Error::new(io::ErrorKind::InvalidData, e)) - })?, - } - .into(), - ).map(|mut req: Request| { + Some(job) => { + let (req, ctx) = job.into_tuple(); + let req = codec + .decode(&req) + .map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::InvalidData, e))) + .unwrap(); + let req = SqlRequest::new(req, ctx); + let mut req: Request = req.into(); req.insert(Namespace(config.namespace.clone())); - req - }), + Some(req) + } } - - .map(Into::into); - } + }; } } } @@ -280,15 +277,18 @@ where .await?; match res { None => Ok(None), - Some(c) => Ok(Some( - SqlRequest { - context: c.context, - req: self.codec.decode(&c.req).map_err(|e| { - sqlx::Error::Io(io::Error::new(io::ErrorKind::InvalidData, e)) - })?, - } - .into(), - )), + Some(job) => Ok(Some({ + let (req, ctx) = job.into_tuple(); + let req = self + .codec + .decode(&req) + .map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::InvalidData, e))) + .unwrap(); + let req = SqlRequest::new(req, ctx); + let mut req: Request = req.into(); + req.insert(Namespace(self.config.namespace.clone())); + req + })), } } From 9e71559805a05943cb6640124f059031c7f13842 Mon Sep 17 00:00:00 2001 From: Geoffrey Mureithi <95377562+geofmureithi@users.noreply.github.com> Date: Wed, 10 Jul 2024 11:06:55 +0300 Subject: [PATCH 25/59] bump: to new version (#362) --- Cargo.toml | 4 ++-- examples/cron/Cargo.toml | 2 +- packages/apalis-core/Cargo.toml | 2 +- packages/apalis-cron/Cargo.toml | 4 ++-- packages/apalis-redis/Cargo.toml | 4 ++-- packages/apalis-sql/Cargo.toml | 4 ++-- 6 files changed, 10 insertions(+), 10 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 16815955..4c3dc410 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apalis" -version = "0.6.0-rc.1" +version = "0.6.0-rc.2" authors = ["Geoffrey Mureithi "] description = "Simple, extensible multithreaded background job processing for Rust" repository = "https://github.com/geofmureithi/apalis" @@ -51,7 +51,7 @@ layers = [ docsrs = ["document-features"] [dependencies.apalis-core] -version = "0.6.0-rc.1" +version = "0.6.0-rc.2" default-features = false path = "./packages/apalis-core" diff --git a/examples/cron/Cargo.toml 
b/examples/cron/Cargo.toml index f187e27c..070ac495 100644 --- a/examples/cron/Cargo.toml +++ b/examples/cron/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "cron" +name = "cron-example" version = "0.1.0" edition = "2021" diff --git a/packages/apalis-core/Cargo.toml b/packages/apalis-core/Cargo.toml index a873b9c6..398d48d7 100644 --- a/packages/apalis-core/Cargo.toml +++ b/packages/apalis-core/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apalis-core" -version = "0.6.0-rc.1" +version = "0.6.0-rc.2" authors = ["Njuguna Mureithi "] edition = "2021" license = "MIT" diff --git a/packages/apalis-cron/Cargo.toml b/packages/apalis-cron/Cargo.toml index 8eeeb6aa..c31505da 100644 --- a/packages/apalis-cron/Cargo.toml +++ b/packages/apalis-cron/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apalis-cron" -version = "0.6.0-rc.1" +version = "0.6.0-rc.2" edition = "2021" authors = ["Njuguna Mureithi "] license = "MIT" @@ -9,7 +9,7 @@ description = "A simple yet extensible library for cron-like job scheduling for # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -apalis-core = { path = "../../packages/apalis-core", version = "0.6.0-rc.1", default-features = false, features = [ +apalis-core = { path = "../../packages/apalis-core", version = "0.6.0-rc.2", default-features = false, features = [ "sleep", ] } cron = "0.12.1" diff --git a/packages/apalis-redis/Cargo.toml b/packages/apalis-redis/Cargo.toml index 500f321a..d2dd41aa 100644 --- a/packages/apalis-redis/Cargo.toml +++ b/packages/apalis-redis/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apalis-redis" -version = "0.6.0-rc.1" +version = "0.6.0-rc.2" authors = ["Njuguna Mureithi "] edition = "2021" readme = "../../README.md" @@ -11,7 +11,7 @@ description = "Redis Storage for apalis: use Redis for background jobs and messa # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -apalis-core = { path = "../../packages/apalis-core", version = "0.6.0-rc.1", default-features = false, features = [ +apalis-core = { path = "../../packages/apalis-core", version = "0.6.0-rc.2", default-features = false, features = [ "sleep", "json", ] } diff --git a/packages/apalis-sql/Cargo.toml b/packages/apalis-sql/Cargo.toml index 5d5af803..01d01002 100644 --- a/packages/apalis-sql/Cargo.toml +++ b/packages/apalis-sql/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apalis-sql" -version = "0.6.0-rc.1" +version = "0.6.0-rc.2" authors = ["Njuguna Mureithi "] edition = "2021" readme = "../../README.md" @@ -25,7 +25,7 @@ features = ["chrono"] [dependencies] serde = { version = "1", features = ["derive"] } serde_json = "1" -apalis-core = { path = "../../packages/apalis-core", version = "0.6.0-rc.1", default-features = false, features = [ +apalis-core = { path = "../../packages/apalis-core", version = "0.6.0-rc.2", default-features = false, features = [ "sleep", "json", ] } From 369d8ecd207d18e32c6a8e9bad94693d5562326d Mon Sep 17 00:00:00 2001 From: Geoffrey Mureithi <95377562+geofmureithi@users.noreply.github.com> Date: Fri, 12 Jul 2024 23:28:51 +0300 Subject: [PATCH 26/59] Make Config accessible publicly (#364) * fix: add missing exposed config * fix: add getters --- packages/apalis-sql/src/lib.rs | 45 +++++++++++++++++++++++++++---- packages/apalis-sql/src/mysql.rs | 5 ++++ packages/apalis-sql/src/sqlite.rs | 5 ++++ 3 files changed, 50 insertions(+), 5 deletions(-) diff --git a/packages/apalis-sql/src/lib.rs b/packages/apalis-sql/src/lib.rs index e74c7a5a..6ddf5c2b 
100644 --- a/packages/apalis-sql/src/lib.rs +++ b/packages/apalis-sql/src/lib.rs @@ -56,13 +56,13 @@ impl Default for Config { impl Config { /// Create a new config with a jobs namespace pub fn new(namespace: &str) -> Self { - Config::default().namespace(namespace) + Config::default().set_namespace(namespace) } /// Interval between database poll queries /// /// Defaults to 30ms - pub fn poll_interval(mut self, interval: Duration) -> Self { + pub fn set_poll_interval(mut self, interval: Duration) -> Self { self.poll_interval = interval; self } @@ -70,7 +70,7 @@ impl Config { /// Interval between worker keep-alive database updates /// /// Defaults to 30s - pub fn keep_alive(mut self, keep_alive: Duration) -> Self { + pub fn set_keep_alive(mut self, keep_alive: Duration) -> Self { self.keep_alive = keep_alive; self } @@ -78,7 +78,7 @@ impl Config { /// Buffer size to use when querying for jobs /// /// Defaults to 10 - pub fn buffer_size(mut self, buffer_size: usize) -> Self { + pub fn set_buffer_size(mut self, buffer_size: usize) -> Self { self.buffer_size = buffer_size; self } @@ -86,8 +86,43 @@ impl Config { /// Set the namespace to consume and push jobs to /// /// Defaults to "apalis::sql" - pub fn namespace(mut self, namespace: &str) -> Self { + pub fn set_namespace(mut self, namespace: &str) -> Self { self.namespace = namespace.to_owned(); self } + + /// Gets a reference to the keep_alive duration. + pub fn keep_alive(&self) -> &Duration { + &self.keep_alive + } + + /// Gets a mutable reference to the keep_alive duration. + pub fn keep_alive_mut(&mut self) -> &mut Duration { + &mut self.keep_alive + } + + /// Gets the buffer size. + pub fn buffer_size(&self) -> usize { + self.buffer_size + } + + /// Gets a reference to the poll_interval duration. + pub fn poll_interval(&self) -> &Duration { + &self.poll_interval + } + + /// Gets a mutable reference to the poll_interval duration. + pub fn poll_interval_mut(&mut self) -> &mut Duration { + &mut self.poll_interval + } + + /// Gets a reference to the namespace. + pub fn namespace(&self) -> &String { + &self.namespace + } + + /// Gets a mutable reference to the namespace. 
+ pub fn namespace_mut(&mut self) -> &mut String { + &mut self.namespace + } } diff --git a/packages/apalis-sql/src/mysql.rs b/packages/apalis-sql/src/mysql.rs index 7a2be327..b3ae4694 100644 --- a/packages/apalis-sql/src/mysql.rs +++ b/packages/apalis-sql/src/mysql.rs @@ -114,6 +114,11 @@ impl MysqlStorage { pub fn codec(&self) -> &BoxCodec { &self.codec } + + /// Get the config used by the storage + pub fn get_config(&self) -> &Config { + &self.config + } } impl MysqlStorage { diff --git a/packages/apalis-sql/src/sqlite.rs b/packages/apalis-sql/src/sqlite.rs index 6a3e25dd..751cbf04 100644 --- a/packages/apalis-sql/src/sqlite.rs +++ b/packages/apalis-sql/src/sqlite.rs @@ -149,6 +149,11 @@ impl SqliteStorage { pub fn codec(&self) -> &SqliteCodec { &self.codec } + + /// Get the config used by the storage + pub fn get_config(&self) -> &Config { + &self.config + } } async fn fetch_next( From d70e479c9242af68dceb736ebf22b73a2dfdadc1 Mon Sep 17 00:00:00 2001 From: Geoffrey Mureithi <95377562+geofmureithi@users.noreply.github.com> Date: Fri, 12 Jul 2024 23:57:46 +0300 Subject: [PATCH 27/59] fix: die if retries is zero (#365) --- src/layers/retry/mod.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/layers/retry/mod.rs b/src/layers/retry/mod.rs index 982fabad..53db3df3 100644 --- a/src/layers/retry/mod.rs +++ b/src/layers/retry/mod.rs @@ -46,6 +46,7 @@ where None } Err(_) if (self.retries - ctx.current() > 0) => Some(future::ready(self.clone())), + Err(_) if self.retries == 0 => None, Err(_) => None, } } From 97ff3489c38c1996d92610cd39cfd0d7c501d2c7 Mon Sep 17 00:00:00 2001 From: Geoffrey Mureithi <95377562+geofmureithi@users.noreply.github.com> Date: Sat, 13 Jul 2024 08:39:56 +0300 Subject: [PATCH 28/59] Feature: Add a layer that catches panics (#366) * Feature: Add a layer that catches panics This allows preventing job execution from killing workers and returns an error containing the backtrace * fix: backtrace as it may be different * add: example for catch-panic * fix: make not default --- Cargo.toml | 4 + README.md | 1 + examples/basics/Cargo.toml | 2 +- examples/basics/src/main.rs | 7 +- src/layers/catch_panic/mod.rs | 181 ++++++++++++++++++++++++++++++++++ src/layers/mod.rs | 5 + 6 files changed, 198 insertions(+), 2 deletions(-) create mode 100644 src/layers/catch_panic/mod.rs diff --git a/Cargo.toml b/Cargo.toml index 4c3dc410..4e719d5f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -33,6 +33,8 @@ timeout = ["tower/timeout"] limit = ["tower/limit"] ## Support filtering jobs based on a predicate filter = ["tower/filter"] +## Captures panics in executions and convert them to errors +catch-panic = ["dep:backtrace"] ## Compatibility with async-std and smol runtimes async-std-comp = ["async-std"] ## Compatibility with tokio and actix runtimes @@ -46,6 +48,7 @@ layers = [ "timeout", "limit", "filter", + "catch-panic", ] docsrs = ["document-features"] @@ -134,6 +137,7 @@ pin-project-lite = "0.2.14" uuid = { version = "1.8", optional = true } ulid = { version = "1", optional = true } serde = { version = "1.0", features = ["derive"] } +backtrace = { version = "0.3", optional = true } [dependencies.tracing] default-features = false diff --git a/README.md b/README.md index 663549cc..0b2234d7 100644 --- a/README.md +++ b/README.md @@ -126,6 +126,7 @@ async fn produce_route_jobs(storage: &RedisStorage) -> Result<()> { - _timeout_ — Support timeouts on jobs - _limit_ — 💪 Limit the amount of jobs - _filter_ — Support filtering jobs based on a predicate +- _catch-panic_ - Catch panics that occur 
during execution ## Storage Comparison diff --git a/examples/basics/Cargo.toml b/examples/basics/Cargo.toml index d77bccbe..feade0b9 100644 --- a/examples/basics/Cargo.toml +++ b/examples/basics/Cargo.toml @@ -8,7 +8,7 @@ license = "MIT OR Apache-2.0" [dependencies] thiserror = "1" tokio = { version = "1", features = ["full"] } -apalis = { path = "../../", features = ["limit", "tokio-comp"] } +apalis = { path = "../../", features = ["limit", "tokio-comp", "catch-panic"] } apalis-sql = { path = "../../packages/apalis-sql" } serde = "1" tracing-subscriber = "0.3.11" diff --git a/examples/basics/src/main.rs b/examples/basics/src/main.rs index 67a78a33..e6b1f0cb 100644 --- a/examples/basics/src/main.rs +++ b/examples/basics/src/main.rs @@ -4,7 +4,10 @@ mod service; use std::time::Duration; -use apalis::{layers::tracing::TraceLayer, prelude::*}; +use apalis::{ + layers::{catch_panic::CatchPanicLayer, tracing::TraceLayer}, + prelude::*, +}; use apalis_sql::sqlite::{SqlitePool, SqliteStorage}; use email_service::Email; @@ -96,6 +99,8 @@ async fn main() -> Result<(), std::io::Error> { Monitor::::new() .register_with_count(2, { WorkerBuilder::new("tasty-banana") + // This handles any panics that may occur in any of the layers below + .layer(CatchPanicLayer::new()) .layer(TraceLayer::new()) .layer(LogLayer::new("some-log-example")) // Add shared context to all jobs executed by this worker diff --git a/src/layers/catch_panic/mod.rs b/src/layers/catch_panic/mod.rs new file mode 100644 index 00000000..f7b149bf --- /dev/null +++ b/src/layers/catch_panic/mod.rs @@ -0,0 +1,181 @@ +use std::fmt; +use std::future::Future; +use std::panic::{catch_unwind, AssertUnwindSafe}; +use std::pin::Pin; +use std::task::{Context, Poll}; + +use apalis_core::error::Error; +use apalis_core::request::Request; +use backtrace::Backtrace; +use tower::Layer; +use tower::Service; + +/// Apalis Layer that catches panics in the service. +#[derive(Clone, Debug)] +pub struct CatchPanicLayer; + +impl CatchPanicLayer { + /// Creates a new `CatchPanicLayer`. + pub fn new() -> Self { + CatchPanicLayer + } +} + +impl Default for CatchPanicLayer { + fn default() -> Self { + Self::new() + } +} + +impl Layer for CatchPanicLayer { + type Service = CatchPanicService; + + fn layer(&self, service: S) -> Self::Service { + CatchPanicService { service } + } +} + +/// Apalis Service that catches panics. +#[derive(Clone, Debug)] +pub struct CatchPanicService { + service: S, +} + +impl Service> for CatchPanicService +where + S: Service, Response = Res, Error = Error>, +{ + type Response = S::Response; + type Error = S::Error; + type Future = CatchPanicFuture; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.service.poll_ready(cx) + } + + fn call(&mut self, request: Request) -> Self::Future { + CatchPanicFuture { + future: self.service.call(request), + } + } +} + +pin_project_lite::pin_project! 
{ + /// A wrapper that catches panics during execution + pub struct CatchPanicFuture { + #[pin] + future: F, + + } +} + +/// An error generated from a panic +#[derive(Debug, Clone)] +pub struct PanicError(pub String, pub Backtrace); + +impl std::error::Error for PanicError {} + +impl fmt::Display for PanicError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "PanicError: {}, Backtrace: {:?}", self.0, self.1) + } +} + +impl Future for CatchPanicFuture +where + F: Future>, +{ + type Output = Result; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let this = self.project(); + + match catch_unwind(AssertUnwindSafe(|| this.future.poll(cx))) { + Ok(res) => res, + Err(e) => { + let panic_info = if let Some(s) = e.downcast_ref::<&str>() { + s.to_string() + } else if let Some(s) = e.downcast_ref::() { + s.clone() + } else { + "Unknown panic".to_string() + }; + Poll::Ready(Err(Error::Failed(Box::new(PanicError( + panic_info, + Backtrace::new(), + ))))) + } + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + use std::task::{Context, Poll}; + use tower::Service; + + #[derive(Clone, Debug)] + struct TestJob; + + #[derive(Clone)] + struct TestService; + + impl Service> for TestService { + type Response = usize; + type Error = Error; + type Future = Pin> + Send>>; + + fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn call(&mut self, _req: Request) -> Self::Future { + Box::pin(async { Ok(42) }) + } + } + + #[tokio::test] + async fn test_catch_panic_layer() { + let layer = CatchPanicLayer::new(); + let mut service = layer.layer(TestService); + + let request = Request::new(TestJob); + let response = service.call(request).await; + + assert!(response.is_ok()); + } + + #[tokio::test] + async fn test_catch_panic_layer_panics() { + struct PanicService; + + impl Service> for PanicService { + type Response = usize; + type Error = Error; + type Future = Pin> + Send>>; + + fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn call(&mut self, _req: Request) -> Self::Future { + Box::pin(async { None.unwrap() }) + } + } + + let layer = CatchPanicLayer::new(); + let mut service = layer.layer(PanicService); + + let request = Request::new(TestJob); + let response = service.call(request).await; + + assert!(response.is_err()); + + assert_eq!( + response.unwrap_err().to_string()[0..87], + *"Task Failed: PanicError: called `Option::unwrap()` on a `None` value, Backtrace: 0: " + ); + } +} diff --git a/src/layers/mod.rs b/src/layers/mod.rs index e7b5e99e..f990573a 100644 --- a/src/layers/mod.rs +++ b/src/layers/mod.rs @@ -25,3 +25,8 @@ pub mod limit { #[cfg(feature = "timeout")] #[cfg_attr(docsrs, doc(cfg(feature = "timeout")))] pub use tower::timeout::TimeoutLayer; + +/// catch panic middleware for apalis +#[cfg(feature = "catch-panic")] +#[cfg_attr(docsrs, doc(cfg(feature = "catch-panic")))] +pub mod catch_panic; From 4ad94d21423062e13fc0510cf9c8822912252abf Mon Sep 17 00:00:00 2001 From: Geoffrey Mureithi <95377562+geofmureithi@users.noreply.github.com> Date: Sat, 13 Jul 2024 13:45:11 +0300 Subject: [PATCH 29/59] Feature: Save results for storages (#369) * Feature: Save results for storages Currently just the status is stored, this PR adds the ability to save the result * fix: result from storage * fix: kill and abort issue --- examples/redis/src/main.rs | 2 +- packages/apalis-core/src/error.rs | 16 ++-- packages/apalis-core/src/layers.rs | 47 +++++---- .../lua/{ack_job.lua => done_job.lua} | 
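Payload recovery in the catch-panic layer of PATCH 28 above relies on std's catch_unwind plus downcasting the boxed panic payload, which is a &str for panic!("literal") and a String once formatting is involved. A self-contained sketch of just that mechanism, outside the tower plumbing:

    use std::panic::{catch_unwind, AssertUnwindSafe};

    fn panic_message(payload: Box<dyn std::any::Any + Send>) -> String {
        // Mirrors the downcast order used by CatchPanicFuture above.
        if let Some(s) = payload.downcast_ref::<&str>() {
            s.to_string()
        } else if let Some(s) = payload.downcast_ref::<String>() {
            s.clone()
        } else {
            "Unknown panic".to_string()
        }
    }

    fn main() {
        let caught = catch_unwind(AssertUnwindSafe(|| {
            panic!("boom: {}", 42);
        }));
        let msg = caught.err().map(panic_message);
        assert_eq!(msg.as_deref(), Some("boom: 42"));
    }

AssertUnwindSafe is needed here for the same reason as in the layer: the wrapped closure (or future) holds state the compiler cannot prove unwind-safe.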
5 +- packages/apalis-redis/lua/kill_job.lua | 16 ++-- packages/apalis-redis/lua/retry_job.lua | 11 ++- packages/apalis-redis/src/storage.rs | 95 ++++++++++++------- packages/apalis-sql/src/context.rs | 2 +- packages/apalis-sql/src/lib.rs | 14 +++ packages/apalis-sql/src/mysql.rs | 35 ++++--- packages/apalis-sql/src/postgres.rs | 18 ++-- packages/apalis-sql/src/sqlite.rs | 14 +-- src/layers/catch_panic/mod.rs | 2 +- 13 files changed, 169 insertions(+), 108 deletions(-) rename packages/apalis-redis/lua/{ack_job.lua => done_job.lua} (73%) diff --git a/examples/redis/src/main.rs b/examples/redis/src/main.rs index 8fadcd52..3a352783 100644 --- a/examples/redis/src/main.rs +++ b/examples/redis/src/main.rs @@ -47,7 +47,7 @@ async fn main() -> Result<()> { produce_jobs(storage.clone()).await?; let worker = WorkerBuilder::new("rango-tango") - .chain(|svc| svc.map_err(|e| Error::Failed(e))) + .chain(|svc| svc.map_err(Error::Failed)) .layer(RateLimitLayer::new(5, Duration::from_secs(1))) .layer(TimeoutLayer::new(Duration::from_millis(500))) .data(Count::default()) diff --git a/packages/apalis-core/src/error.rs b/packages/apalis-core/src/error.rs index 7ba3cf56..0f526999 100644 --- a/packages/apalis-core/src/error.rs +++ b/packages/apalis-core/src/error.rs @@ -11,23 +11,27 @@ pub type BoxDynError = Box; #[non_exhaustive] pub enum Error { /// An error occurred during execution. - #[error("Task Failed: {0}")] + #[error("FailedError: {0}")] Failed(#[source] BoxDynError), /// A generic IO error - #[error("IO error: {0}")] + #[error("IoError: {0}")] Io(#[from] std::io::Error), /// Missing some context and yet it was requested during execution. - #[error("MissingContext: {0}")] - InvalidContext(String), + #[error("MissingContextError: {0}")] + MissingContext(String), /// Execution was aborted - #[error("Execution was aborted")] + #[error("AbortError")] Abort, + /// Execution failed and job will be retried + #[error("RetryError: {0}")] + Retry(#[source] BoxDynError), + /// Encountered an error during worker execution - #[error("Encountered an error during worker execution")] + #[error("WorkerError: {0}")] WorkerError(WorkerError), #[doc(hidden)] diff --git a/packages/apalis-core/src/layers.rs b/packages/apalis-core/src/layers.rs index bdc504bb..29984a2d 100644 --- a/packages/apalis-core/src/layers.rs +++ b/packages/apalis-core/src/layers.rs @@ -1,14 +1,13 @@ +use crate::{request::Request, worker::WorkerId}; use futures::channel::mpsc::{SendError, Sender}; use futures::SinkExt; +use futures::{future::BoxFuture, Future, FutureExt}; use std::marker::PhantomData; use std::{fmt, sync::Arc}; pub use tower::{ layer::layer_fn, layer::util::Identity, util::BoxCloneService, Layer, Service, ServiceBuilder, }; -use crate::{request::Request, worker::WorkerId}; -use futures::{future::BoxFuture, Future, FutureExt}; - /// A generic layer that has been stripped off types. /// This is returned by a [crate::Backend] and can be used to customize the middleware of the service consuming tasks pub struct CommonLayer { @@ -154,7 +153,9 @@ pub mod extensions { } /// A trait for acknowledging successful processing -pub trait Ack { +/// This trait is called even when a task fails. 
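The renamed variants in the error.rs hunk above make the Display prefixes part of the storage contract: the ack path stringifies the outcome, and the backends then branch on prefixes such as "RetryError" and "AbortError" (see the Redis ack and the calculate_status helper below). A sketch of that dispatch over the stringified Result the AckService produces (`route` is illustrative, not an apalis API):

    fn route(result: &Result<String, String>) -> &'static str {
        match result {
            Ok(_) => "done",
            Err(e) if e.starts_with("RetryError") => "retry",
            Err(e) if e.starts_with("AbortError") => "kill",
            Err(_) => "fail",
        }
    }

    fn main() {
        // Error::Retry(e) renders as "RetryError: {e}" per the derive above.
        let outcome: Result<String, String> = Err("RetryError: connection reset".to_string());
        assert_eq!(route(&outcome), "retry");
    }

This is why the prefixes are load-bearing strings: editing an #[error(..)] template silently changes whether a failed job is retried or killed.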
+/// This is a way of a [`Backend`] to save the result of a job or message +pub trait Ack { /// The data to fetch from context to allow acknowledgement type Acknowledger; /// The error returned by the ack @@ -167,21 +168,21 @@ pub trait Ack { } /// ACK response -#[derive(Debug, Clone)] +#[derive(Debug)] pub struct AckResponse { /// The worker id pub worker: WorkerId, /// The acknowledger pub acknowledger: A, /// The stringified result - pub result: String, + pub result: Result, } impl AckResponse { /// Output a json for the response pub fn to_json(&self) -> String { format!( - r#"{{"worker": "{}", "acknowledger": "{}", "result": "{}"}}"#, + r#"{{"worker": "{}", "acknowledger": "{}", "result": "{:?}"}}"#, self.worker, self.acknowledger, self.result ) } @@ -260,15 +261,15 @@ impl Clone for AckService { } } -impl Service> for AckService +impl Service> for AckService where - SV: Service> + Send + Sync + 'static, + SV: Service> + Send + Sync + 'static, SV::Error: std::error::Error + Send + Sync + 'static, - >>::Future: std::marker::Send + 'static, - A: Ack + Send + 'static + Clone + Send + Sync, - J: 'static, - >>::Response: std::marker::Send + fmt::Debug + Sync, - >::Acknowledger: Sync + Send + Clone, + >>::Future: std::marker::Send + 'static, + A: Ack + Send + 'static + Clone + Send + Sync, + T: 'static, + >>::Response: std::marker::Send + fmt::Debug + Sync, + >::Acknowledger: Sync + Send + Clone, { type Response = SV::Response; type Error = SV::Error; @@ -281,29 +282,33 @@ where self.service.poll_ready(cx) } - fn call(&mut self, request: Request) -> Self::Future { + fn call(&mut self, request: Request) -> Self::Future { let mut ack = self.ack.clone(); let worker_id = self.worker_id.clone(); - let data = request.get::<>::Acknowledger>().cloned(); + let data = request.get::<>::Acknowledger>().cloned(); let fut = self.service.call(request); let fut_with_ack = async move { let res = fut.await; + let result = res + .as_ref() + .map(|ok| format!("{ok:?}")) + .map_err(|e| e.to_string()); if let Some(task_id) = data { if let Err(_e) = ack .ack(AckResponse { worker: worker_id, acknowledger: task_id, - result: format!("{res:?}"), + result, }) .await { - // tracing::warn!("Acknowledgement Failed: {}", e); - // try get monitor, and emit + // TODO: Implement tracing in apalis core + // tracing::error!("Acknowledgement Failed: {}", e); } } else { - // tracing::warn!( + // tracing::error!( // "Acknowledgement could not be called due to missing ack data in context : {}", - // &std::any::type_name::<>::Acknowledger>() + // &std::any::type_name::<>::Acknowledger>() // ); } res diff --git a/packages/apalis-redis/lua/ack_job.lua b/packages/apalis-redis/lua/done_job.lua similarity index 73% rename from packages/apalis-redis/lua/ack_job.lua rename to packages/apalis-redis/lua/done_job.lua index a51b6a64..62be5a14 100644 --- a/packages/apalis-redis/lua/ack_job.lua +++ b/packages/apalis-redis/lua/done_job.lua @@ -1,17 +1,20 @@ -- KEYS[1]: this consumer's inflight set -- KEYS[2]: the done jobs set +-- KEYS[3]: the job data hash -- ARGV[1]: the job ID -- ARGV[2]: the current time +-- ARGV[3]: the result of the job -- Returns: bool -- Remove the job from this consumer's inflight set local removed = redis.call("srem", KEYS[1], ARGV[1]) - +local ns = "::result" if removed == 1 then -- Push the job on to the done jobs set redis.call("zadd", KEYS[2], ARGV[2], ARGV[1]) + redis.call("hmset", KEYS[3].. 
ns, ARGV[1], ARGV[3] ) return true end diff --git a/packages/apalis-redis/lua/kill_job.lua b/packages/apalis-redis/lua/kill_job.lua index 3bc7dd30..ebe8d684 100644 --- a/packages/apalis-redis/lua/kill_job.lua +++ b/packages/apalis-redis/lua/kill_job.lua @@ -1,24 +1,22 @@ -- KEYS[1]: this consumer's inflight set -- KEYS[2]: the dead jobs set -- KEYS[3]: the job data hash - -- ARGV[1]: the job ID -- ARGV[2]: the current time --- ARGV[3]: the serialized job data - +-- ARGV[3]: the result of the job -- Returns: nil - -- Remove the job from this consumer's inflight set local removed = redis.call("srem", KEYS[1], ARGV[1]) if removed == 1 then - -- Push the job on to the dead jobs set - redis.call("zadd", KEYS[2], ARGV[2], ARGV[1]) + -- Push the job on to the dead jobs set + redis.call("zadd", KEYS[2], ARGV[2], ARGV[1]) - -- Reset the job data - redis.call("hset", KEYS[3], ARGV[1], ARGV[3]) + -- Save the result of the job + local ns = "::result" + redis.call("hmset", KEYS[3] .. ns, ARGV[1], ARGV[3]) - return 1 + return 1 end return 0 diff --git a/packages/apalis-redis/lua/retry_job.lua b/packages/apalis-redis/lua/retry_job.lua index 6d13b8da..e3df0e76 100644 --- a/packages/apalis-redis/lua/retry_job.lua +++ b/packages/apalis-redis/lua/retry_job.lua @@ -4,7 +4,7 @@ -- ARGV[1]: the job ID -- ARGV[2]: the time at which to retry --- ARGV[3]: the serialized job data +-- ARGV[3]: the result of the job -- Returns: nil @@ -15,8 +15,15 @@ if removed == 1 then -- Push the job on to the scheduled set redis.call("zadd", KEYS[2], ARGV[2], ARGV[1]) + local job = redis.call('HGET', KEYS[3], ARGV[1]) + -- Reset the job data - redis.call("hset", KEYS[3], ARGV[1], ARGV[3]) + redis.call("hset", KEYS[3], ARGV[1], job) + + -- Save the result of the job + local ns = "::result" + redis.call("hmset", KEYS[3].. 
ns, ARGV[1], ARGV[4] ) + end return removed diff --git a/packages/apalis-redis/src/storage.rs b/packages/apalis-redis/src/storage.rs index 008c7f18..f7510baf 100644 --- a/packages/apalis-redis/src/storage.rs +++ b/packages/apalis-redis/src/storage.rs @@ -78,7 +78,7 @@ pub struct RedisQueueInfo { #[derive(Clone, Debug)] struct RedisScript { - ack_job: Script, + done_job: Script, enqueue_scheduled: Script, get_jobs: Script, kill_job: Script, @@ -415,7 +415,7 @@ impl RedisStorage { config, codec: Arc::new(Box::new(codec)), scripts: RedisScript { - ack_job: redis::Script::new(include_str!("../lua/ack_job.lua")), + done_job: redis::Script::new(include_str!("../lua/done_job.lua")), push_job: redis::Script::new(include_str!("../lua/push_job.lua")), retry_job: redis::Script::new(include_str!("../lua/retry_job.lua")), enqueue_scheduled: redis::Script::new(include_str!( @@ -535,19 +535,59 @@ impl Ack type Acknowledger = TaskId; type Error = RedisError; async fn ack(&mut self, res: AckResponse) -> Result<(), RedisError> { - let ack_job = self.scripts.ack_job.clone(); let inflight_set = format!("{}:{}", self.config.inflight_jobs_set(), res.worker); - let done_jobs_set = &self.config.done_jobs_set(); - let now: i64 = res.acknowledger.inner().timestamp_ms().try_into().unwrap(); + let now: i64 = Utc::now().timestamp(); - ack_job - .key(inflight_set) - .key(done_jobs_set) - .arg(res.acknowledger.to_string()) - .arg(now) - .invoke_async(&mut self.conn) - .await + match res.result { + Ok(success_res) => { + let done_job = self.scripts.done_job.clone(); + let done_jobs_set = &self.config.done_jobs_set(); + done_job + .key(inflight_set) + .key(done_jobs_set) + .key(self.config.job_data_hash()) + .arg(res.acknowledger.to_string()) + .arg(now) + .arg(success_res) + .invoke_async(&mut self.conn) + .await + } + Err(e) => match e { + e if e.contains("BackoffRetry") => { + //do nothing, should be handled by BackoffLayer + Ok(()) + } + + e if e.starts_with("RetryError") => { + let retry_job = self.scripts.retry_job.clone(); + let retry_jobs_set = &self.config.scheduled_jobs_set(); + retry_job + .key(inflight_set) + .key(retry_jobs_set) + .key(self.config.job_data_hash()) + .arg(res.acknowledger.to_string()) + .arg(now) + .arg(e) + .invoke_async(&mut self.conn) + .await + } + + _ => { + let kill_job = self.scripts.kill_job.clone(); + let kill_jobs_set = &self.config.dead_jobs_set(); + kill_job + .key(inflight_set) + .key(kill_jobs_set) + .key(self.config.job_data_hash()) + .arg(res.acknowledger.to_string()) + .arg(now) + .arg(e) + .invoke_async(&mut self.conn) + .await + } + }, + } } } @@ -862,27 +902,16 @@ impl RedisStorage { let current_worker_id = format!("{}:{}", self.config.inflight_jobs_set(), worker_id); let job_data_hash = self.config.job_data_hash(); let dead_jobs_set = self.config.dead_jobs_set(); - let fetch_job = self.fetch_by_id(task_id); let now: i64 = Utc::now().timestamp(); - let res = fetch_job.await?; - match res { - Some(job) => { - let data = self - .codec - .encode(&job.try_into()?) 
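Each outcome now lands in a companion hash keyed off the job data hash with a "::result" suffix, as done_job.lua and kill_job.lua above show. A sketch of reading a stored result back with the redis crate, assuming `conn` comes from the storage's get_connection and `job_data_hash` is the key the ack implementation passes to the scripts:

    use redis::AsyncCommands;

    async fn fetch_result(
        conn: &mut redis::aio::ConnectionManager,
        job_data_hash: &str,
        task_id: &str,
    ) -> redis::RedisResult<Option<String>> {
        // The Lua scripts write results with hmset on "<job_data_hash>::result".
        conn.hget(format!("{job_data_hash}::result"), task_id).await
    }

A missing field comes back as None, which distinguishes "never acknowledged" from an empty result string.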
- .map_err(|e| (ErrorKind::IoError, "Encode error", e.to_string()))?; - kill_job - .key(current_worker_id) - .key(dead_jobs_set) - .key(job_data_hash) - .arg(task_id.to_string()) - .arg(now) - .arg(data) - .invoke_async(&mut self.conn) - .await - } - None => Err(RedisError::from((ErrorKind::ResponseError, "Id not found"))), - } + kill_job + .key(current_worker_id) + .key(dead_jobs_set) + .key(job_data_hash) + .arg(task_id.to_string()) + .arg(now) + .arg("AbortError") + .invoke_async(&mut self.conn) + .await } /// Required to add scheduled jobs to the active set @@ -1051,7 +1080,7 @@ mod tests { storage .ack(AckResponse { acknowledger: job_id.clone(), - result: "Success".to_string(), + result: Ok("Success".to_string()), worker: worker_id.clone(), }) .await diff --git a/packages/apalis-sql/src/context.rs b/packages/apalis-sql/src/context.rs index 5bc1b135..a0774993 100644 --- a/packages/apalis-sql/src/context.rs +++ b/packages/apalis-sql/src/context.rs @@ -162,7 +162,7 @@ impl FromStr for State { "Retry" => Ok(State::Retry), "Failed" => Ok(State::Failed), "Killed" => Ok(State::Killed), - _ => Err(Error::InvalidContext("Invalid Job state".to_string())), + _ => Err(Error::MissingContext("Invalid Job state".to_string())), } } } diff --git a/packages/apalis-sql/src/lib.rs b/packages/apalis-sql/src/lib.rs index 6ddf5c2b..b5d21133 100644 --- a/packages/apalis-sql/src/lib.rs +++ b/packages/apalis-sql/src/lib.rs @@ -12,6 +12,8 @@ use std::time::Duration; +use context::State; + /// The context of the sql job pub mod context; /// Util for fetching rows @@ -126,3 +128,15 @@ impl Config { &mut self.namespace } } + +/// Calculates the status from a result +pub(crate) fn calculate_status(res: &Result) -> State { + match res { + Ok(_) => State::Done, + Err(e) => match &e { + _ if e.starts_with("RetryError") => State::Retry, + _ if e.starts_with("AbortError") => State::Killed, + _ => State::Failed, + }, + } +} diff --git a/packages/apalis-sql/src/mysql.rs b/packages/apalis-sql/src/mysql.rs index b3ae4694..1dcbdb28 100644 --- a/packages/apalis-sql/src/mysql.rs +++ b/packages/apalis-sql/src/mysql.rs @@ -27,7 +27,7 @@ use std::{marker::PhantomData, ops::Add, time::Duration}; use crate::context::SqlContext; use crate::from_row::SqlRequest; -use crate::Config; +use crate::{calculate_status, Config}; pub use sqlx::mysql::MySqlPool; @@ -39,7 +39,7 @@ pub struct MysqlStorage { controller: Controller, config: Config, codec: BoxCodec, - ack_notify: Notify<(WorkerId, TaskId)>, + ack_notify: Notify>, } impl fmt::Debug for MysqlStorage { @@ -395,22 +395,19 @@ impl Backend = ids.iter().map(|c| c.0.to_string()).collect(); - let task_ids: Vec = ids.iter().map(|c| c.1.to_string()).collect(); - let id_params = format!("?{}", ", ?".repeat(task_ids.len() - 1)); - let worker_params = format!("?{}", ", ?".repeat(worker_ids.len() - 1)); - let query = - format!("UPDATE jobs SET status = 'Done', done_at = now() WHERE id IN ( { } ) AND lock_by IN ( { } )", id_params, worker_params); - let mut query = sqlx::query(&query); - for i in task_ids { - query = query.bind(i); - } - for i in worker_ids { - query = query.bind(i); - } - if let Err(e) = query.execute(&pool).await { - error!("Ack failed: {e}"); + for id in ids { + let query = "UPDATE jobs SET status = ?, done_at = now(), last_error = ? WHERE id = ? 
AND lock_by = ?"; + let query = sqlx::query(query); + let query = query + .bind(calculate_status(&id.result).to_string()) + .bind(serde_json::to_string(&id.result).unwrap()) + .bind(id.acknowledger.to_string()) + .bind(id.worker.to_string()); + if let Err(e) = query.execute(&pool).await { + error!("Ack failed: {e}"); + } } + apalis_core::sleep(config.poll_interval).await; } }; @@ -439,7 +436,7 @@ impl Ack for MysqlStorage { type Error = sqlx::Error; async fn ack(&mut self, response: AckResponse) -> Result<(), sqlx::Error> { self.ack_notify - .notify((response.worker.clone(), response.acknowledger.clone())) + .notify(response) .map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::BrokenPipe, e)))?; Ok(()) @@ -646,7 +643,7 @@ mod tests { storage .ack(AckResponse { acknowledger: job_id.clone(), - result: "Success".to_string(), + result: Ok("Success".to_string()), worker: worker_id.clone(), }) .await diff --git a/packages/apalis-sql/src/postgres.rs b/packages/apalis-sql/src/postgres.rs index 53f3e32c..773ee453 100644 --- a/packages/apalis-sql/src/postgres.rs +++ b/packages/apalis-sql/src/postgres.rs @@ -39,7 +39,7 @@ //! } //! ``` use crate::context::SqlContext; -use crate::Config; +use crate::{calculate_status, Config}; use apalis_core::codec::json::JsonCodec; use apalis_core::error::Error; use apalis_core::layers::{Ack, AckLayer, AckResponse}; @@ -174,14 +174,16 @@ impl Backend { if let Some(ids) = ids { - let worker_ids: Vec = ids.iter().map(|c| c.worker.to_string()).collect(); - let task_ids: Vec = ids.iter().map(|c| c.acknowledger.to_string()).collect(); - + let ack_ids: Vec<(String, String, String, String)> = ids.iter().map(|c| { + (c.acknowledger.to_string(), c.worker.to_string(), serde_json::to_string(&c.result).unwrap(), calculate_status(&c.result).to_string()) + }).collect(); let query = - "UPDATE apalis.jobs SET status = 'Done', done_at = now() WHERE id = ANY($1::text[]) AND lock_by = ANY($2::text[])"; + "UPDATE apalis.jobs SET status = Q.status, done_at = now(), lock_by = Q.lock_by, last_error = Q.result FROM ( + SELECT(value-->0)::text as id, (value->>1)::text as worker_id, (value->>2)::text as result, (value->>3)::text as status FROM json_array_elements($1) + ) Q + WHERE id = Q.id"; if let Err(e) = sqlx::query(query) - .bind(task_ids) - .bind(worker_ids) + .bind(serde_json::to_string(&ack_ids).unwrap()) .execute(&pool) .await { @@ -725,7 +727,7 @@ mod tests { storage .ack(AckResponse { acknowledger: job_id.clone(), - result: "Success".to_string(), + result: Ok("Success".to_string()), worker: worker_id.clone(), }) .await diff --git a/packages/apalis-sql/src/sqlite.rs b/packages/apalis-sql/src/sqlite.rs index 751cbf04..c6b7d4cd 100644 --- a/packages/apalis-sql/src/sqlite.rs +++ b/packages/apalis-sql/src/sqlite.rs @@ -1,5 +1,5 @@ use crate::context::SqlContext; -use crate::Config; +use crate::{calculate_status, Config}; use apalis_core::codec::json::JsonCodec; use apalis_core::error::Error; @@ -209,8 +209,7 @@ impl SqliteStorage { let (req, ctx) = job.into_tuple(); let req = codec .decode(&req) - .map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::InvalidData, e))) - .unwrap(); + .map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::InvalidData, e)))?; let req = SqlRequest::new(req, ctx); let mut req: Request = req.into(); req.insert(Namespace(config.namespace.clone())); @@ -485,11 +484,14 @@ impl Ack for SqliteStorage { async fn ack(&mut self, res: AckResponse) -> Result<(), sqlx::Error> { let pool = self.pool.clone(); let query = - "UPDATE Jobs SET status = 
'Done', done_at = strftime('%s','now'), last_error = ?3 WHERE id = ?1 AND lock_by = ?2"; + "UPDATE Jobs SET status = ?4, done_at = strftime('%s','now'), last_error = ?3 WHERE id = ?1 AND lock_by = ?2"; + let result = serde_json::to_string(&res.result) + .map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::InvalidData, e)))?; sqlx::query(query) .bind(res.acknowledger.to_string()) .bind(res.worker.to_string()) - .bind(res.result) + .bind(result) + .bind(calculate_status(&res.result).to_string()) .execute(&pool) .await?; Ok(()) @@ -614,7 +616,7 @@ mod tests { storage .ack(AckResponse { acknowledger: job_id.clone(), - result: "Success".to_string(), + result: Ok("Success".to_string()), worker: worker_id.clone(), }) .await diff --git a/src/layers/catch_panic/mod.rs b/src/layers/catch_panic/mod.rs index f7b149bf..c9c17917 100644 --- a/src/layers/catch_panic/mod.rs +++ b/src/layers/catch_panic/mod.rs @@ -175,7 +175,7 @@ mod tests { assert_eq!( response.unwrap_err().to_string()[0..87], - *"Task Failed: PanicError: called `Option::unwrap()` on a `None` value, Backtrace: 0: " + *"FailedError: PanicError: called `Option::unwrap()` on a `None` value, Backtrace: 0: " ); } } From 144d7ef574276125e679cbbff81176150d3cf4e6 Mon Sep 17 00:00:00 2001 From: Geoffrey Mureithi <95377562+geofmureithi@users.noreply.github.com> Date: Sat, 13 Jul 2024 13:56:21 +0300 Subject: [PATCH 30/59] Bump: to 0.6.0-rc.3 (#370) --- Cargo.toml | 4 ++-- packages/apalis-core/Cargo.toml | 2 +- packages/apalis-cron/Cargo.toml | 4 ++-- packages/apalis-redis/Cargo.toml | 4 ++-- packages/apalis-sql/Cargo.toml | 4 ++-- 5 files changed, 9 insertions(+), 9 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 4e719d5f..a1510edd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apalis" -version = "0.6.0-rc.2" +version = "0.6.0-rc.3" authors = ["Geoffrey Mureithi "] description = "Simple, extensible multithreaded background job processing for Rust" repository = "https://github.com/geofmureithi/apalis" @@ -54,7 +54,7 @@ layers = [ docsrs = ["document-features"] [dependencies.apalis-core] -version = "0.6.0-rc.2" +version = "0.6.0-rc.3" default-features = false path = "./packages/apalis-core" diff --git a/packages/apalis-core/Cargo.toml b/packages/apalis-core/Cargo.toml index 398d48d7..2a809bf9 100644 --- a/packages/apalis-core/Cargo.toml +++ b/packages/apalis-core/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apalis-core" -version = "0.6.0-rc.2" +version = "0.6.0-rc.3" authors = ["Njuguna Mureithi "] edition = "2021" license = "MIT" diff --git a/packages/apalis-cron/Cargo.toml b/packages/apalis-cron/Cargo.toml index c31505da..f6b42060 100644 --- a/packages/apalis-cron/Cargo.toml +++ b/packages/apalis-cron/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apalis-cron" -version = "0.6.0-rc.2" +version = "0.6.0-rc.3" edition = "2021" authors = ["Njuguna Mureithi "] license = "MIT" @@ -9,7 +9,7 @@ description = "A simple yet extensible library for cron-like job scheduling for # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -apalis-core = { path = "../../packages/apalis-core", version = "0.6.0-rc.2", default-features = false, features = [ +apalis-core = { path = "../../packages/apalis-core", version = "0.6.0-rc.3", default-features = false, features = [ "sleep", ] } cron = "0.12.1" diff --git a/packages/apalis-redis/Cargo.toml b/packages/apalis-redis/Cargo.toml index d2dd41aa..023ec645 100644 --- a/packages/apalis-redis/Cargo.toml +++ 
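Because the storages now persist the whole Result<String, String>, the last_error column carries serde's externally tagged encoding rather than a bare message. A quick sketch of what actually lands in the column under the sqlite ack above:

    fn main() {
        let ok: Result<String, String> = Ok("Success".to_string());
        let err: Result<String, String> = Err("FailedError: boom".to_string());
        assert_eq!(serde_json::to_string(&ok).unwrap(), r#"{"Ok":"Success"}"#);
        assert_eq!(serde_json::to_string(&err).unwrap(), r#"{"Err":"FailedError: boom"}"#);
    }

Anything reading last_error directly, such as dashboards or ad-hoc SQL, needs to unwrap that envelope.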
b/packages/apalis-redis/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apalis-redis" -version = "0.6.0-rc.2" +version = "0.6.0-rc.3" authors = ["Njuguna Mureithi "] edition = "2021" readme = "../../README.md" @@ -11,7 +11,7 @@ description = "Redis Storage for apalis: use Redis for background jobs and messa # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -apalis-core = { path = "../../packages/apalis-core", version = "0.6.0-rc.2", default-features = false, features = [ +apalis-core = { path = "../../packages/apalis-core", version = "0.6.0-rc.3", default-features = false, features = [ "sleep", "json", ] } diff --git a/packages/apalis-sql/Cargo.toml b/packages/apalis-sql/Cargo.toml index 01d01002..8e984f62 100644 --- a/packages/apalis-sql/Cargo.toml +++ b/packages/apalis-sql/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apalis-sql" -version = "0.6.0-rc.2" +version = "0.6.0-rc.3" authors = ["Njuguna Mureithi "] edition = "2021" readme = "../../README.md" @@ -25,7 +25,7 @@ features = ["chrono"] [dependencies] serde = { version = "1", features = ["derive"] } serde_json = "1" -apalis-core = { path = "../../packages/apalis-core", version = "0.6.0-rc.2", default-features = false, features = [ +apalis-core = { path = "../../packages/apalis-core", version = "0.6.0-rc.3", default-features = false, features = [ "sleep", "json", ] } From 95618ffcfa391a84055ce0495b672adff5ceefe7 Mon Sep 17 00:00:00 2001 From: Geoffrey Mureithi <95377562+geofmureithi@users.noreply.github.com> Date: Sat, 13 Jul 2024 21:06:14 +0300 Subject: [PATCH 31/59] fix: serde for sql request (#371) * fix: serde for sql request * fix: serde for attempts * lint: fmt --- packages/apalis-core/src/task/attempt.rs | 43 +++++++++++++++++++++++- packages/apalis-sql/src/context.rs | 2 +- packages/apalis-sql/src/from_row.rs | 3 +- 3 files changed, 45 insertions(+), 3 deletions(-) diff --git a/packages/apalis-core/src/task/attempt.rs b/packages/apalis-core/src/task/attempt.rs index ba557c4b..3f4825a1 100644 --- a/packages/apalis-core/src/task/attempt.rs +++ b/packages/apalis-core/src/task/attempt.rs @@ -1,9 +1,50 @@ -use std::sync::{atomic::AtomicUsize, Arc}; +use std::sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, +}; + +use serde::{Deserialize, Deserializer, Serialize, Serializer}; /// A wrapper to keep count of the attempts tried by a task #[derive(Debug, Clone)] pub struct Attempt(Arc); +// Custom serialization function +fn serialize(attempt: &Attempt, serializer: S) -> Result +where + S: Serializer, +{ + let value = attempt.0.load(Ordering::SeqCst); + serializer.serialize_u64(value as u64) +} + +// Custom deserialization function +fn deserialize<'de, D>(deserializer: D) -> Result +where + D: Deserializer<'de>, +{ + let value = u64::deserialize(deserializer)?; + Ok(Attempt(Arc::new(AtomicUsize::new(value as usize)))) +} + +impl Serialize for Attempt { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + serialize(self, serializer) + } +} + +impl<'de> Deserialize<'de> for Attempt { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + deserialize(deserializer) + } +} + impl Default for Attempt { fn default() -> Self { Self(Arc::new(AtomicUsize::new(0))) diff --git a/packages/apalis-sql/src/context.rs b/packages/apalis-sql/src/context.rs index a0774993..4b295419 100644 --- a/packages/apalis-sql/src/context.rs +++ b/packages/apalis-sql/src/context.rs @@ -7,7 +7,7 @@ use std::{fmt, str::FromStr}; /// The context for a job is 
represented here /// Used to provide a context for a job with an sql backend -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Serialize, Deserialize)] pub struct SqlContext { id: TaskId, status: State, diff --git a/packages/apalis-sql/src/from_row.rs b/packages/apalis-sql/src/from_row.rs index 00d748f7..b47f75c9 100644 --- a/packages/apalis-sql/src/from_row.rs +++ b/packages/apalis-sql/src/from_row.rs @@ -1,10 +1,11 @@ use apalis_core::task::task_id::TaskId; use apalis_core::{data::Extensions, request::Request, worker::WorkerId}; +use serde::{Deserialize, Serialize}; use sqlx::{Decode, Type}; use crate::context::SqlContext; /// Wrapper for [Request] -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Serialize, Deserialize)] pub struct SqlRequest { req: T, context: SqlContext, From 1238fb0b5eedcac73390e4b249b0d07f6abfff27 Mon Sep 17 00:00:00 2001 From: Geoffrey Mureithi <95377562+geofmureithi@users.noreply.github.com> Date: Tue, 16 Jul 2024 23:08:16 +0300 Subject: [PATCH 32/59] fix: handle attempts in storages (#373) * fix: handle attempts in storages * fix: chrono serialization * fix: tests failing because of tests --- packages/apalis-core/src/layers.rs | 19 ++++++++----------- packages/apalis-redis/src/storage.rs | 1 + packages/apalis-sql/Cargo.toml | 2 ++ packages/apalis-sql/src/context.rs | 2 +- packages/apalis-sql/src/from_row.rs | 2 +- packages/apalis-sql/src/mysql.rs | 7 +++++-- packages/apalis-sql/src/postgres.rs | 15 +++++++++------ packages/apalis-sql/src/sqlite.rs | 10 +++++++--- 8 files changed, 34 insertions(+), 24 deletions(-) diff --git a/packages/apalis-core/src/layers.rs b/packages/apalis-core/src/layers.rs index 29984a2d..82f9ca04 100644 --- a/packages/apalis-core/src/layers.rs +++ b/packages/apalis-core/src/layers.rs @@ -1,7 +1,9 @@ +use crate::task::attempt::Attempt; use crate::{request::Request, worker::WorkerId}; use futures::channel::mpsc::{SendError, Sender}; use futures::SinkExt; use futures::{future::BoxFuture, Future, FutureExt}; +use serde::{Deserialize, Serialize}; use std::marker::PhantomData; use std::{fmt, sync::Arc}; pub use tower::{ @@ -168,7 +170,7 @@ pub trait Ack { } /// ACK response -#[derive(Debug)] +#[derive(Debug, Serialize, Deserialize)] pub struct AckResponse { /// The worker id pub worker: WorkerId, @@ -176,16 +178,8 @@ pub struct AckResponse { pub acknowledger: A, /// The stringified result pub result: Result, -} - -impl AckResponse { - /// Output a json for the response - pub fn to_json(&self) -> String { - format!( - r#"{{"worker": "{}", "acknowledger": "{}", "result": "{:?}"}}"#, - self.worker, self.acknowledger, self.result - ) - } + /// The number of attempts made by the request + pub attempts: Attempt, } /// A generic stream that emits (worker_id, task_id) @@ -286,6 +280,8 @@ where let mut ack = self.ack.clone(); let worker_id = self.worker_id.clone(); let data = request.get::<>::Acknowledger>().cloned(); + let attempts = request.get::().cloned().unwrap_or_default(); + let fut = self.service.call(request); let fut_with_ack = async move { let res = fut.await; @@ -299,6 +295,7 @@ where worker: worker_id, acknowledger: task_id, result, + attempts, }) .await { diff --git a/packages/apalis-redis/src/storage.rs b/packages/apalis-redis/src/storage.rs index f7510baf..0dcacff9 100644 --- a/packages/apalis-redis/src/storage.rs +++ b/packages/apalis-redis/src/storage.rs @@ -1082,6 +1082,7 @@ mod tests { acknowledger: job_id.clone(), result: Ok("Success".to_string()), worker: worker_id.clone(), + attempts: Attempt::new_with_value(0) }) .await 
.expect("failed to acknowledge the job"); diff --git a/packages/apalis-sql/Cargo.toml b/packages/apalis-sql/Cargo.toml index 8e984f62..8187dd16 100644 --- a/packages/apalis-sql/Cargo.toml +++ b/packages/apalis-sql/Cargo.toml @@ -35,6 +35,8 @@ async-stream = "0.3.5" tokio = { version = "1", features = ["rt", "net"], optional = true } futures-lite = "2.3.0" async-std = { version = "1.12.0", optional = true } +chrono = { version = "0.4", features = ["serde"] } + [dev-dependencies] tokio = { version = "1", features = ["macros", "rt-multi-thread"] } diff --git a/packages/apalis-sql/src/context.rs b/packages/apalis-sql/src/context.rs index 4b295419..7d5f3856 100644 --- a/packages/apalis-sql/src/context.rs +++ b/packages/apalis-sql/src/context.rs @@ -2,7 +2,7 @@ use apalis_core::error::Error; use apalis_core::task::{attempt::Attempt, task_id::TaskId}; use apalis_core::worker::WorkerId; use serde::{Deserialize, Serialize}; -use sqlx::types::chrono::{DateTime, Utc}; +use chrono::{DateTime, Utc}; use std::{fmt, str::FromStr}; /// The context for a job is represented here diff --git a/packages/apalis-sql/src/from_row.rs b/packages/apalis-sql/src/from_row.rs index b47f75c9..7151e1ef 100644 --- a/packages/apalis-sql/src/from_row.rs +++ b/packages/apalis-sql/src/from_row.rs @@ -70,7 +70,7 @@ impl<'r, T: Decode<'r, sqlx::Sqlite> + Type> sqlx::FromRow<'r, sqlx::sqlite::SqliteRow> for SqlRequest { fn from_row(row: &'r sqlx::sqlite::SqliteRow) -> Result { - use sqlx::types::chrono::DateTime; + use chrono::DateTime; use sqlx::Row; use std::str::FromStr; diff --git a/packages/apalis-sql/src/mysql.rs b/packages/apalis-sql/src/mysql.rs index 1dcbdb28..afedb48c 100644 --- a/packages/apalis-sql/src/mysql.rs +++ b/packages/apalis-sql/src/mysql.rs @@ -17,7 +17,7 @@ use log::error; use serde::{de::DeserializeOwned, Serialize}; use serde_json::Value; use sqlx::mysql::MySqlRow; -use sqlx::types::chrono::{DateTime, Utc}; +use chrono::{DateTime, Utc}; use sqlx::{MySql, Pool, Row}; use std::any::type_name; use std::convert::TryInto; @@ -396,11 +396,12 @@ impl Backend Backend { if let Some(ids) = ids { - let ack_ids: Vec<(String, String, String, String)> = ids.iter().map(|c| { - (c.acknowledger.to_string(), c.worker.to_string(), serde_json::to_string(&c.result).unwrap(), calculate_status(&c.result).to_string()) + let ack_ids: Vec<(String, String, String, String, u64)> = ids.iter().map(|c| { + (c.acknowledger.to_string(), c.worker.to_string(), serde_json::to_string(&c.result).unwrap(), calculate_status(&c.result).to_string(), c.attempts.current() as u64) }).collect(); let query = - "UPDATE apalis.jobs SET status = Q.status, done_at = now(), lock_by = Q.lock_by, last_error = Q.result FROM ( - SELECT(value-->0)::text as id, (value->>1)::text as worker_id, (value->>2)::text as result, (value->>3)::text as status FROM json_array_elements($1) + "UPDATE apalis.jobs SET status = Q.status, done_at = now(), lock_by = Q.lock_by, last_error = Q.result, attempts = Q.attempts FROM ( + SELECT(value-->0)::text as id, (value->>1)::text as worker_id, (value->>2)::text as result, (value->>3)::text as status, (value->>4)::int as attempts FROM json_array_elements($1) ) Q WHERE id = Q.id"; if let Err(e) = sqlx::query(query) @@ -611,8 +611,9 @@ mod tests { use crate::context::State; use super::*; + use apalis_core::task::attempt::Attempt; use email_service::Email; - use sqlx::types::chrono::Utc; + use chrono::Utc; /// migrate DB and return a storage instance. 
async fn setup() -> PostgresStorage { @@ -729,6 +730,8 @@ mod tests { acknowledger: job_id.clone(), result: Ok("Success".to_string()), worker: worker_id.clone(), + attempts: Attempt::new_with_value(0) + }) .await .expect("failed to acknowledge the job"); diff --git a/packages/apalis-sql/src/sqlite.rs b/packages/apalis-sql/src/sqlite.rs index c6b7d4cd..fba94bf0 100644 --- a/packages/apalis-sql/src/sqlite.rs +++ b/packages/apalis-sql/src/sqlite.rs @@ -16,7 +16,7 @@ use apalis_core::{Backend, BoxCodec}; use async_stream::try_stream; use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; use serde::{de::DeserializeOwned, Serialize}; -use sqlx::types::chrono::Utc; +use chrono::Utc; use sqlx::{Pool, Row, Sqlite}; use std::any::type_name; use std::convert::TryInto; @@ -484,7 +484,7 @@ impl Ack for SqliteStorage { async fn ack(&mut self, res: AckResponse) -> Result<(), sqlx::Error> { let pool = self.pool.clone(); let query = - "UPDATE Jobs SET status = ?4, done_at = strftime('%s','now'), last_error = ?3 WHERE id = ?1 AND lock_by = ?2"; + "UPDATE Jobs SET status = ?4, done_at = strftime('%s','now'), last_error = ?3, attempts =?5 WHERE id = ?1 AND lock_by = ?2"; let result = serde_json::to_string(&res.result) .map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::InvalidData, e)))?; sqlx::query(query) @@ -492,6 +492,7 @@ impl Ack for SqliteStorage { .bind(res.worker.to_string()) .bind(result) .bind(calculate_status(&res.result).to_string()) + .bind(res.attempts.current() as i64) .execute(&pool) .await?; Ok(()) @@ -504,9 +505,10 @@ mod tests { use crate::context::State; use super::*; + use apalis_core::task::attempt::Attempt; use email_service::Email; use futures::StreamExt; - use sqlx::types::chrono::Utc; + use chrono::Utc; /// migrate DB and return a storage instance. 
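The Postgres ack above flushes an entire buffer in one round trip by binding the pending acks as a JSON array of (id, worker, result, status, attempts) tuples and exploding it server-side with json_array_elements. The bound $1 value is just the serialized tuple list, for example:

    fn main() {
        let ack_ids: Vec<(String, String, String, String, u64)> = vec![(
            "task-1".to_string(),              // task id
            "test-worker".to_string(),         // worker id
            r#"{"Ok":"Success"}"#.to_string(), // serialized Result
            "Done".to_string(),                // status from calculate_status
            1,                                 // attempts
        )];
        // This string is what gets bound to $1 in the UPDATE above.
        assert_eq!(
            serde_json::to_string(&ack_ids).unwrap(),
            r#"[["task-1","test-worker","{\"Ok\":\"Success\"}","Done",1]]"#
        );
    }

Note the (value-->0) in the query text above: Postgres defines ->> but no --> operator, so ->> appears to be what was intended there.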
async fn setup() -> SqliteStorage { @@ -618,6 +620,8 @@ mod tests { acknowledger: job_id.clone(), result: Ok("Success".to_string()), worker: worker_id.clone(), + attempts: Attempt::new_with_value(0) + }) .await .expect("failed to acknowledge the job"); From 04f4987d54efa022ab6289bc94dc889db2110a8e Mon Sep 17 00:00:00 2001 From: Geoffrey Mureithi <95377562+geofmureithi@users.noreply.github.com> Date: Fri, 19 Jul 2024 09:26:08 +0300 Subject: [PATCH 33/59] add: test utils that allow backend polling during tests (#374) * add: test utils that allow backend polling during tests * fix: introduce testwrapper and add more tests * fix: add sample for testing * fix: more fixes and actions fixes * fix: more fixes on vacuuming * tests: improve cleanup and generic testing * fix: improve testing and fix some found bugs * fix: postgres query and remove incompatible tests * fix: remove redis incompatible check * fix: minor fixes * fix: postgres json elements --- .github/workflows/mysql.yaml | 7 - .github/workflows/postgres.yaml | 9 +- .github/workflows/redis.yaml | 4 - .github/workflows/sqlite.yaml | 9 +- examples/email-service/Cargo.toml | 3 +- examples/email-service/src/lib.rs | 43 ++++- examples/redis-with-msg-pack/src/main.rs | 6 +- examples/redis/src/main.rs | 2 +- packages/apalis-core/Cargo.toml | 3 +- packages/apalis-core/src/codec/json.rs | 15 +- packages/apalis-core/src/error.rs | 20 +- packages/apalis-core/src/lib.rs | 226 +++++++++++++++++++++++ packages/apalis-core/src/memory.rs | 15 +- packages/apalis-core/src/monitor/mod.rs | 9 +- packages/apalis-core/src/request.rs | 13 +- packages/apalis-core/src/response.rs | 18 +- packages/apalis-core/src/task/task_id.rs | 2 +- packages/apalis-core/src/worker/mod.rs | 2 +- packages/apalis-cron/src/lib.rs | 3 +- packages/apalis-redis/Cargo.toml | 1 + packages/apalis-redis/src/storage.rs | 25 +-- packages/apalis-sql/Cargo.toml | 4 +- packages/apalis-sql/src/context.rs | 6 +- packages/apalis-sql/src/from_row.rs | 20 +- packages/apalis-sql/src/lib.rs | 88 ++++++++- packages/apalis-sql/src/mysql.rs | 115 ++++-------- packages/apalis-sql/src/postgres.rs | 113 +++++------- packages/apalis-sql/src/sqlite.rs | 68 +++---- src/layers/catch_panic/mod.rs | 5 +- 29 files changed, 573 insertions(+), 281 deletions(-) diff --git a/.github/workflows/mysql.yaml b/.github/workflows/mysql.yaml index 6b67a82b..236c01c8 100644 --- a/.github/workflows/mysql.yaml +++ b/.github/workflows/mysql.yaml @@ -1,11 +1,4 @@ on: - push: - paths: - - "packages/apalis-sql/src/lib.rs" - - "packages/apalis-sql/mysql.rs" - - "packages/apalis-sql/src/migrations/mysql/**" - - "packages/apalis-sql/src/Cargo.toml" - - ".github/workflows/mysql.yaml" pull_request: paths: - "packages/apalis-sql/src/lib.rs" diff --git a/.github/workflows/postgres.yaml b/.github/workflows/postgres.yaml index 050753e4..5845f0c1 100644 --- a/.github/workflows/postgres.yaml +++ b/.github/workflows/postgres.yaml @@ -1,11 +1,4 @@ on: - push: - paths: - - "packages/apalis-sql/src/lib.rs" - - "packages/apalis-sql/postgres.rs" - - "packages/apalis-sql/src/migrations/postgres/**" - - "packages/apalis-sql/src/Cargo.toml" - - ".github/workflows/postgres.yaml" pull_request: paths: - "packages/apalis-sql/src/lib.rs" @@ -37,4 +30,4 @@ jobs: toolchain: stable override: true - run: cargo test --no-default-features --features postgres,migrate,tokio-comp -- --test-threads=1 - working-directory: packages/apalis-sql \ No newline at end of file + working-directory: packages/apalis-sql diff --git a/.github/workflows/redis.yaml 
b/.github/workflows/redis.yaml index 53990175..68cbc038 100644 --- a/.github/workflows/redis.yaml +++ b/.github/workflows/redis.yaml @@ -1,8 +1,4 @@ on: - push: - paths: - - "packages/apalis-redis/**" - - ".github/workflows/redis.yaml" pull_request: paths: - "packages/apalis-redis/**" diff --git a/.github/workflows/sqlite.yaml b/.github/workflows/sqlite.yaml index 1319f259..f7129bf2 100644 --- a/.github/workflows/sqlite.yaml +++ b/.github/workflows/sqlite.yaml @@ -1,11 +1,4 @@ on: - push: - paths: - - "packages/apalis-sql/src/lib.rs" - - "packages/apalis-sql/src/sqlite.rs" - - "packages/apalis-sql/src/migrations/sqlite/**" - - "packages/apalis-sql/src/Cargo.toml" - - ".github/workflows/sqlite.yaml" pull_request: paths: - "packages/apalis-sql/src/lib.rs" @@ -28,4 +21,4 @@ jobs: toolchain: stable override: true - run: cargo test --no-default-features --features sqlite,migrate,tokio-comp -- --test-threads=1 - working-directory: packages/apalis-sql \ No newline at end of file + working-directory: packages/apalis-sql diff --git a/examples/email-service/Cargo.toml b/examples/email-service/Cargo.toml index 8ede34ed..3aca96a1 100644 --- a/examples/email-service/Cargo.toml +++ b/examples/email-service/Cargo.toml @@ -8,4 +8,5 @@ apalis = { path = "../../", default-features = false } futures-util = "0.3.0" serde_json = "1.0" serde = { version = "1.0", features = ["derive"] } -log = "0.4" \ No newline at end of file +log = "0.4" +email_address = "0.2.5" diff --git a/examples/email-service/src/lib.rs b/examples/email-service/src/lib.rs index f5f833dd..467de899 100644 --- a/examples/email-service/src/lib.rs +++ b/examples/email-service/src/lib.rs @@ -1,3 +1,7 @@ +use std::{str::FromStr, sync::Arc}; + +use apalis::prelude::*; +use email_address::EmailAddress; use serde::{Deserialize, Serialize}; #[derive(Debug, Deserialize, Serialize, Clone)] @@ -7,8 +11,43 @@ pub struct Email { pub text: String, } -pub async fn send_email(job: Email) { - log::info!("Attempting to send email to {}", job.to); +pub async fn send_email(job: Email) -> Result<(), Error> { + let validation = EmailAddress::from_str(&job.to); + match validation { + Ok(email) => { + log::info!("Attempting to send email to {}", email.as_str()); + Ok(()) + } + Err(email_address::Error::InvalidCharacter) => { + log::error!("Killed send email job. Invalid character {}", job.to); + Err(Error::Abort(String::from("Invalid character. 
Job killed"))) + } + Err(e) => Err(Error::Failed(Arc::new(Box::new(e)))), + } +} + +pub fn example_good_email() -> Email { + Email { + subject: "Test Subject".to_string(), + to: "example@gmail.com".to_string(), + text: "Some Text".to_string(), + } +} + +pub fn example_killed_email() -> Email { + Email { + subject: "Test Subject".to_string(), + to: "example@©.com".to_string(), // killed because it has © which is invalid + text: "Some Text".to_string(), + } +} + +pub fn example_retry_able_email() -> Email { + Email { + subject: "Test Subject".to_string(), + to: "example".to_string(), + text: "Some Text".to_string(), + } } pub const FORM_HTML: &str = r#" diff --git a/examples/redis-with-msg-pack/src/main.rs b/examples/redis-with-msg-pack/src/main.rs index 9a37165a..4afa89ad 100644 --- a/examples/redis-with-msg-pack/src/main.rs +++ b/examples/redis-with-msg-pack/src/main.rs @@ -1,4 +1,4 @@ -use std::time::Duration; +use std::{sync::Arc, time::Duration}; use anyhow::Result; use apalis::prelude::*; @@ -13,11 +13,11 @@ struct MessagePack; impl Codec> for MessagePack { type Error = Error; fn encode(&self, input: &T) -> Result, Self::Error> { - rmp_serde::to_vec(input).map_err(|e| Error::SourceError(Box::new(e))) + rmp_serde::to_vec(input).map_err(|e| Error::SourceError(Arc::new(Box::new(e)))) } fn decode(&self, compact: &Vec) -> Result { - rmp_serde::from_slice(compact).map_err(|e| Error::SourceError(Box::new(e))) + rmp_serde::from_slice(compact).map_err(|e| Error::SourceError(Arc::new(Box::new(e)))) } } diff --git a/examples/redis/src/main.rs b/examples/redis/src/main.rs index 3a352783..b0c9dd9d 100644 --- a/examples/redis/src/main.rs +++ b/examples/redis/src/main.rs @@ -47,7 +47,7 @@ async fn main() -> Result<()> { produce_jobs(storage.clone()).await?; let worker = WorkerBuilder::new("rango-tango") - .chain(|svc| svc.map_err(Error::Failed)) + .chain(|svc| svc.map_err(|e| Error::Failed(Arc::new(e)))) .layer(RateLimitLayer::new(5, Duration::from_secs(1))) .layer(TimeoutLayer::new(Duration::from_millis(500))) .data(Count::default()) diff --git a/packages/apalis-core/Cargo.toml b/packages/apalis-core/Cargo.toml index 2a809bf9..b04ad9cf 100644 --- a/packages/apalis-core/Cargo.toml +++ b/packages/apalis-core/Cargo.toml @@ -29,10 +29,11 @@ optional = true [features] -default = [] +default = ["test-utils"] docsrs = ["document-features"] sleep = ["futures-timer"] json = ["serde_json"] +test-utils = [] [package.metadata.docs.rs] # defines the configuration attribute `docsrs` diff --git a/packages/apalis-core/src/codec/json.rs b/packages/apalis-core/src/codec/json.rs index 7ed7c7f8..ef85854c 100644 --- a/packages/apalis-core/src/codec/json.rs +++ b/packages/apalis-core/src/codec/json.rs @@ -1,3 +1,5 @@ +use std::sync::Arc; + use crate::{error::Error, Codec}; use serde::{de::DeserializeOwned, Serialize}; use serde_json::Value; @@ -9,32 +11,33 @@ pub struct JsonCodec; impl Codec> for JsonCodec { type Error = Error; fn encode(&self, input: &T) -> Result, Self::Error> { - serde_json::to_vec(input).map_err(|e| Error::SourceError(Box::new(e))) + serde_json::to_vec(input).map_err(|e| Error::SourceError(Arc::new(Box::new(e)))) } fn decode(&self, compact: &Vec) -> Result { - serde_json::from_slice(compact).map_err(|e| Error::SourceError(Box::new(e))) + serde_json::from_slice(compact).map_err(|e| Error::SourceError(Arc::new(Box::new(e)))) } } impl Codec for JsonCodec { type Error = Error; fn encode(&self, input: &T) -> Result { - serde_json::to_string(input).map_err(|e| Error::SourceError(Box::new(e))) + 
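Wrapping every boxed source in an Arc is what lets Error derive Clone in this patch; the TestWrapper below fans results out of the poller, so errors must be cheaply duplicable. Constructing the Failed variant therefore goes through Arc::new(Box::new(..)), as in the email validator above:

    use std::sync::Arc;
    use apalis_core::error::Error;

    fn main() {
        let source = std::io::Error::new(std::io::ErrorKind::Other, "smtp down");
        let err = Error::Failed(Arc::new(Box::new(source)));
        // Clone is now possible because the source is shared, not owned.
        let cloned = err.clone();
        assert_eq!(cloned.to_string(), err.to_string());
    }

The trade-off is that clones share one source allocation, which is harmless since Error only ever exposes its source by reference.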
serde_json::to_string(input).map_err(|e| Error::SourceError(Arc::new(Box::new(e))))
     }
 
     fn decode(&self, compact: &String) -> Result<T, Self::Error> {
-        serde_json::from_str(compact).map_err(|e| Error::SourceError(Box::new(e)))
+        serde_json::from_str(compact).map_err(|e| Error::SourceError(Arc::new(Box::new(e))))
     }
 }
 
 impl<T: Serialize + DeserializeOwned> Codec<T, Value> for JsonCodec {
     type Error = Error;
     fn encode(&self, input: &T) -> Result<Value, Self::Error> {
-        serde_json::to_value(input).map_err(|e| Error::SourceError(Box::new(e)))
+        serde_json::to_value(input).map_err(|e| Error::SourceError(Arc::new(Box::new(e))))
     }
 
     fn decode(&self, compact: &Value) -> Result<T, Self::Error> {
-        serde_json::from_value(compact.clone()).map_err(|e| Error::SourceError(Box::new(e)))
+        serde_json::from_value(compact.clone())
+            .map_err(|e| Error::SourceError(Arc::new(Box::new(e))))
     }
 }
diff --git a/packages/apalis-core/src/error.rs b/packages/apalis-core/src/error.rs
index 0f526999..6c812863 100644
--- a/packages/apalis-core/src/error.rs
+++ b/packages/apalis-core/src/error.rs
@@ -1,4 +1,4 @@
-use std::error::Error as StdError;
+use std::{error::Error as StdError, sync::Arc};
 use thiserror::Error;
 
 use crate::worker::WorkerError;
@@ -7,28 +7,24 @@ use crate::worker::WorkerError;
 pub type BoxDynError = Box<dyn StdError + 'static + Send + Sync>;
 
 /// Represents a general error returned by a task or by internals of the platform
-#[derive(Error, Debug)]
+#[derive(Error, Debug, Clone)]
 #[non_exhaustive]
 pub enum Error {
     /// An error occurred during execution.
     #[error("FailedError: {0}")]
-    Failed(#[source] BoxDynError),
+    Failed(#[source] Arc<BoxDynError>),
 
     /// A generic IO error
     #[error("IoError: {0}")]
-    Io(#[from] std::io::Error),
+    Io(#[from] Arc<std::io::Error>),
 
     /// Missing some context and yet it was requested during execution.
     #[error("MissingContextError: {0}")]
     MissingContext(String),
 
     /// Execution was aborted
-    #[error("AbortError")]
-    Abort,
-
-    /// Execution failed and job will be retried
-    #[error("RetryError: {0}")]
-    Retry(#[source] BoxDynError),
+    #[error("AbortError: {0}")]
+    Abort(String),
 
     /// Encountered an error during worker execution
     #[error("WorkerError: {0}")]
@@ -38,11 +34,11 @@ pub enum Error {
     /// Encountered an error during service execution
     /// This should not be used inside a task function
     #[error("Encountered an error during service execution")]
-    ServiceError(#[source] BoxDynError),
+    ServiceError(#[source] Arc<BoxDynError>),
 
     #[doc(hidden)]
     /// Encountered an error during service execution
     /// This should not be used inside a task function
     #[error("Encountered an error during streaming")]
-    SourceError(#[source] BoxDynError),
+    SourceError(#[source] Arc<BoxDynError>),
 }
diff --git a/packages/apalis-core/src/lib.rs b/packages/apalis-core/src/lib.rs
index 1ab6244c..430014ae 100644
--- a/packages/apalis-core/src/lib.rs
+++ b/packages/apalis-core/src/lib.rs
@@ -168,3 +168,229 @@ impl crate::executor::Executor for TestExecutor {
         tokio::spawn(future);
     }
 }
+
+#[cfg(feature = "test-utils")]
+/// Test utilities that allow you to test backends
+pub mod test_utils {
+    use crate::error::BoxDynError;
+    use crate::request::Request;
+    use crate::storage::Storage;
+    use crate::task::task_id::TaskId;
+    use crate::worker::WorkerId;
+    use crate::Backend;
+    use futures::channel::mpsc::{channel, Receiver, Sender};
+    use futures::future::BoxFuture;
+    use futures::stream::{Stream, StreamExt};
+    use futures::{Future, FutureExt, SinkExt};
+    use std::fmt::Debug;
+    use std::marker::PhantomData;
+    use std::ops::{Deref, DerefMut};
+    use std::pin::Pin;
+    use std::task::{Context, Poll};
+    use tower::{Layer, Service};
+
+    /// Define a dummy service
+    #[derive(Debug, Clone)]
+    pub struct DummyService;
+
+    impl<Request: Send + 'static> Service<Request> for DummyService {
+        type Response = Request;
+        type Error = std::convert::Infallible;
+        type Future = Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send>>;
+
+        fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+            Poll::Ready(Ok(()))
+        }
+
+        fn call(&mut self, req: Request) -> Self::Future {
+            let fut = async move { Ok(req) };
+            Box::pin(fut)
+        }
+    }
+
+    /// A generic backend wrapper that polls and executes jobs
+    #[derive(Debug)]
+    pub struct TestWrapper<B, Req> {
+        stop_tx: Sender<()>,
+        res_rx: Receiver<(TaskId, Result<String, String>)>,
+        _p: PhantomData<Req>,
+        backend: B,
+    }
+    /// A test wrapper to allow you to test without requiring a worker.
+    /// Important for testing backends and jobs
+    /// # Example
+    /// ```no_run
+    /// #[cfg(test)]
+    /// mod tests {
+    ///     use crate::{
+    ///         error::Error, memory::MemoryStorage, mq::MessageQueue, service_fn::service_fn,
+    ///     };
+    ///
+    ///     use super::*;
+    ///
+    ///     async fn is_even(req: usize) -> Result<(), Error> {
+    ///         if req % 2 == 0 {
+    ///             Ok(())
+    ///         } else {
+    ///             Err(Error::Abort("Not an even number".to_string()))
+    ///         }
+    ///     }
+    ///
+    ///     #[tokio::test]
+    ///     async fn test_accepts_even() {
+    ///         let backend = MemoryStorage::new();
+    ///         let (mut tester, poller) = TestWrapper::new_with_service(backend, service_fn(is_even));
+    ///         tokio::spawn(poller);
+    ///         tester.enqueue(42usize).await.unwrap();
+    ///         assert_eq!(tester.size().await.unwrap(), 1);
+    ///         let (_, resp) = tester.execute_next().await;
+    ///         assert_eq!(resp, Ok("()".to_string()));
+    ///     }
+    /// }
+    /// ```
+    impl<B, Req> TestWrapper<B, Req>
+    where
+        B: Backend<Request<Req>> + Send + Sync + 'static + Clone,
+        Req: Send + 'static,
+        B::Stream: Send + 'static,
+        B::Stream: Stream<Item = Result<Option<Request<Req>>, crate::error::Error>> + Unpin,
+    {
+        /// Build a new instance provided a custom service
+        pub fn new_with_service<S>(backend: B, service: S) -> (Self, BoxFuture<'static, ()>)
+        where
+            S: Service<Request<Req>> + Send + 'static,
+            B::Layer: Layer<S>,
+            <<B as Backend<Request<Req>>>::Layer as Layer<S>>::Service:
+                Service<Request<Req>> + Send + 'static,
+            <<<B as Backend<Request<Req>>>::Layer as Layer<S>>::Service as Service<Request<Req>>>::Response:
+                Send + Debug,
+            <<<B as Backend<Request<Req>>>::Layer as Layer<S>>::Service as Service<Request<Req>>>::Error:
+                Send + Into<BoxDynError> + Sync,
+            <<<B as Backend<Request<Req>>>::Layer as Layer<S>>::Service as Service<Request<Req>>>::Future:
+                Send + 'static,
+        {
+            let worker_id = WorkerId::new("test-worker");
+            let b = backend.clone();
+            let mut poller = b.poll(worker_id);
+            let (stop_tx, mut stop_rx) = channel::<()>(1);
+
+            let (mut res_tx, res_rx) = channel(10);
+
+            let mut service = poller.layer.layer(service);
+
+            let poller = async move {
+                let heartbeat = poller.heartbeat.shared();
+                loop {
+                    futures::select!
{ + + item = poller.stream.next().fuse() => match item { + Some(Ok(Some(req))) => { + + let task_id = req.get::().cloned().unwrap_or_default(); + // .expect("Request does not contain Task_ID"); + // handle request + match service.call(req).await { + Ok(res) => { + res_tx.send((task_id, Ok(format!("{res:?}")))).await.unwrap(); + }, + Err(err) => { + res_tx.send((task_id, Err(err.into().to_string()))).await.unwrap(); + } + } + } + Some(Ok(None)) | None => break, + Some(Err(_e)) => { + // handle error + break; + } + }, + _ = stop_rx.next().fuse() => break, + _ = heartbeat.clone().fuse() => { + + }, + } + } + }; + ( + Self { + stop_tx, + res_rx, + _p: PhantomData, + backend, + }, + poller.boxed(), + ) + } + + /// Stop polling + pub fn stop(mut self) { + let _ = self.stop_tx.send(()); + } + + /// Gets the current state of results + pub async fn execute_next(&mut self) -> (TaskId, Result) { + self.res_rx.next().await.unwrap() + } + } + + impl Deref for TestWrapper + where + B: Backend>, + { + type Target = B; + + fn deref(&self) -> &Self::Target { + &self.backend + } + } + + impl DerefMut for TestWrapper + where + B: Backend>, + { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.backend + } + } + + pub use tower::service_fn as apalis_test_service_fn; + + #[macro_export] + /// Tests a generic mq + macro_rules! test_message_queue { + ($backend_instance:expr) => { + #[tokio::test] + async fn it_works_as_an_mq_backend() { + let backend = $backend_instance; + let service = apalis_test_service_fn(|request: Request| async { + Ok::<_, io::Error>(request) + }); + let (mut t, poller) = TestWrapper::new_with_service(backend, service); + tokio::spawn(poller); + t.enqueue(1).await.unwrap(); + tokio::time::sleep(Duration::from_secs(1)).await; + let _res = t.execute_next().await; + // assert_eq!(res.len(), 1); // One job is done + } + }; + } + #[macro_export] + /// Tests a generic storage + macro_rules! 
generic_storage_test { + ($setup:path ) => { + #[tokio::test] + async fn integration_test_storage_push_and_consume() { + let backend = $setup().await; + let service = apalis_test_service_fn(|request: Request| async move { + Ok::<_, io::Error>(request.take()) + }); + let (mut t, poller) = TestWrapper::new_with_service(backend, service); + tokio::spawn(poller); + let res = t.len().await.unwrap(); + assert_eq!(res, 0); // No jobs + t.push(1).await.unwrap(); + let res = t.len().await.unwrap(); + assert_eq!(res, 1); // A job exists + let res = t.execute_next().await; + assert_eq!(res.1, Ok("1".to_owned())); + t.vacuum().await.unwrap(); + } + }; + } +} diff --git a/packages/apalis-core/src/memory.rs b/packages/apalis-core/src/memory.rs index 558b0e30..250190c4 100644 --- a/packages/apalis-core/src/memory.rs +++ b/packages/apalis-core/src/memory.rs @@ -52,8 +52,8 @@ impl Clone for MemoryStorage { /// In-memory queue that implements [Stream] #[derive(Debug)] pub struct MemoryWrapper { - sender: Sender, - receiver: Arc>>, + sender: Sender>, + receiver: Arc>>>, } impl Clone for MemoryWrapper { @@ -84,7 +84,7 @@ impl Default for MemoryWrapper { } impl Stream for MemoryWrapper { - type Item = T; + type Item = Request; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { if let Some(mut receiver) = self.receiver.try_lock() { @@ -102,7 +102,7 @@ impl Backend> for MemoryStorage { type Layer = Identity; fn poll(self, _worker: WorkerId) -> Poller { - let stream = self.inner.map(|r| Ok(Some(Request::new(r)))).boxed(); + let stream = self.inner.map(|r| Ok(Some(r))).boxed(); Poller { stream: BackendStream::new(stream, self.controller), heartbeat: Box::pin(async {}), @@ -114,12 +114,15 @@ impl Backend> for MemoryStorage { impl MessageQueue for MemoryStorage { type Error = (); async fn enqueue(&mut self, message: Message) -> Result<(), Self::Error> { - self.inner.sender.try_send(message).unwrap(); + self.inner + .sender + .try_send(Request::new(message)) + .map_err(|_| ())?; Ok(()) } async fn dequeue(&mut self) -> Result, ()> { - Ok(self.inner.receiver.lock().await.next().await) + Ok(self.inner.receiver.lock().await.next().await.map(|r| r.req)) } async fn size(&mut self) -> Result { diff --git a/packages/apalis-core/src/monitor/mod.rs b/packages/apalis-core/src/monitor/mod.rs index aad4dafc..beee0f56 100644 --- a/packages/apalis-core/src/monitor/mod.rs +++ b/packages/apalis-core/src/monitor/mod.rs @@ -297,6 +297,7 @@ impl Monitor { #[cfg(test)] mod tests { + use crate::test_utils::apalis_test_service_fn; use std::{io, time::Duration}; use tokio::time::sleep; @@ -307,11 +308,15 @@ mod tests { monitor::Monitor, mq::MessageQueue, request::Request, + test_message_queue, + test_utils::TestWrapper, TestExecutor, }; + test_message_queue!(MemoryStorage::new()); + #[tokio::test] - async fn it_works() { + async fn it_works_with_workers() { let backend = MemoryStorage::new(); let mut handle = backend.clone(); @@ -342,7 +347,7 @@ mod tests { let mut handle = backend.clone(); tokio::spawn(async move { - for i in 0..1000 { + for i in 0..10 { handle.enqueue(i).await.unwrap(); } }); diff --git a/packages/apalis-core/src/request.rs b/packages/apalis-core/src/request.rs index d6478240..ac381472 100644 --- a/packages/apalis-core/src/request.rs +++ b/packages/apalis-core/src/request.rs @@ -4,7 +4,10 @@ use tower::layer::util::Identity; use std::{fmt::Debug, pin::Pin}; -use crate::{data::Extensions, error::Error, poller::Poller, worker::WorkerId, Backend}; +use crate::{ + data::Extensions, error::Error, 
poller::Poller, task::task_id::TaskId, worker::WorkerId,
+    Backend,
+};
 
 /// Represents a job which can be serialized and executed
 
@@ -18,10 +21,10 @@ pub struct Request<T> {
 impl<T> Request<T> {
     /// Creates a new [Request]
     pub fn new(req: T) -> Self {
-        Self {
-            req,
-            data: Extensions::new(),
-        }
+        let id = TaskId::new();
+        let mut data = Extensions::new();
+        data.insert(id);
+        Self::new_with_data(req, data)
     }
 
     /// Creates a request with context provided
diff --git a/packages/apalis-core/src/response.rs b/packages/apalis-core/src/response.rs
index 7c2a231d..efda8920 100644
--- a/packages/apalis-core/src/response.rs
+++ b/packages/apalis-core/src/response.rs
@@ -1,4 +1,4 @@
-use std::any::Any;
+use std::{any::Any, sync::Arc};
 
 use crate::error::Error;
 
@@ -15,22 +15,30 @@ impl IntoResponse for bool {
     fn into_response(self) -> std::result::Result<bool, Error> {
         match self {
             true => Ok(true),
-            false => Err(Error::Failed(Box::new(std::io::Error::new(
+            false => Err(Error::Failed(Arc::new(Box::new(std::io::Error::new(
                 std::io::ErrorKind::Other,
                 "Job returned false",
-            )))),
+            ))))),
         }
     }
 }
 
-impl<T: Any, E: std::error::Error + Sync + Send> IntoResponse
+impl<T: Any, E: std::error::Error + Sync + Send + 'static> IntoResponse
     for std::result::Result<T, E>
 {
     type Result = Result<T, Error>;
     fn into_response(self) -> Result<T, Error> {
         match self {
             Ok(value) => Ok(value),
-            Err(e) => Err(Error::Failed(Box::new(e))),
+            Err(e) => {
+                // Try to downcast the error to see if it is already of type `Error`
+                if let Some(custom_error) =
+                    (&e as &(dyn std::error::Error + 'static)).downcast_ref::<Error>()
+                {
+                    return Err(custom_error.clone());
+                }
+                Err(Error::Failed(Arc::new(Box::new(e))))
+            }
         }
     }
 }
diff --git a/packages/apalis-core/src/task/task_id.rs b/packages/apalis-core/src/task/task_id.rs
index 6d6a2504..455e531f 100644
--- a/packages/apalis-core/src/task/task_id.rs
+++ b/packages/apalis-core/src/task/task_id.rs
@@ -7,7 +7,7 @@ use serde::{de::Visitor, Deserialize, Deserializer, Serialize, Serializer};
 use ulid::Ulid;
 
 /// A wrapper type that defines a task id.
-#[derive(Debug, Clone)]
+#[derive(Debug, Clone, Eq, Hash, PartialEq)]
 pub struct TaskId(Ulid);
 
 impl TaskId {
diff --git a/packages/apalis-core/src/worker/mod.rs b/packages/apalis-core/src/worker/mod.rs
index b9c071f0..6d911ecf 100644
--- a/packages/apalis-core/src/worker/mod.rs
+++ b/packages/apalis-core/src/worker/mod.rs
@@ -135,7 +135,7 @@ pub enum Event {
 }
 
 /// Possible errors that can occur when starting a worker.
-#[derive(Error, Debug)]
+#[derive(Error, Debug, Clone)]
 pub enum WorkerError {
     /// An error occurred while processing a job.
#[error("Failed to process job: {0}")] diff --git a/packages/apalis-cron/src/lib.rs b/packages/apalis-cron/src/lib.rs index 024fa9f4..babcf245 100644 --- a/packages/apalis-cron/src/lib.rs +++ b/packages/apalis-cron/src/lib.rs @@ -68,6 +68,7 @@ use apalis_core::{error::Error, request::Request}; use chrono::{DateTime, TimeZone, Utc}; pub use cron::Schedule; use std::marker::PhantomData; +use std::sync::Arc; /// Represents a stream from a cron schedule with a timezone #[derive(Clone, Debug)] @@ -117,7 +118,7 @@ where match next { Some(next) => { let to_sleep = next - timezone.from_utc_datetime(&Utc::now().naive_utc()); - let to_sleep = to_sleep.to_std().map_err(|e| Error::Failed(e.into()))?; + let to_sleep = to_sleep.to_std().map_err(|e| Error::SourceError(Arc::new(e.into())))?; apalis_core::sleep(to_sleep).await; let mut data = Extensions::new(); data.insert(TaskId::new()); diff --git a/packages/apalis-redis/Cargo.toml b/packages/apalis-redis/Cargo.toml index 023ec645..bd0742ed 100644 --- a/packages/apalis-redis/Cargo.toml +++ b/packages/apalis-redis/Cargo.toml @@ -39,6 +39,7 @@ email-service = { path = "../../examples/email-service" } apalis = { path = "../../", default-features = false, features = [ "tokio-comp", ] } +apalis-core = { path = "../apalis-core", features = ["test-utils"] } [features] default = ["tokio-comp"] diff --git a/packages/apalis-redis/src/storage.rs b/packages/apalis-redis/src/storage.rs index 0dcacff9..faad9a48 100644 --- a/packages/apalis-redis/src/storage.rs +++ b/packages/apalis-redis/src/storage.rs @@ -559,6 +559,7 @@ impl Ack Ok(()) } + // TODO: Just automatically retry e if e.starts_with("RetryError") => { let retry_job = self.scripts.retry_job.clone(); let retry_jobs_set = &self.config.scheduled_jobs_set(); @@ -983,25 +984,32 @@ impl RedisStorage { #[cfg(test)] mod tests { + use apalis_core::generic_storage_test; use email_service::Email; + use apalis_core::test_utils::apalis_test_service_fn; + use apalis_core::test_utils::TestWrapper; + + generic_storage_test!(setup); + use super::*; /// migrate DB and return a storage instance. - async fn setup() -> RedisStorage { + async fn setup() -> RedisStorage { let redis_url = std::env::var("REDIS_URL").expect("No REDIS_URL is specified"); // Because connections cannot be shared across async runtime // (different runtimes are created for each test), // we don't share the storage and tests must be run sequentially. let conn = connect(redis_url).await.unwrap(); - let storage = RedisStorage::new(conn); + let mut storage = RedisStorage::new(conn); + cleanup(&mut storage, &WorkerId::new("test-worker")).await; storage } /// rollback DB changes made by tests. 
/// /// You should execute this function in the end of a test - async fn cleanup(mut storage: RedisStorage, _worker_id: &WorkerId) { + async fn cleanup(storage: &mut RedisStorage, _worker_id: &WorkerId) { let _resp: String = redis::cmd("FLUSHDB") .query_async(&mut storage.conn) .await @@ -1063,8 +1071,6 @@ mod tests { let worker_id = register_worker(&mut storage).await; let _job = consume_one(&mut storage, &worker_id).await; - - cleanup(storage, &worker_id).await; } #[tokio::test] @@ -1076,19 +1082,19 @@ mod tests { let job = consume_one(&mut storage, &worker_id).await; let job_id = &job.get::().unwrap().id; + let attempts = job.get::().unwrap().clone(); storage .ack(AckResponse { acknowledger: job_id.clone(), result: Ok("Success".to_string()), worker: worker_id.clone(), - attempts: Attempt::new_with_value(0) + attempts, }) .await .expect("failed to acknowledge the job"); let _job = get_job(&mut storage, &job_id).await; - cleanup(storage, &worker_id).await; } #[tokio::test] @@ -1108,8 +1114,6 @@ mod tests { .expect("failed to kill job"); let _job = get_job(&mut storage, &job_id).await; - - cleanup(storage, &worker_id).await; } #[tokio::test] @@ -1125,7 +1129,6 @@ mod tests { .reenqueue_orphaned(5, 300) .await .expect("failed to reenqueue_orphaned"); - cleanup(storage, &worker_id).await; } #[tokio::test] @@ -1141,7 +1144,5 @@ mod tests { .reenqueue_orphaned(5, 300) .await .expect("failed to reenqueue_orphaned"); - - cleanup(storage, &worker_id).await; } } diff --git a/packages/apalis-sql/Cargo.toml b/packages/apalis-sql/Cargo.toml index 8187dd16..b6c0431e 100644 --- a/packages/apalis-sql/Cargo.toml +++ b/packages/apalis-sql/Cargo.toml @@ -9,7 +9,7 @@ license = "MIT" description = "SQL Storage for apalis. Use sqlite, postgres and mysql for background job processing" [features] -default = ["sqlite", "migrate", "postgres"] +default = ["migrate"] postgres = ["sqlx/postgres", "sqlx/json"] sqlite = ["sqlx/sqlite", "sqlx/json"] mysql = ["sqlx/mysql", "sqlx/json", "sqlx/bigdecimal"] @@ -45,6 +45,8 @@ apalis = { path = "../../", default-features = false, features = [ "tokio-comp", ] } once_cell = "1.19.0" +apalis-sql = { path = ".", features = ["tokio-comp"] } +apalis-core = { path = "../apalis-core", features = ["test-utils"] } [package.metadata.docs.rs] # defines the configuration attribute `docsrs` diff --git a/packages/apalis-sql/src/context.rs b/packages/apalis-sql/src/context.rs index 7d5f3856..5aed23ef 100644 --- a/packages/apalis-sql/src/context.rs +++ b/packages/apalis-sql/src/context.rs @@ -1,8 +1,8 @@ use apalis_core::error::Error; use apalis_core::task::{attempt::Attempt, task_id::TaskId}; use apalis_core::worker::WorkerId; -use serde::{Deserialize, Serialize}; use chrono::{DateTime, Utc}; +use serde::{Deserialize, Serialize}; use std::{fmt, str::FromStr}; /// The context for a job is represented here @@ -137,8 +137,6 @@ pub enum State { Running, /// Job was done successfully Done, - /// Retry Job - Retry, /// Job has failed. 
Check `last_error` Failed, /// Job has been killed @@ -159,7 +157,6 @@ impl FromStr for State { "Pending" | "Latest" => Ok(State::Pending), "Running" => Ok(State::Running), "Done" => Ok(State::Done), - "Retry" => Ok(State::Retry), "Failed" => Ok(State::Failed), "Killed" => Ok(State::Killed), _ => Err(Error::MissingContext("Invalid Job state".to_string())), @@ -173,7 +170,6 @@ impl fmt::Display for State { State::Pending => write!(f, "Pending"), State::Running => write!(f, "Running"), State::Done => write!(f, "Done"), - State::Retry => write!(f, "Retry"), State::Failed => write!(f, "Failed"), State::Killed => write!(f, "Killed"), } diff --git a/packages/apalis-sql/src/from_row.rs b/packages/apalis-sql/src/from_row.rs index 7151e1ef..89d6628b 100644 --- a/packages/apalis-sql/src/from_row.rs +++ b/packages/apalis-sql/src/from_row.rs @@ -1,5 +1,6 @@ use apalis_core::task::task_id::TaskId; use apalis_core::{data::Extensions, request::Request, worker::WorkerId}; +use chrono::Utc; use serde::{Deserialize, Serialize}; use sqlx::{Decode, Type}; @@ -149,11 +150,11 @@ impl<'r, T: Decode<'r, sqlx::Postgres> + Type> let max_attempts = row.try_get("max_attempts").unwrap_or(25); context.set_max_attempts(max_attempts); - let done_at: Option = row.try_get("done_at").unwrap_or_default(); - context.set_done_at(done_at); + let done_at: Option> = row.try_get("done_at").unwrap_or_default(); + context.set_done_at(done_at.map(|d| d.timestamp())); - let lock_at: Option = row.try_get("lock_at").unwrap_or_default(); - context.set_lock_at(lock_at); + let lock_at: Option> = row.try_get("lock_at").unwrap_or_default(); + context.set_lock_at(lock_at.map(|d| d.timestamp())); let last_error = row.try_get("last_error").unwrap_or_default(); context.set_last_error(last_error); @@ -187,9 +188,6 @@ impl<'r, T: Decode<'r, sqlx::MySql> + Type> sqlx::FromRow<'r, sqlx: fn from_row(row: &'r sqlx::mysql::MySqlRow) -> Result { use sqlx::Row; use std::str::FromStr; - - type Timestamp = i64; - let job: T = row.try_get("job")?; let id: TaskId = TaskId::from_str(row.try_get("id")?).map_err(|e| sqlx::Error::ColumnDecode { @@ -207,11 +205,11 @@ impl<'r, T: Decode<'r, sqlx::MySql> + Type> sqlx::FromRow<'r, sqlx: let max_attempts = row.try_get("max_attempts").unwrap_or(25); context.set_max_attempts(max_attempts); - let done_at: Option = row.try_get("done_at").unwrap_or_default(); - context.set_done_at(done_at); + let done_at: Option = row.try_get("done_at").unwrap_or_default(); + context.set_done_at(done_at.map(|d| d.and_utc().timestamp())); - let lock_at: Option = row.try_get("lock_at").unwrap_or_default(); - context.set_lock_at(lock_at); + let lock_at: Option = row.try_get("lock_at").unwrap_or_default(); + context.set_lock_at(lock_at.map(|d| d.and_utc().timestamp())); let last_error = row.try_get("last_error").unwrap_or_default(); context.set_last_error(last_error); diff --git a/packages/apalis-sql/src/lib.rs b/packages/apalis-sql/src/lib.rs index b5d21133..82d97197 100644 --- a/packages/apalis-sql/src/lib.rs +++ b/packages/apalis-sql/src/lib.rs @@ -49,7 +49,7 @@ impl Default for Config { Self { keep_alive: Duration::from_secs(30), buffer_size: 10, - poll_interval: Duration::from_millis(50), + poll_interval: Duration::from_millis(100), namespace: String::from("apalis::sql"), } } @@ -63,7 +63,7 @@ impl Config { /// Interval between database poll queries /// - /// Defaults to 30ms + /// Defaults to 100ms pub fn set_poll_interval(mut self, interval: Duration) -> Self { self.poll_interval = interval; self @@ -134,9 +134,91 @@ pub(crate) fn 
calculate_status(res: &Result) -> State { match res { Ok(_) => State::Done, Err(e) => match &e { - _ if e.starts_with("RetryError") => State::Retry, _ if e.starts_with("AbortError") => State::Killed, _ => State::Failed, }, } } + +/// Standard checks for any sql backend +#[macro_export] +macro_rules! sql_storage_tests { + ($setup:path, $storage_type:ty, $job_type:ty) => { + async fn setup_test_wrapper() -> TestWrapper<$storage_type, $job_type> { + let (mut t, poller) = TestWrapper::new_with_service( + $setup().await, + apalis_core::service_fn::service_fn(email_service::send_email), + ); + tokio::spawn(poller); + t.vacuum().await.unwrap(); + t + } + + #[tokio::test] + async fn integration_test_kill_job() { + let mut storage = setup_test_wrapper().await; + + storage + .push(email_service::example_killed_email()) + .await + .unwrap(); + + let (job_id, res) = storage.execute_next().await; + assert_eq!( + res, + Err("AbortError: Invalid character. Job killed".to_owned()) + ); + apalis_core::sleep(Duration::from_secs(1)).await; + let job = storage.fetch_by_id(&job_id).await.unwrap().unwrap(); + let ctx = job.get::().unwrap(); + assert_eq!(*ctx.status(), State::Killed); + assert!(ctx.done_at().is_some()); + assert_eq!( + ctx.last_error().clone().unwrap(), + "{\"Err\":\"AbortError: Invalid character. Job killed\"}" + ); + } + + #[tokio::test] + async fn integration_test_acknowledge_good_job() { + let mut storage = setup_test_wrapper().await; + storage + .push(email_service::example_good_email()) + .await + .unwrap(); + + let (job_id, res) = storage.execute_next().await; + assert_eq!(res, Ok("()".to_owned())); + apalis_core::sleep(Duration::from_secs(1)).await; + let job = storage.fetch_by_id(&job_id).await.unwrap().unwrap(); + let ctx = job.get::().unwrap(); + assert_eq!(*ctx.status(), State::Done); + assert!(ctx.done_at().is_some()); + } + + #[tokio::test] + async fn integration_test_acknowledge_failed_job() { + let mut storage = setup_test_wrapper().await; + + storage + .push(email_service::example_retry_able_email()) + .await + .unwrap(); + + let (job_id, res) = storage.execute_next().await; + assert_eq!( + res, + Err("FailedError: Missing separator character '@'.".to_owned()) + ); + apalis_core::sleep(Duration::from_secs(1)).await; + let job = storage.fetch_by_id(&job_id).await.unwrap().unwrap(); + let ctx = job.get::().unwrap(); + assert_eq!(*ctx.status(), State::Failed); + assert!(ctx.attempts().current() >= 1); + assert_eq!( + ctx.last_error().clone().unwrap(), + "{\"Err\":\"FailedError: Missing separator character '@'.\"}" + ); + } + }; +} diff --git a/packages/apalis-sql/src/mysql.rs b/packages/apalis-sql/src/mysql.rs index afedb48c..0e39c1c5 100644 --- a/packages/apalis-sql/src/mysql.rs +++ b/packages/apalis-sql/src/mysql.rs @@ -12,12 +12,12 @@ use apalis_core::task::task_id::TaskId; use apalis_core::worker::WorkerId; use apalis_core::{Backend, BoxCodec}; use async_stream::try_stream; +use chrono::{DateTime, Utc}; use futures::{Stream, StreamExt, TryStreamExt}; use log::error; use serde::{de::DeserializeOwned, Serialize}; use serde_json::Value; use sqlx::mysql::MySqlRow; -use chrono::{DateTime, Utc}; use sqlx::{MySql, Pool, Row}; use std::any::type_name; use std::convert::TryInto; @@ -385,7 +385,7 @@ impl Backend Backend MysqlStorage { mod tests { use crate::context::State; + use crate::sql_storage_tests; use super::*; use apalis_core::task::attempt::Attempt; + + use apalis_core::test_utils::DummyService; use email_service::Email; use futures::StreamExt; + use 
apalis_core::generic_storage_test; + use apalis_core::test_utils::apalis_test_service_fn; + use apalis_core::test_utils::TestWrapper; + + generic_storage_test!(setup); + + sql_storage_tests!(setup::, MysqlStorage, Email); + /// migrate DB and return a storage instance. - async fn setup() -> MysqlStorage { + async fn setup() -> MysqlStorage { let db_url = &std::env::var("DATABASE_URL").expect("No DATABASE_URL is specified"); // Because connections cannot be shared across async runtime // (different runtimes are created for each test), @@ -527,8 +538,8 @@ mod tests { MysqlStorage::setup(&pool) .await .expect("failed to migrate DB"); - let storage = MysqlStorage::new(pool); - + let mut storage = MysqlStorage::new(pool); + cleanup(&mut storage, &WorkerId::new("test-worker")).await; storage } @@ -538,9 +549,9 @@ mod tests { /// - worker identified by `worker_id` /// /// You should execute this function in the end of a test - async fn cleanup(storage: MysqlStorage, worker_id: &WorkerId) { - sqlx::query("DELETE FROM jobs WHERE lock_by = ? OR status = 'Pending'") - .bind(worker_id.to_string()) + async fn cleanup(storage: &mut MysqlStorage, worker_id: &WorkerId) { + sqlx::query("DELETE FROM jobs WHERE job_type = ?") + .bind(storage.config.namespace()) .execute(&storage.pool) .await .expect("failed to delete jobs"); @@ -551,9 +562,11 @@ mod tests { .expect("failed to delete worker"); } - async fn consume_one(storage: &MysqlStorage, worker_id: &WorkerId) -> Request { - let storage = storage.clone(); - let mut stream = storage.stream_jobs( + async fn consume_one( + storage: &mut MysqlStorage, + worker_id: &WorkerId, + ) -> Request { + let mut stream = storage.clone().stream_jobs( worker_id, std::time::Duration::from_secs(10), 1, @@ -575,8 +588,6 @@ mod tests { } } - struct DummyService {} - async fn register_worker_at( storage: &mut MysqlStorage, last_seen: DateTime, @@ -596,17 +607,13 @@ mod tests { register_worker_at(storage, now).await } - async fn push_email(storage: &mut S, email: Email) - where - S: Storage, - { + async fn push_email(storage: &mut MysqlStorage, email: Email) { storage.push(email).await.expect("failed to push a job"); } - async fn get_job(storage: &mut S, job_id: &TaskId) -> Request - where - S: Storage, - { + async fn get_job(storage: &mut MysqlStorage, job_id: &TaskId) -> Request { + // add a slight delay to allow background actions like ack to complete + apalis_core::sleep(Duration::from_secs(1)).await; storage .fetch_by_id(job_id) .await @@ -624,42 +631,9 @@ mod tests { let job = consume_one(&mut storage, &worker_id).await; let ctx = job.get::().unwrap(); // TODO: Fix assertions - // assert_eq!(*ctx.status(), State::Running); - // assert_eq!(*ctx.lock_by(), Some(worker_id.clone())); - // assert!(ctx.lock_at().is_some()); - - cleanup(storage, &worker_id).await; - } - - #[tokio::test] - async fn test_acknowledge_job() { - let mut storage = setup().await; - push_email(&mut storage, example_email()).await; - - let worker_id = register_worker(&mut storage).await; - - let job = consume_one(&mut storage, &worker_id).await; - let ctx = job.get::().unwrap(); - let job_id = ctx.id(); - - storage - .ack(AckResponse { - acknowledger: job_id.clone(), - result: Ok("Success".to_string()), - worker: worker_id.clone(), - attempts: Attempt::new_with_value(0) - }) - .await - .expect("failed to acknowledge the job"); - - let job = get_job(&mut storage, job_id).await; - let ctx = job.get::().unwrap(); - - // TODO: Fix assertions - // assert_eq!(*ctx.status(), State::Done); - // 
assert!(ctx.done_at().is_some()); - - cleanup(storage, &worker_id).await; + assert_eq!(*ctx.status(), State::Running); + assert_eq!(*ctx.lock_by(), Some(worker_id.clone())); + assert!(ctx.lock_at().is_some()); } #[tokio::test] @@ -683,10 +657,8 @@ mod tests { let job = get_job(&mut storage, job_id).await; let ctx = job.get::().unwrap(); // TODO: Fix assertions - // assert_eq!(*ctx.status(), State::Killed); - // assert!(ctx.done_at().is_some()); - - cleanup(storage, &worker_id).await; + assert_eq!(*ctx.status(), State::Killed); + assert!(ctx.done_at().is_some()); } #[tokio::test] @@ -720,14 +692,11 @@ mod tests { // then, the job status has changed to Pending let job = storage.fetch_by_id(ctx.id()).await.unwrap().unwrap(); let context = job.get::().unwrap(); - // TODO: Fix assertions - // assert_eq!(*context.status(), State::Pending); - // assert!(context.lock_by().is_none()); - // assert!(context.lock_at().is_none()); - // assert!(context.done_at().is_none()); - // assert_eq!(*context.last_error(), Some("Job was abandoned".to_string())); - - cleanup(storage, &worker_id).await; + assert_eq!(*context.status(), State::Pending); + assert!(context.lock_by().is_none()); + assert!(context.lock_at().is_none()); + assert!(context.done_at().is_none()); + assert_eq!(*context.last_error(), Some("Job was abandoned".to_string())); } #[tokio::test] @@ -762,9 +731,7 @@ mod tests { let job = storage.fetch_by_id(ctx.id()).await.unwrap().unwrap(); let context = job.get::().unwrap(); // TODO: Fix assertions - // assert_eq!(*context.status(), State::Running); - // assert_eq!(*context.lock_by(), Some(worker_id.clone())); - - cleanup(storage, &worker_id).await; + assert_eq!(*context.status(), State::Running); + assert_eq!(*context.lock_by(), Some(worker_id.clone())); } } diff --git a/packages/apalis-sql/src/postgres.rs b/packages/apalis-sql/src/postgres.rs index 356e87ce..e01941a4 100644 --- a/packages/apalis-sql/src/postgres.rs +++ b/packages/apalis-sql/src/postgres.rs @@ -53,13 +53,13 @@ use apalis_core::task::namespace::Namespace; use apalis_core::task::task_id::TaskId; use apalis_core::worker::WorkerId; use apalis_core::{Backend, BoxCodec}; +use chrono::{DateTime, Utc}; use futures::channel::mpsc; use futures::StreamExt; use futures::{select, stream, SinkExt}; use log::error; use serde::{de::DeserializeOwned, Serialize}; use sqlx::postgres::PgListener; -use chrono::{DateTime, Utc}; use sqlx::{Pool, Postgres, Row}; use std::any::type_name; use std::convert::TryInto; @@ -149,11 +149,11 @@ impl Backend Backend { if let Some(ids) = ids { let ack_ids: Vec<(String, String, String, String, u64)> = ids.iter().map(|c| { - (c.acknowledger.to_string(), c.worker.to_string(), serde_json::to_string(&c.result).unwrap(), calculate_status(&c.result).to_string(), c.attempts.current() as u64) + (c.acknowledger.to_string(), c.worker.to_string(), serde_json::to_string(&c.result).unwrap(), calculate_status(&c.result).to_string(), (c.attempts.current() + 1) as u64 ) }).collect(); let query = - "UPDATE apalis.jobs SET status = Q.status, done_at = now(), lock_by = Q.lock_by, last_error = Q.result, attempts = Q.attempts FROM ( - SELECT(value-->0)::text as id, (value->>1)::text as worker_id, (value->>2)::text as result, (value->>3)::text as status, (value->>4)::int as attempts FROM json_array_elements($1) - ) Q - WHERE id = Q.id"; + "UPDATE apalis.jobs + SET status = Q.status, + done_at = now(), + lock_by = Q.worker_id, + last_error = Q.result, + attempts = Q.attempts + FROM ( + SELECT (value->>0)::text as id, + (value->>1)::text as 
worker_id, + (value->>2)::text as result, + (value->>3)::text as status, + (value->>4)::int as attempts + FROM json_array_elements($1::json) + ) Q + WHERE apalis.jobs.id = Q.id; + "; if let Err(e) = sqlx::query(query) - .bind(serde_json::to_string(&ack_ids).unwrap()) + .bind(serde_json::to_value(&ack_ids).unwrap()) .execute(&pool) .await { - error!("AckError: {e}"); + panic!("AckError: {e}"); } } } @@ -207,13 +219,7 @@ impl Backend PostgresStorage { #[cfg(test)] mod tests { use crate::context::State; + use crate::sql_storage_tests; use super::*; use apalis_core::task::attempt::Attempt; - use email_service::Email; + use apalis_core::test_utils::DummyService; use chrono::Utc; + use email_service::Email; + + use apalis_core::generic_storage_test; + use apalis_core::test_utils::apalis_test_service_fn; + use apalis_core::test_utils::TestWrapper; + + generic_storage_test!(setup); + + sql_storage_tests!(setup::, PostgresStorage, Email); /// migrate DB and return a storage instance. - async fn setup() -> PostgresStorage { + async fn setup() -> PostgresStorage { let db_url = &std::env::var("DATABASE_URL").expect("No DATABASE_URL is specified"); let pool = PgPool::connect(&db_url).await.unwrap(); // Because connections cannot be shared across async runtime // (different runtimes are created for each test), // we don't share the storage and tests must be run sequentially. PostgresStorage::setup(&pool).await.unwrap(); - let storage = PostgresStorage::new(pool); + let mut storage = PostgresStorage::new(pool); + cleanup(&mut storage, &WorkerId::new("test-worker")).await; storage } /// rollback DB changes made by tests. /// Delete the following rows: - /// - jobs whose state is `Pending` or locked by `worker_id` + /// - jobs of the current type /// - worker identified by `worker_id` /// /// You should execute this function in the end of a test - async fn cleanup(storage: PostgresStorage, worker_id: &WorkerId) { + async fn cleanup(storage: &mut PostgresStorage, worker_id: &WorkerId) { let mut tx = storage .pool .acquire() .await .expect("failed to get connection"); - sqlx::query("Delete from apalis.jobs where lock_by = $1 or status = 'Pending'") + sqlx::query("Delete from apalis.jobs where job_type = $1 OR lock_by = $2") + .bind(storage.config.namespace()) .bind(worker_id.to_string()) .execute(&mut *tx) .await @@ -651,8 +669,6 @@ mod tests { .expect("failed to delete worker"); } - struct DummyService {} - fn example_email() -> Email { Email { subject: "Test Subject".to_string(), @@ -691,6 +707,8 @@ mod tests { } async fn get_job(storage: &mut PostgresStorage, job_id: &TaskId) -> Request { + // add a slight delay to allow background actions like ack to complete + apalis_core::sleep(Duration::from_secs(2)).await; storage .fetch_by_id(job_id) .await @@ -705,44 +723,15 @@ mod tests { let worker_id = register_worker(&mut storage).await; - let job = consume_one(&mut storage, &worker_id).await; - let ctx = job.get::().unwrap(); - assert_eq!(*ctx.status(), State::Running); - assert_eq!(*ctx.lock_by(), Some(worker_id.clone())); - // TODO: assert!(ctx.lock_at().is_some()); - - cleanup(storage, &worker_id).await; - } - - #[tokio::test] - async fn test_acknowledge_job() { - let mut storage = setup().await; - push_email(&mut storage, example_email()).await; - - let worker_id = register_worker(&mut storage).await; - let job = consume_one(&mut storage, &worker_id).await; let ctx = job.get::().unwrap(); let job_id = ctx.id(); - - storage - .ack(AckResponse { - acknowledger: job_id.clone(), - result: 
Ok("Success".to_string()), - worker: worker_id.clone(), - attempts: Attempt::new_with_value(0) - - }) - .await - .expect("failed to acknowledge the job"); - + // Refresh our job let job = get_job(&mut storage, job_id).await; let ctx = job.get::().unwrap(); - // TODO: Currently ack is done in the background - // assert_eq!(*ctx.status(), State::Done); - // assert!(ctx.done_at().is_some()); - - cleanup(storage, &worker_id).await; + assert_eq!(*ctx.status(), State::Running); + assert_eq!(*ctx.lock_by(), Some(worker_id.clone())); + assert!(ctx.lock_at().is_some()); } #[tokio::test] @@ -765,9 +754,7 @@ mod tests { let job = get_job(&mut storage, job_id).await; let ctx = job.get::().unwrap(); assert_eq!(*ctx.status(), State::Killed); - // TODO: assert!(ctx.done_at().is_some()); - - cleanup(storage, &worker_id).await; + assert!(ctx.done_at().is_some()); } #[tokio::test] @@ -793,8 +780,6 @@ mod tests { assert!(ctx.lock_by().is_none()); assert!(ctx.lock_at().is_none()); assert_eq!(*ctx.last_error(), Some("Job was abandoned".to_string())); - - cleanup(storage, &worker_id).await; } #[tokio::test] @@ -822,7 +807,5 @@ mod tests { assert_eq!(*ctx.status(), State::Running); assert_eq!(*ctx.lock_by(), Some(worker_id.clone())); - - cleanup(storage, &worker_id).await; } } diff --git a/packages/apalis-sql/src/sqlite.rs b/packages/apalis-sql/src/sqlite.rs index fba94bf0..5991e61c 100644 --- a/packages/apalis-sql/src/sqlite.rs +++ b/packages/apalis-sql/src/sqlite.rs @@ -14,9 +14,9 @@ use apalis_core::task::task_id::TaskId; use apalis_core::worker::WorkerId; use apalis_core::{Backend, BoxCodec}; use async_stream::try_stream; +use chrono::Utc; use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; use serde::{de::DeserializeOwned, Serialize}; -use chrono::Utc; use sqlx::{Pool, Row, Sqlite}; use std::any::type_name; use std::convert::TryInto; @@ -188,7 +188,6 @@ impl SqliteStorage { let config = self.config.clone(); try_stream! { loop { - apalis_core::sleep(interval).await; let tx = pool.clone(); let mut tx = tx.acquire().await?; let job_type = &config.namespace; @@ -217,6 +216,7 @@ impl SqliteStorage { } } }; + apalis_core::sleep(interval).await; } } } @@ -462,7 +462,7 @@ impl Backend Ack for SqliteStorage { .bind(res.worker.to_string()) .bind(result) .bind(calculate_status(&res.result).to_string()) - .bind(res.attempts.current() as i64) + .bind(res.attempts.current() as i64 + 1) .execute(&pool) .await?; Ok(()) @@ -503,15 +503,25 @@ impl Ack for SqliteStorage { mod tests { use crate::context::State; + use crate::sql_storage_tests; use super::*; use apalis_core::task::attempt::Attempt; + use apalis_core::test_utils::DummyService; + use chrono::Utc; + use email_service::example_good_email; use email_service::Email; use futures::StreamExt; - use chrono::Utc; + + use apalis_core::generic_storage_test; + use apalis_core::test_utils::apalis_test_service_fn; + use apalis_core::test_utils::TestWrapper; + + generic_storage_test!(setup); + sql_storage_tests!(setup::, SqliteStorage, Email); /// migrate DB and return a storage instance. - async fn setup() -> SqliteStorage { + async fn setup() -> SqliteStorage { // Because connections cannot be shared across async runtime // (different runtimes are created for each test), // we don't share the storage and tests must be run sequentially. 
@@ -519,7 +529,7 @@ mod tests { SqliteStorage::setup(&pool) .await .expect("failed to migrate DB"); - let storage = SqliteStorage::::new(pool); + let storage = SqliteStorage::::new(pool); storage } @@ -539,16 +549,6 @@ mod tests { assert_eq!(len, 1); } - struct DummyService {} - - fn example_email() -> Email { - Email { - subject: "Test Subject".to_string(), - to: "example@postgres".to_string(), - text: "Some Text".to_string(), - } - } - async fn consume_one( storage: &mut SqliteStorage, worker_id: &WorkerId, @@ -593,10 +593,12 @@ mod tests { #[tokio::test] async fn test_consume_last_pushed_job() { let mut storage = setup().await; - push_email(&mut storage, example_email()).await; - let worker_id = register_worker(&mut storage).await; + push_email(&mut storage, example_good_email()).await; + let len = storage.len().await.expect("Could not fetch the jobs count"); + assert_eq!(len, 1); + let job = consume_one(&mut storage, &worker_id).await; let ctx = job.get::().unwrap(); assert_eq!(*ctx.status(), State::Running); @@ -607,21 +609,20 @@ mod tests { #[tokio::test] async fn test_acknowledge_job() { let mut storage = setup().await; - push_email(&mut storage, example_email()).await; - let worker_id = register_worker(&mut storage).await; + push_email(&mut storage, example_good_email()).await; let job = consume_one(&mut storage, &worker_id).await; - let ctx = job.get::().unwrap(); - let job_id = ctx.id(); + let ctx = job.get::(); + assert!(ctx.is_some()); + let job_id = ctx.unwrap().id(); storage .ack(AckResponse { acknowledger: job_id.clone(), result: Ok("Success".to_string()), worker: worker_id.clone(), - attempts: Attempt::new_with_value(0) - + attempts: Attempt::new_with_value(1), }) .await .expect("failed to acknowledge the job"); @@ -636,7 +637,7 @@ mod tests { async fn test_kill_job() { let mut storage = setup().await; - push_email(&mut storage, example_email()).await; + push_email(&mut storage, example_good_email()).await; let worker_id = register_worker(&mut storage).await; @@ -659,7 +660,7 @@ mod tests { async fn test_heartbeat_renqueueorphaned_pulse_last_seen_6min() { let mut storage = setup().await; - push_email(&mut storage, example_email()).await; + push_email(&mut storage, example_good_email()).await; let six_minutes_ago = Utc::now() - Duration::from_secs(6 * 60); @@ -675,19 +676,18 @@ mod tests { let job_id = ctx.id(); let job = get_job(&mut storage, job_id).await; let ctx = job.get::().unwrap(); - // TODO: rework these assertions - // assert_eq!(*ctx.status(), State::Pending); - // assert!(ctx.done_at().is_none()); - // assert!(ctx.lock_by().is_none()); - // assert!(ctx.lock_at().is_none()); - // assert_eq!(*ctx.last_error(), Some("Job was abandoned".to_string())); + assert_eq!(*ctx.status(), State::Running); + assert!(ctx.done_at().is_none()); + assert!(ctx.lock_by().is_some()); + assert!(ctx.lock_at().is_some()); + assert_eq!(*ctx.last_error(), Some("".to_string())); //TODO: Fix this } #[tokio::test] async fn test_heartbeat_renqueueorphaned_pulse_last_seen_4min() { let mut storage = setup().await; - push_email(&mut storage, example_email()).await; + push_email(&mut storage, example_good_email()).await; let four_minutes_ago = Utc::now() - Duration::from_secs(4 * 60); let worker_id = register_worker_at(&mut storage, four_minutes_ago.timestamp()).await; diff --git a/src/layers/catch_panic/mod.rs b/src/layers/catch_panic/mod.rs index c9c17917..6fe95879 100644 --- a/src/layers/catch_panic/mod.rs +++ b/src/layers/catch_panic/mod.rs @@ -2,6 +2,7 @@ use std::fmt; use 
std::future::Future; use std::panic::{catch_unwind, AssertUnwindSafe}; use std::pin::Pin; +use std::sync::Arc; use std::task::{Context, Poll}; use apalis_core::error::Error; @@ -100,10 +101,10 @@ where } else { "Unknown panic".to_string() }; - Poll::Ready(Err(Error::Failed(Box::new(PanicError( + Poll::Ready(Err(Error::Failed(Arc::new(Box::new(PanicError( panic_info, Backtrace::new(), - ))))) + )))))) } } } From 6d2a1e288eda082600cc450509de91600fa23f12 Mon Sep 17 00:00:00 2001 From: Geoffrey Mureithi <95377562+geofmureithi@users.noreply.github.com> Date: Fri, 19 Jul 2024 09:50:29 +0300 Subject: [PATCH 34/59] bump: to 0.6.0-rc.4 (#377) --- Cargo.toml | 4 ++-- packages/apalis-core/Cargo.toml | 2 +- packages/apalis-core/src/lib.rs | 1 - packages/apalis-cron/Cargo.toml | 4 ++-- packages/apalis-cron/src/lib.rs | 2 +- packages/apalis-redis/Cargo.toml | 4 ++-- packages/apalis-sql/Cargo.toml | 4 ++-- packages/apalis-sql/src/from_row.rs | 1 - 8 files changed, 10 insertions(+), 12 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index a1510edd..ece12666 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apalis" -version = "0.6.0-rc.3" +version = "0.6.0-rc.4" authors = ["Geoffrey Mureithi "] description = "Simple, extensible multithreaded background job processing for Rust" repository = "https://github.com/geofmureithi/apalis" @@ -54,7 +54,7 @@ layers = [ docsrs = ["document-features"] [dependencies.apalis-core] -version = "0.6.0-rc.3" +version = "0.6.0-rc.4" default-features = false path = "./packages/apalis-core" diff --git a/packages/apalis-core/Cargo.toml b/packages/apalis-core/Cargo.toml index b04ad9cf..7afb7f59 100644 --- a/packages/apalis-core/Cargo.toml +++ b/packages/apalis-core/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apalis-core" -version = "0.6.0-rc.3" +version = "0.6.0-rc.4" authors = ["Njuguna Mureithi "] edition = "2021" license = "MIT" diff --git a/packages/apalis-core/src/lib.rs b/packages/apalis-core/src/lib.rs index 430014ae..ae664b56 100644 --- a/packages/apalis-core/src/lib.rs +++ b/packages/apalis-core/src/lib.rs @@ -174,7 +174,6 @@ impl crate::executor::Executor for TestExecutor { pub mod test_utils { use crate::error::BoxDynError; use crate::request::Request; - use crate::storage::Storage; use crate::task::task_id::TaskId; use crate::worker::WorkerId; use crate::Backend; diff --git a/packages/apalis-cron/Cargo.toml b/packages/apalis-cron/Cargo.toml index f6b42060..c79391e7 100644 --- a/packages/apalis-cron/Cargo.toml +++ b/packages/apalis-cron/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apalis-cron" -version = "0.6.0-rc.3" +version = "0.6.0-rc.4" edition = "2021" authors = ["Njuguna Mureithi "] license = "MIT" @@ -9,7 +9,7 @@ description = "A simple yet extensible library for cron-like job scheduling for # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -apalis-core = { path = "../../packages/apalis-core", version = "0.6.0-rc.3", default-features = false, features = [ +apalis-core = { path = "../../packages/apalis-core", version = "0.6.0-rc.4", default-features = false, features = [ "sleep", ] } cron = "0.12.1" diff --git a/packages/apalis-cron/src/lib.rs b/packages/apalis-cron/src/lib.rs index babcf245..3610d43a 100644 --- a/packages/apalis-cron/src/lib.rs +++ b/packages/apalis-cron/src/lib.rs @@ -47,7 +47,7 @@ //! let worker = WorkerBuilder::new("morning-cereal") //! .layer(RetryLayer::new(RetryPolicy::retries(5))) //! .data(FakeService) -//! 
.stream(CronStream::new(schedule).into_stream()) +//! .backend(CronStream::new(schedule)) //! .build_fn(send_reminder); //! Monitor::::new() //! .register(worker) diff --git a/packages/apalis-redis/Cargo.toml b/packages/apalis-redis/Cargo.toml index bd0742ed..240aae75 100644 --- a/packages/apalis-redis/Cargo.toml +++ b/packages/apalis-redis/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apalis-redis" -version = "0.6.0-rc.3" +version = "0.6.0-rc.4" authors = ["Njuguna Mureithi "] edition = "2021" readme = "../../README.md" @@ -11,7 +11,7 @@ description = "Redis Storage for apalis: use Redis for background jobs and messa # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -apalis-core = { path = "../../packages/apalis-core", version = "0.6.0-rc.3", default-features = false, features = [ +apalis-core = { path = "../../packages/apalis-core", version = "0.6.0-rc.4", default-features = false, features = [ "sleep", "json", ] } diff --git a/packages/apalis-sql/Cargo.toml b/packages/apalis-sql/Cargo.toml index b6c0431e..2498233f 100644 --- a/packages/apalis-sql/Cargo.toml +++ b/packages/apalis-sql/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apalis-sql" -version = "0.6.0-rc.3" +version = "0.6.0-rc.4" authors = ["Njuguna Mureithi "] edition = "2021" readme = "../../README.md" @@ -25,7 +25,7 @@ features = ["chrono"] [dependencies] serde = { version = "1", features = ["derive"] } serde_json = "1" -apalis-core = { path = "../../packages/apalis-core", version = "0.6.0-rc.3", default-features = false, features = [ +apalis-core = { path = "../../packages/apalis-core", version = "0.6.0-rc.4", default-features = false, features = [ "sleep", "json", ] } diff --git a/packages/apalis-sql/src/from_row.rs b/packages/apalis-sql/src/from_row.rs index 89d6628b..6c7886dd 100644 --- a/packages/apalis-sql/src/from_row.rs +++ b/packages/apalis-sql/src/from_row.rs @@ -131,7 +131,6 @@ impl<'r, T: Decode<'r, sqlx::Postgres> + Type> fn from_row(row: &'r sqlx::postgres::PgRow) -> Result { use sqlx::Row; use std::str::FromStr; - type Timestamp = i64; let job: T = row.try_get("job")?; let id: TaskId = From cc58602b7e284cf258ddcb78c01e5ea988109b0e Mon Sep 17 00:00:00 2001 From: Geoffrey Mureithi <95377562+geofmureithi@users.noreply.github.com> Date: Fri, 19 Jul 2024 10:59:57 +0300 Subject: [PATCH 35/59] fix: handle 0 retries (#378) --- src/layers/retry/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/layers/retry/mod.rs b/src/layers/retry/mod.rs index 53db3df3..7d455c07 100644 --- a/src/layers/retry/mod.rs +++ b/src/layers/retry/mod.rs @@ -45,8 +45,8 @@ where // so don't retry... 
None } - Err(_) if (self.retries - ctx.current() > 0) => Some(future::ready(self.clone())), Err(_) if self.retries == 0 => None, + Err(_) if (self.retries - ctx.current() > 0) => Some(future::ready(self.clone())), Err(_) => None, } } From be1674f7d2883ccf73bb9b292443d8bd0622c3f7 Mon Sep 17 00:00:00 2001 From: Geoffrey Mureithi <95377562+geofmureithi@users.noreply.github.com> Date: Thu, 25 Jul 2024 21:07:30 +0300 Subject: [PATCH 36/59] fix: ack api to allow backend to handle differently (#383) * fix: ack api to allow backend to handle differently * fix: related to storage tests * fix: calculate status for postgres --- examples/actix-web/src/main.rs | 1 + examples/redis-mq-example/src/main.rs | 33 +++-- packages/apalis-core/src/builder.rs | 17 ++- packages/apalis-core/src/layers.rs | 130 ++++++++---------- packages/apalis-core/src/lib.rs | 47 ++++--- packages/apalis-core/src/memory.rs | 13 +- packages/apalis-core/src/monitor/mod.rs | 33 +++-- packages/apalis-core/src/mq/mod.rs | 4 +- packages/apalis-core/src/request.rs | 12 +- packages/apalis-core/src/service_fn.rs | 4 +- packages/apalis-core/src/storage/mod.rs | 4 +- .../apalis-core/src/worker/buffer/service.rs | 19 +-- packages/apalis-core/src/worker/mod.rs | 107 +++++++------- packages/apalis-cron/src/lib.rs | 4 +- packages/apalis-redis/Cargo.toml | 2 + packages/apalis-redis/src/storage.rs | 110 ++++++++------- packages/apalis-sql/src/from_row.rs | 3 +- packages/apalis-sql/src/lib.rs | 7 +- packages/apalis-sql/src/mysql.rs | 49 ++++--- packages/apalis-sql/src/postgres.rs | 36 +++-- packages/apalis-sql/src/sqlite.rs | 41 +++--- 21 files changed, 365 insertions(+), 311 deletions(-) diff --git a/examples/actix-web/src/main.rs b/examples/actix-web/src/main.rs index c816302b..a4b73b64 100644 --- a/examples/actix-web/src/main.rs +++ b/examples/actix-web/src/main.rs @@ -46,6 +46,7 @@ async fn main() -> Result<()> { WorkerBuilder::new("tasty-avocado") .layer(TraceLayer::new()) .backend(storage) + // .chain(|svc|svc.map_err(|e| Box::new(e))) .build_fn(send_email) }) .run_with_signal(signal::ctrl_c()); diff --git a/examples/redis-mq-example/src/main.rs b/examples/redis-mq-example/src/main.rs index ec8b8de8..b9406ce2 100644 --- a/examples/redis-mq-example/src/main.rs +++ b/examples/redis-mq-example/src/main.rs @@ -1,13 +1,12 @@ -use std::{marker::PhantomData, time::Duration}; +use std::{fmt::Debug, marker::PhantomData, time::Duration}; -use anyhow::Result; use apalis::{layers::tracing::TraceLayer, prelude::*}; use apalis_redis::{self, Config, RedisCodec, RedisJob}; use apalis_core::{ codec::json::JsonCodec, - layers::{Ack, AckLayer, AckResponse}, + layers::{Ack, AckLayer}, }; use email_service::{send_email, Email}; use futures::{channel::mpsc, SinkExt}; @@ -34,15 +33,15 @@ impl Clone for RedisMq { } } -impl Backend> for RedisMq { +impl Backend, Res> for RedisMq { type Stream = RequestStream>; - type Layer = AckLayer; + type Layer = AckLayer; - fn poll(mut self, worker_id: WorkerId) -> Poller { + fn poll(mut self, _worker_id: WorkerId) -> Poller { let (mut tx, rx) = mpsc::channel(self.config.get_buffer_size()); let stream: RequestStream> = Box::pin(rx); - let layer = AckLayer::new(self.clone(), worker_id); + let layer = AckLayer::new(self.clone()); let heartbeat = async move { loop { sleep(*self.config.get_poll_interval()).await; @@ -63,14 +62,18 @@ impl Backend> for RedisMq { } } -impl Ack for RedisMq { - type Acknowledger = String; +impl Ack for RedisMq { + type Context = String; - type Error = RsmqError; + type AckError = RsmqError; - async fn 
ack(&mut self, ack: AckResponse) -> Result<(), Self::Error> { - println!("Attempting to ACK {}", ack.acknowledger); - self.conn.delete_message("email", &ack.acknowledger).await?; + async fn ack( + &mut self, + ctx: &Self::Context, + res: &Result, + ) -> Result<(), Self::AckError> { + println!("Attempting to ACK {:?}", res); + self.conn.delete_message("email", &ctx).await?; Ok(()) } } @@ -105,7 +108,7 @@ impl MessageQueue for RedisMq { } } -async fn produce_jobs(mq: &mut RedisMq) -> Result<()> { +async fn produce_jobs(mq: &mut RedisMq) -> anyhow::Result<()> { for index in 0..1 { mq.enqueue(Email { to: index.to_string(), @@ -118,7 +121,7 @@ async fn produce_jobs(mq: &mut RedisMq) -> Result<()> { } #[tokio::main] -async fn main() -> Result<()> { +async fn main() -> anyhow::Result<()> { std::env::set_var("RUST_LOG", "debug"); tracing_subscriber::fmt::init(); diff --git a/packages/apalis-core/src/builder.rs b/packages/apalis-core/src/builder.rs index 4d837f38..6ed8d3f3 100644 --- a/packages/apalis-core/src/builder.rs +++ b/packages/apalis-core/src/builder.rs @@ -70,10 +70,13 @@ impl WorkerBuilder { } /// Set the source to a backend that implements [Backend] - pub fn backend>, NJ>( + pub fn backend, Res>, NJ, Res: Send>( self, - backend: NS, - ) -> WorkerBuilder { + backend: NB, + ) -> WorkerBuilder + where + Serv: Service, Response = Res>, + { WorkerBuilder { request: PhantomData, layer: self.layer, @@ -131,8 +134,12 @@ impl WorkerBuilder { } } -impl> + 'static, M: 'static, S> - WorkerFactory for WorkerBuilder +impl< + Req: Send + 'static + Sync, + P: Backend, S::Response> + 'static, + M: 'static, + S, + > WorkerFactory for WorkerBuilder where S: Service> + Send + 'static + Clone + Sync, S::Future: Send, diff --git a/packages/apalis-core/src/layers.rs b/packages/apalis-core/src/layers.rs index 82f9ca04..891be4a5 100644 --- a/packages/apalis-core/src/layers.rs +++ b/packages/apalis-core/src/layers.rs @@ -1,9 +1,9 @@ -use crate::task::attempt::Attempt; -use crate::{request::Request, worker::WorkerId}; +use crate::error::{BoxDynError, Error}; +use crate::request::Request; use futures::channel::mpsc::{SendError, Sender}; use futures::SinkExt; use futures::{future::BoxFuture, Future, FutureExt}; -use serde::{Deserialize, Serialize}; +use serde::Serialize; use std::marker::PhantomData; use std::{fmt, sync::Arc}; pub use tower::{ @@ -157,148 +157,136 @@ pub mod extensions { /// A trait for acknowledging successful processing /// This trait is called even when a task fails. 
/// This is a way of a [`Backend`] to save the result of a job or message -pub trait Ack { +pub trait Ack { /// The data to fetch from context to allow acknowledgement - type Acknowledger; + type Context; /// The error returned by the ack - type Error: std::error::Error; + type AckError: std::error::Error; + /// Acknowledges successful processing of the given request fn ack( &mut self, - response: AckResponse, - ) -> impl Future> + Send; -} - -/// ACK response -#[derive(Debug, Serialize, Deserialize)] -pub struct AckResponse { - /// The worker id - pub worker: WorkerId, - /// The acknowledger - pub acknowledger: A, - /// The stringified result - pub result: Result, - /// The number of attempts made by the request - pub attempts: Attempt, + ctx: &Self::Context, + result: &Result, + ) -> impl Future> + Send; } -/// A generic stream that emits (worker_id, task_id) -#[derive(Debug, Clone)] -pub struct AckStream(pub Sender>); - -impl Ack for AckStream { - type Acknowledger = A; - type Error = SendError; - fn ack( +impl Ack + for Sender<(Ctx, Result)> +{ + type AckError = SendError; + type Context = Ctx; + async fn ack( &mut self, - response: AckResponse, - ) -> impl Future> + Send { - self.0.send(response).boxed() + ctx: &Self::Context, + result: &Result, + ) -> Result<(), Self::AckError> { + let ctx = ctx.clone(); + self.send((ctx, result.clone())).await.unwrap(); + Ok(()) } } /// A layer that acknowledges a job completed successfully #[derive(Debug)] -pub struct AckLayer, J> { +pub struct AckLayer { ack: A, job_type: PhantomData, - worker_id: WorkerId, + res: PhantomData, } -impl, J> AckLayer { +impl AckLayer { /// Build a new [AckLayer] for a job - pub fn new(ack: A, worker_id: WorkerId) -> Self { + pub fn new(ack: A) -> Self { Self { ack, job_type: PhantomData, - worker_id, + res: PhantomData, } } } -impl Layer for AckLayer +impl Layer for AckLayer where S: Service> + Send + 'static, S::Error: std::error::Error + Send + Sync + 'static, S::Future: Send + 'static, - A: Ack + Clone + Send + Sync + 'static, + A: Ack + Clone + Send + Sync + 'static, { - type Service = AckService; + type Service = AckService; fn layer(&self, service: S) -> Self::Service { AckService { service, ack: self.ack.clone(), job_type: PhantomData, - worker_id: self.worker_id.clone(), + res: PhantomData, } } } /// The underlying service for an [AckLayer] #[derive(Debug)] -pub struct AckService { +pub struct AckService { service: SV, ack: A, job_type: PhantomData, - worker_id: WorkerId, + res: PhantomData, } -impl Clone for AckService { +impl Clone for AckService { fn clone(&self) -> Self { Self { ack: self.ack.clone(), job_type: PhantomData, - worker_id: self.worker_id.clone(), service: self.service.clone(), + res: PhantomData, } } } -impl Service> for AckService +impl Service> for AckService where SV: Service> + Send + Sync + 'static, - SV::Error: std::error::Error + Send + Sync + 'static, + >>::Error: Into + Send + Sync + 'static, >>::Future: std::marker::Send + 'static, - A: Ack + Send + 'static + Clone + Send + Sync, - T: 'static, - >>::Response: std::marker::Send + fmt::Debug + Sync, - >::Acknowledger: Sync + Send + Clone, + A: Ack>>::Response> + Send + 'static + Clone + Send + Sync, + T: 'static + Send, + >>::Response: std::marker::Send + fmt::Debug + Sync + Serialize, + >::Context: Sync + Send + Clone, { type Response = SV::Response; - type Error = SV::Error; + type Error = Error; type Future = BoxFuture<'static, Result>; fn poll_ready( &mut self, cx: &mut std::task::Context<'_>, ) -> std::task::Poll> { - 
self.service.poll_ready(cx) + self.service + .poll_ready(cx) + .map_err(|e| Error::Failed(Arc::new(e.into()))) } fn call(&mut self, request: Request) -> Self::Future { let mut ack = self.ack.clone(); - let worker_id = self.worker_id.clone(); - let data = request.get::<>::Acknowledger>().cloned(); - let attempts = request.get::().cloned().unwrap_or_default(); + let data = request + .get::<>::Context>() + .cloned(); let fut = self.service.call(request); let fut_with_ack = async move { - let res = fut.await; - let result = res - .as_ref() - .map(|ok| format!("{ok:?}")) - .map_err(|e| e.to_string()); - if let Some(task_id) = data { - if let Err(_e) = ack - .ack(AckResponse { - worker: worker_id, - acknowledger: task_id, - result, - attempts, - }) - .await - { + let res = fut.await.map_err(|err| { + let e: BoxDynError = err.into(); + // Try to downcast the error to see if it is already of type `Error` + if let Some(custom_error) = e.downcast_ref::() { + return custom_error.clone(); + } + Error::Failed(Arc::new(e)) + }); + + if let Some(ctx) = data { + if let Err(_e) = ack.ack(&ctx, &res).await { // TODO: Implement tracing in apalis core // tracing::error!("Acknowledgement Failed: {}", e); } diff --git a/packages/apalis-core/src/lib.rs b/packages/apalis-core/src/lib.rs index ae664b56..caa9fc6a 100644 --- a/packages/apalis-core/src/lib.rs +++ b/packages/apalis-core/src/lib.rs @@ -26,6 +26,7 @@ use std::sync::Arc; use futures::Stream; use poller::Poller; +use tower::Service; use worker::WorkerId; /// Represent utilities for creating worker instances. @@ -72,7 +73,7 @@ pub mod codec; /// /// [`Storage`]: crate::storage::Storage /// [`MessageQueue`]: crate::mq::MessageQueue -pub trait Backend { +pub trait Backend { /// The stream to be produced by the backend type Stream: Stream, crate::error::Error>>; @@ -80,7 +81,10 @@ pub trait Backend { type Layer; /// Returns a poller that is ready for streaming - fn poll(self, worker: WorkerId) -> Poller; + fn poll>( + self, + worker: WorkerId, + ) -> Poller; } /// This allows encoding and decoding of requests in different backends @@ -209,10 +213,11 @@ pub mod test_utils { /// A generic backend wrapper that polls and executes jobs #[derive(Debug)] - pub struct TestWrapper { + pub struct TestWrapper { stop_tx: Sender<()>, res_rx: Receiver<(TaskId, Result)>, _p: PhantomData, + _r: PhantomData, backend: B, } /// A test wrapper to allow you to test without requiring a worker. 
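
Before the remaining hunks, it is worth making the reworked acknowledgement contract concrete. A hedged sketch of a custom `Ack` implementor under the new two-parameter API; `LoggingAck` is hypothetical, and a real backend would persist the result the way the Redis and SQL storages do:

```rust
use apalis_core::error::Error;
use apalis_core::layers::Ack;
use apalis_core::task::task_id::TaskId;
use std::fmt::Debug;

// Hypothetical acknowledger: it only logs, but its shape matches the trait.
#[derive(Clone, Debug)]
struct LoggingAck;

impl<Task: Send, Res: Debug + Send + Sync> Ack<Task, Res> for LoggingAck {
    // The layer clones this out of the request extensions before calling
    // the inner service, so it should be cheap to clone.
    type Context = TaskId;
    type AckError = std::convert::Infallible;

    async fn ack(
        &mut self,
        ctx: &Self::Context,
        result: &Result<Res, Error>,
    ) -> Result<(), Self::AckError> {
        // A storage-backed implementation would mark the job Done/Failed here.
        println!("task {ctx:?} finished: {result:?}");
        Ok(())
    }
}
```

Wiring this in is then a matter of returning `AckLayer::new(LoggingAck)` from the backend's `poll`, as the Redis message-queue example earlier in this patch does.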
@@ -247,9 +252,9 @@ pub mod test_utils {
     ///     }
     ///}
     /// ````
-    impl<B, Req> TestWrapper<B, Req>
+    impl<B, Req, Res> TestWrapper<B, Req, Res>
     where
-        B: Backend<Request<Req>> + Send + Sync + 'static + Clone,
+        B: Backend<Request<Req>, Res> + Send + Sync + 'static + Clone,
         Req: Send + 'static,
         B::Stream: Send + 'static,
         B::Stream: Stream<Item = Result<Option<Request<Req>>, crate::error::Error>> + Unpin,
@@ -257,16 +262,23 @@ pub mod test_utils {
         /// Build a new instance provided a custom service
         pub fn new_with_service<S>(backend: B, service: S) -> (Self, BoxFuture<'static, ()>)
         where
-            S: Service<Request<Req>> + Send + 'static,
+            S: Service<Request<Req>, Response = Res> + Send + 'static,
             B::Layer: Layer<S>,
-            <<B as Backend<Request<Req>>>::Layer as Layer<S>>::Service: Service<Request<Req>> + Send + 'static,
-            <<<B as Backend<Request<Req>>>::Layer as Layer<S>>::Service as Service<Request<Req>>>::Response: Send + Debug,
-            <<<B as Backend<Request<Req>>>::Layer as Layer<S>>::Service as Service<Request<Req>>>::Error: Send + Into<BoxDynError> + Sync,
-            <<<B as Backend<Request<Req>>>::Layer as Layer<S>>::Service as Service<Request<Req>>>::Future: Send + 'static,
+            <<B as Backend<Request<Req>, Res>>::Layer as Layer<S>>::Service:
+                Service<Request<Req>> + Send + 'static,
+            <<<B as Backend<Request<Req>, Res>>::Layer as Layer<S>>::Service as Service<
+                Request<Req>,
+            >>::Response: Send + Debug,
+            <<<B as Backend<Request<Req>, Res>>::Layer as Layer<S>>::Service as Service<
+                Request<Req>,
+            >>::Error: Send + Into<BoxDynError> + Sync,
+            <<<B as Backend<Request<Req>, Res>>::Layer as Layer<S>>::Service as Service<
+                Request<Req>,
+            >>::Future: Send + 'static,
         {
             let worker_id = WorkerId::new("test-worker");
             let b = backend.clone();
-            let mut poller = b.poll(worker_id);
+            let mut poller = b.poll::<S>(worker_id);
             let (stop_tx, mut stop_rx) = channel::<()>(1);
             let (mut res_tx, res_rx) = channel(10);
@@ -307,11 +319,12 @@ pub mod test_utils {
                 }
             };
             (
-                Self {
+                TestWrapper {
                     stop_tx,
                     res_rx,
                     _p: PhantomData,
                     backend,
+                    _r: PhantomData,
                 },
                 poller.boxed(),
             )
@@ -319,7 +332,7 @@ pub mod test_utils {
 
         /// Stop polling
         pub fn stop(mut self) {
-            let _ = self.stop_tx.send(());
+            self.stop_tx.try_send(()).unwrap();
         }
 
         /// Gets the current state of results
@@ -328,9 +341,9 @@ pub mod test_utils {
         }
     }
 
-    impl<B, Req> Deref for TestWrapper<B, Req>
+    impl<B, Req, Res> Deref for TestWrapper<B, Req, Res>
     where
-        B: Backend<Request<Req>>,
+        B: Backend<Request<Req>, Res>,
     {
         type Target = B;
 
@@ -339,9 +352,9 @@ pub mod test_utils {
         }
     }
 
-    impl<B, Req> DerefMut for TestWrapper<B, Req>
+    impl<B, Req, Res> DerefMut for TestWrapper<B, Req, Res>
     where
-        B: Backend<Request<Req>>,
+        B: Backend<Request<Req>, Res>,
     {
         fn deref_mut(&mut self) -> &mut Self::Target {
             &mut self.backend
diff --git a/packages/apalis-core/src/memory.rs b/packages/apalis-core/src/memory.rs
index 250190c4..b8f08b45 100644
--- a/packages/apalis-core/src/memory.rs
+++ b/packages/apalis-core/src/memory.rs
@@ -96,12 +96,12 @@ impl<T: Send + 'static> Stream for MemoryWrapper<T> {
 }
 
 // MemoryStorage as a Backend
-impl<T: Send + 'static + Sync> Backend<Request<T>> for MemoryStorage<T> {
+impl<T: Send + 'static + Sync, Res> Backend<Request<T>, Res> for MemoryStorage<T> {
     type Stream = BackendStream<RequestStream<Request<T>>>;
 
     type Layer = Identity;
 
-    fn poll(self, _worker: WorkerId) -> Poller<Self::Stream> {
+    fn poll<Svc>(self, _worker: WorkerId) -> Poller<Self::Stream> {
         let stream = self.inner.map(|r| Ok(Some(r))).boxed();
         Poller {
             stream: BackendStream::new(stream, self.controller),
@@ -122,7 +122,14 @@ impl<Message: Send + 'static + Sync> MessageQueue<Message> for MemoryStorage<Message> {
 
     async fn dequeue(&mut self) -> Result<Option<Message>, ()> {
-        Ok(self.inner.receiver.lock().await.next().await.map(|r| r.req))
+        Ok(self
+            .inner
+            .receiver
+            .lock()
+            .await
+            .next()
+            .await
+            .map(|r| r.args))
     }
 
     async fn size(&mut self) -> Result<usize, ()> {
diff --git a/packages/apalis-core/src/monitor/mod.rs b/packages/apalis-core/src/monitor/mod.rs
index beee0f56..82d19c4a 100644
--- a/packages/apalis-core/src/monitor/mod.rs
+++ b/packages/apalis-core/src/monitor/mod.rs
@@ -5,6 +5,7 @@ use std::{
 };
 
 use futures::{future::BoxFuture, Future, FutureExt};
+use serde::Serialize;
 use tower::{Layer, Service};
 
 mod shutdown;
@@ -81,22 +82,23 @@ impl<E: Executor + Clone + Send + 'static + Sync> Monitor<E> {
     pub fn register<
         J: Send + Sync + 'static,
         S: Service<Request<J>> + Send + 'static,
-        P: Backend<Request<J>> + 'static,
+        P: Backend<Request<J>, Res> + 'static,
+        Res: 'static,
     >(
         mut self,
         worker: Worker<Ready<S, P>>,
     ) -> Self
     where
         S::Future: Send,
-        S::Response: 'static,
+        S::Response: 'static + Send + Sync + Serialize,
         S::Error: Send + Sync + 'static + Into<BoxDynError>,
-        <P as Backend<Request<J>>>::Stream: Unpin + Send + 'static,
+        <P as Backend<Request<J>, Res>>::Stream: Unpin + Send + 'static,
         P::Layer: Layer<S>,
-        <<P as Backend<Request<J>>>::Layer as Layer<S>>::Service: Service<Request<J>>,
-        <<P as Backend<Request<J>>>::Layer as Layer<S>>::Service: Send,
-        <<<P as Backend<Request<J>>>::Layer as Layer<S>>::Service as Service<Request<J>>>::Future:
-            Send,
+        <<P as Backend<Request<J>, Res>>::Layer as Layer<S>>::Service: Service<Request<J>, Response = Res>,
+        <<P as Backend<Request<J>, Res>>::Layer as Layer<S>>::Service: Send,
+        <<<P as Backend<Request<J>, Res>>::Layer as Layer<S>>::Service as Service<Request<J>>>::Future:
+            Send,
-        <<<P as Backend<Request<J>>>::Layer as Layer<S>>::Service as Service<Request<J>>>::Error:
+        <<<P as Backend<Request<J>, Res>>::Layer as Layer<S>>::Service as Service<Request<J>>>::Error:
             Send + Into<BoxDynError> + Sync,
     {
         self.workers.push(worker.with_monitor(&self));
@@ -117,7 +119,8 @@ impl<E: Executor + Clone + Send + 'static + Sync> Monitor<E> {
     pub fn register_with_count<
         J: Send + Sync + 'static,
         S: Service<Request<J>> + Send + 'static,
-        P: Backend<Request<J>> + 'static,
+        P: Backend<Request<J>, Res> + 'static,
+        Res: 'static + Send,
     >(
         mut self,
         count: usize,
         worker: Worker<Ready<S, P>>,
     ) -> Self
     where
         S::Future: Send,
-        S::Response: 'static,
+        S::Response: 'static + Send + Sync + Serialize,
         S::Error: Send + Sync + 'static + Into<BoxDynError>,
-        <P as Backend<Request<J>>>::Stream: Unpin + Send + 'static,
+        <P as Backend<Request<J>, Res>>::Stream: Unpin + Send + 'static,
         P::Layer: Layer<S>,
-        <<P as Backend<Request<J>>>::Layer as Layer<S>>::Service: Service<Request<J>>,
-        <<P as Backend<Request<J>>>::Layer as Layer<S>>::Service: Send,
-        <<<P as Backend<Request<J>>>::Layer as Layer<S>>::Service as Service<Request<J>>>::Future:
-            Send,
-        <<<P as Backend<Request<J>>>::Layer as Layer<S>>::Service as Service<Request<J>>>::Error:
-            Send + std::error::Error + Sync,
+        <<P as Backend<Request<J>, Res>>::Layer as Layer<S>>::Service: Service<Request<J>, Response = Res>,
+        <<P as Backend<Request<J>, Res>>::Layer as Layer<S>>::Service: Send,
+        <<<P as Backend<Request<J>, Res>>::Layer as Layer<S>>::Service as Service<Request<J>>>::Future:
+            Send,
+        <<<P as Backend<Request<J>, Res>>::Layer as Layer<S>>::Service as Service<Request<J>>>::Error:
+            Send + Into<BoxDynError> + Sync,
     {
         let workers = worker.with_monitor_instances(count, &self);
         self.workers.extend(workers);
diff --git a/packages/apalis-core/src/mq/mod.rs b/packages/apalis-core/src/mq/mod.rs
index 217695ca..e6d9a2db 100644
--- a/packages/apalis-core/src/mq/mod.rs
+++ b/packages/apalis-core/src/mq/mod.rs
@@ -4,10 +4,8 @@
 
 use futures::Future;
 
-use crate::{request::Request, Backend};
-
 /// Represents a message queue that can be pushed and consumed.
-pub trait MessageQueue<Message>: Backend<Request<Message>> {
+pub trait MessageQueue<Message> {
     /// The error produced by the queue
     type Error;
diff --git a/packages/apalis-core/src/request.rs b/packages/apalis-core/src/request.rs
index ac381472..ca0dc63b 100644
--- a/packages/apalis-core/src/request.rs
+++ b/packages/apalis-core/src/request.rs
@@ -13,7 +13,7 @@ use crate::{
 
 #[derive(Serialize, Debug, Deserialize, Clone)]
 pub struct Request<T> {
-    pub(crate) req: T,
+    pub(crate) args: T,
     #[serde(skip)]
     pub(crate) data: Extensions,
 }
@@ -29,17 +29,17 @@ impl<T> Request<T> {
 
     /// Creates a request with context provided
     pub fn new_with_data(req: T, data: Extensions) -> Self {
-        Self { req, data }
+        Self { args: req, data }
     }
 
     /// Get the underlying reference of the request
     pub fn inner(&self) -> &T {
-        &self.req
+        &self.args
     }
 
     /// Take the underlying reference of the request
     pub fn take(self) -> T {
-        self.req
+        self.args
     }
 }
@@ -64,12 +64,12 @@ pub type RequestFuture<T> = BoxFuture<'static, T>;
 /// Represents a stream for T.
 pub type RequestStream<T> = BoxStream<'static, Result<Option<T>, Error>>;
 
-impl<T> Backend<Request<T>> for RequestStream<Request<T>> {
+impl<T, Res> Backend<Request<T>, Res> for RequestStream<Request<T>> {
     type Stream = Self;
 
     type Layer = Identity;
 
-    fn poll(self, _worker: WorkerId) -> Poller<Self::Stream> {
+    fn poll<Svc>(self, _worker: WorkerId) -> Poller<Self::Stream> {
         Poller {
             stream: self,
             heartbeat: Box::pin(async {}),
diff --git a/packages/apalis-core/src/service_fn.rs b/packages/apalis-core/src/service_fn.rs
index 66007e4d..d89e4dfe 100644
--- a/packages/apalis-core/src/service_fn.rs
+++ b/packages/apalis-core/src/service_fn.rs
@@ -67,7 +67,7 @@ macro_rules! impl_service_fn {
             }
 
             fn call(&mut self, task: Request<Req>) -> Self::Future {
-                let fut = (self.f)(task.req, $($K::get(&task.data)),+);
+                let fut = (self.f)(task.args, $($K::get(&task.data)),+);
                 fut.map(F::Output::into_response)
             }
@@ -90,7 +90,7 @@ where
     }
 
     fn call(&mut self, task: Request<Req>) -> Self::Future {
-        let fut = (self.f)(task.req);
+        let fut = (self.f)(task.args);
         fut.map(F::Output::into_response)
     }
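
With `MessageQueue` no longer requiring `Backend` (the mq/mod.rs hunk above), a queue can now be implemented in isolation. A minimal sketch against the trait as shown; `VecQueue` is a hypothetical type, not part of the patch:

use std::collections::VecDeque;
use std::sync::Arc;

use apalis_core::mq::MessageQueue;
use futures::lock::Mutex;

/// A hypothetical in-memory queue, illustrative only.
#[derive(Clone, Default)]
struct VecQueue<M>(Arc<Mutex<VecDeque<M>>>);

impl<M: Send + 'static> MessageQueue<M> for VecQueue<M> {
    type Error = std::convert::Infallible;

    async fn enqueue(&mut self, message: M) -> Result<(), Self::Error> {
        self.0.lock().await.push_back(message);
        Ok(())
    }

    async fn dequeue(&mut self) -> Result<Option<M>, Self::Error> {
        Ok(self.0.lock().await.pop_front())
    }

    async fn size(&mut self) -> Result<usize, Self::Error> {
        Ok(self.0.lock().await.len())
    }
}

This decoupling is also what lets the `redis-mq-example` later in this series implement `MessageQueue` over RSMQ while providing `Backend` separately.
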
diff --git a/packages/apalis-core/src/storage/mod.rs b/packages/apalis-core/src/storage/mod.rs
index 5e2aedb3..f3b26fe3 100644
--- a/packages/apalis-core/src/storage/mod.rs
+++ b/packages/apalis-core/src/storage/mod.rs
@@ -2,13 +2,13 @@ use std::time::Duration;
 
 use futures::{stream::BoxStream, Future};
 
-use crate::{request::Request, Backend};
+use crate::request::Request;
 
 /// The result of a stream produced by a [Storage]
 pub type StorageStream<T, E> = BoxStream<'static, Result<Option<Request<T>>, E>>;
 
 /// Represents a [Storage] that can persist a request.
-pub trait Storage: Backend<Request<Self::Job>> {
+pub trait Storage {
     /// The type of job that can be persisted
     type Job;
diff --git a/packages/apalis-core/src/worker/buffer/service.rs b/packages/apalis-core/src/worker/buffer/service.rs
index a176764c..a4a2764e 100644
--- a/packages/apalis-core/src/worker/buffer/service.rs
+++ b/packages/apalis-core/src/worker/buffer/service.rs
@@ -6,23 +6,24 @@ use super::{
 
 use futures::channel::{mpsc, oneshot};
 use futures::task::AtomicWaker;
-use std::sync::Arc;
 use std::{
     future::Future,
     task::{Context, Poll},
 };
+use std::{marker::PhantomData, sync::Arc};
 use tower::Service;
 
 /// Adds an mpsc buffer in front of an inner service.
 ///
 /// See the module documentation for more details.
 #[derive(Debug)]
-pub struct Buffer<Req, F> {
+pub struct Buffer<Req, F, Res> {
     tx: PollSender<Message<Req, F>>,
     handle: Handle,
+    res: PhantomData<Res>,
 }
 
-impl<Req, F> Buffer<Req, F>
+impl<Req, F, Res> Buffer<Req, F, Res>
 where
     F: 'static,
 {
@@ -33,7 +34,7 @@ where
     /// the background `Worker` that you can then spawn.
     pub fn pair<S>(service: S, bound: usize) -> (Self, Worker<S, Req>)
     where
-        S: Service<Req, Future = F> + Send + 'static,
+        S: Service<Req, Response = Res, Future = F> + Send + 'static,
         F: Send,
         S::Error: Into<BoxError> + Send + Sync,
         Req: Send + 'static,
@@ -43,6 +44,7 @@ where
         let buffer = Self {
             tx: PollSender::new(tx),
             handle,
+            res: PhantomData,
         };
         (buffer, worker)
     }
@@ -52,13 +54,13 @@ where
     }
 }
 
-impl<Req, Rsp, F, E> Service<Req> for Buffer<Req, F>
+impl<Req, Res, F, E> Service<Req> for Buffer<Req, F, Res>
 where
-    F: Future<Output = Result<Rsp, E>> + Send + 'static,
+    F: Future<Output = Result<Res, E>> + Send + 'static,
     E: Into<BoxError>,
     Req: Send + 'static,
 {
-    type Response = Rsp;
+    type Response = Res;
     type Error = tower::BoxError;
     type Future = ResponseFuture<F>;
 
@@ -84,7 +86,7 @@ where
     }
 }
 
-impl<Req, F> Clone for Buffer<Req, F>
+impl<Req, F, Res> Clone for Buffer<Req, F, Res>
 where
     Req: Send + 'static,
     F: Send + 'static,
 {
     fn clone(&self) -> Self {
         Self {
             handle: self.handle.clone(),
             tx: self.tx.clone(),
+            res: PhantomData,
         }
     }
 }
diff --git a/packages/apalis-core/src/worker/mod.rs b/packages/apalis-core/src/worker/mod.rs
index 6d911ecf..e5eb6b88 100644
--- a/packages/apalis-core/src/worker/mod.rs
+++ b/packages/apalis-core/src/worker/mod.rs
@@ -219,29 +219,30 @@ impl<E: Executor + Send + Clone + 'static> Worker<Context<E>> {
 
 impl<S, P> Worker<Ready<S, P>> {
     /// Start a worker with a custom executor
-    pub fn with_executor<E, J>(self, executor: E) -> Worker<Context<E>>
+    pub fn with_executor<E, J, Res>(self, executor: E) -> Worker<Context<E>>
     where
         S: Service<Request<J>> + Send + 'static,
-        P: Backend<Request<J>> + 'static,
+        P: Backend<Request<J>, Res> + 'static,
         J: Send + 'static + Sync,
         S::Future: Send,
-        S::Response: 'static,
+        S::Response: 'static + Send + Sync + Serialize,
         S::Error: Send + Sync + 'static + Into<BoxDynError>,
         S::Error: Send + Sync + 'static + Into<BoxDynError>,
-        <P as Backend<Request<J>>>::Stream: Unpin + Send + 'static,

+        <P as Backend<Request<J>, Res>>::Stream: Unpin + Send + 'static,
         E: Executor + Clone + Send + 'static + Sync,
         P::Layer: Layer<S>,
-        <<P as Backend<Request<J>>>::Layer as Layer<S>>::Service: Service<Request<J>>,
-        <<P as Backend<Request<J>>>::Layer as Layer<S>>::Service: Send,
-        <<<P as Backend<Request<J>>>::Layer as Layer<S>>::Service as Service<Request<J>>>::Future:
-            Send,
+        <<P as Backend<Request<J>, Res>>::Layer as Layer<S>>::Service: Service<Request<J>, Response = Res> + 'static,
+        <<P as Backend<Request<J>, Res>>::Layer as Layer<S>>::Service: Send,
+        <<<P as Backend<Request<J>, Res>>::Layer as Layer<S>>::Service as Service<Request<J>>>::Future:
+            Send,
-        <<<P as Backend<Request<J>>>::Layer as Layer<S>>::Service as Service<Request<J>>>::Error:
+        <<<P as Backend<Request<J>, Res>>::Layer as Layer<S>>::Service as Service<Request<J>>>::Error:
             Send + std::error::Error + Sync,
     {
         let notifier = Notify::new();
         let service = self.state.service;
         let backend = self.state.backend;
-        let poller = backend.poll(self.id.clone());
+        let poller = backend
+            .poll::<<<P as Backend<Request<J>, Res>>::Layer as Layer<S>>::Service>(self.id.clone());
         let polling = poller.heartbeat.shared();
         let default_layer = poller.layer;
         let service = default_layer.layer(service);
@@ -260,30 +261,33 @@ impl<S, P> Worker<Ready<S, P>> {
     }
 
     /// Run as a monitored worker
-    pub fn with_monitor<E, J>(self, monitor: &Monitor<E>) -> Worker<Context<E>>
+    pub fn with_monitor<E, J, Res>(self, monitor: &Monitor<E>) -> Worker<Context<E>>
     where
         S: Service<Request<J>> + Send + 'static,
-        P: Backend<Request<J>> + 'static,
+        P: Backend<Request<J>, Res> + 'static,
         J: Send + 'static + Sync,
         S::Future: Send,
-        S::Response: 'static,
+        S::Response: 'static + Send + Sync + Serialize,
         S::Error: Send + Sync + 'static + Into<BoxDynError>,
-        <P as Backend<Request<J>>>::Stream: Unpin + Send + 'static,

+        <P as Backend<Request<J>, Res>>::Stream: Unpin + Send + 'static,
         E: Executor + Clone + Send + 'static + Sync,
         P::Layer: Layer<S>,
-        <<P as Backend<Request<J>>>::Layer as Layer<S>>::Service: Service<Request<J>>,
-        <<P as Backend<Request<J>>>::Layer as Layer<S>>::Service: Send,
-        <<<P as Backend<Request<J>>>::Layer as Layer<S>>::Service as Service<Request<J>>>::Future:
-            Send,
-        <<<P as Backend<Request<J>>>::Layer as Layer<S>>::Service as Service<Request<J>>>::Error:
-            Send + Into<BoxDynError> + Sync,
+        <<P as Backend<Request<J>, Res>>::Layer as Layer<S>>::Service: Service<Request<J>, Response = Res>,
+        <<P as Backend<Request<J>, Res>>::Layer as Layer<S>>::Service: Send,
+        <<<P as Backend<Request<J>, Res>>::Layer as Layer<S>>::Service as Service<
+            Request<J>,
+        >>::Future: Send,
+        <<<P as Backend<Request<J>, Res>>::Layer as Layer<S>>::Service as Service<
+            Request<J>,
+        >>::Error: Send + Into<BoxDynError> + Sync,
     {
         let notifier = Notify::new();
         let service = self.state.service;
         let backend = self.state.backend;
         let executor = monitor.executor().clone();
         let context = monitor.context().clone();
-        let poller = backend.poll(self.id.clone());
+        let poller = backend
+            .poll::<<<P as Backend<Request<J>, Res>>::Layer as Layer<S>>::Service>(self.id.clone());
         let default_layer = poller.layer;
         let service = default_layer.layer(service);
         let polling = poller.heartbeat.shared();
@@ -302,34 +306,37 @@ impl<S, P> Worker<Ready<S, P>> {
     }
 
     /// Run a specified amount of instances
-    pub fn with_monitor_instances<E, J>(
+    pub fn with_monitor_instances<E, J, Res>(
         self,
         instances: usize,
         monitor: &Monitor<E>,
     ) -> Vec<Worker<Context<E>>>
     where
         S: Service<Request<J>> + Send + 'static,
-        P: Backend<Request<J>> + 'static,
+        P: Backend<Request<J>, Res> + 'static,
         J: Send + 'static + Sync,
         S::Future: Send,
-        S::Response: 'static,
+        S::Response: 'static + Send + Sync + Serialize,
         S::Error: Send + Sync + 'static + Into<BoxDynError>,
-        <P as Backend<Request<J>>>::Stream: Unpin + Send + 'static,

+        <P as Backend<Request<J>, Res>>::Stream: Unpin + Send + 'static,
         E: Executor + Clone + Send + 'static + Sync,
         P::Layer: Layer<S>,
-        <<P as Backend<Request<J>>>::Layer as Layer<S>>::Service: Service<Request<J>>,
-        <<P as Backend<Request<J>>>::Layer as Layer<S>>::Service: Send,
-        <<<P as Backend<Request<J>>>::Layer as Layer<S>>::Service as Service<Request<J>>>::Future:
-            Send,
-        <<<P as Backend<Request<J>>>::Layer as Layer<S>>::Service as Service<Request<J>>>::Error:
-            Send + Into<BoxDynError> + Sync,
+        <<P as Backend<Request<J>, Res>>::Layer as Layer<S>>::Service: Service<Request<J>, Response = Res>,
+        <<P as Backend<Request<J>, Res>>::Layer as Layer<S>>::Service: Send,
+        <<<P as Backend<Request<J>, Res>>::Layer as Layer<S>>::Service as Service<
+            Request<J>,
+        >>::Future: Send,
+        <<<P as Backend<Request<J>, Res>>::Layer as Layer<S>>::Service as Service<
+            Request<J>,
+        >>::Error: Send + Into<BoxDynError> + Sync,
     {
         let notifier = Notify::new();
         let service = self.state.service;
         let backend = self.state.backend;
         let executor = monitor.executor().clone();
         let context = monitor.context().clone();
-        let poller = backend.poll(self.id.clone());
+        let poller = backend
+            .poll::<<<P as Backend<Request<J>, Res>>::Layer as Layer<S>>::Service>(self.id.clone());
         let default_layer = poller.layer;
         let service = default_layer.layer(service);
         let (service, poll_worker) = Buffer::pair(service, instances);
@@ -357,35 +364,37 @@ impl<S, P> Worker<Ready<S, P>> {
     }
 
     /// Run specified worker instances via a specific executor
-    pub fn with_executor_instances<E, J>(
+    pub fn with_executor_instances<E, J, Res>(
         self,
         instances: usize,
         executor: E,
     ) -> Vec<Worker<Context<E>>>
     where
-        S: Service<Request<J>> + Send + 'static,
-        P: Backend<Request<J>> + 'static,
+        S: Service<Request<J>, Response = Res> + Send + 'static,
+        P: Backend<Request<J>, Res> + 'static,
         J: Send + 'static + Sync,
         S::Future: Send,
-        S::Response: 'static,
+        S::Response: 'static + Send + Sync + Serialize,
         S::Error: Send + Sync + 'static + Into<BoxDynError>,
         S::Error: Send + Sync + 'static + Into<BoxDynError>,
-        <P as Backend<Request<J>>>::Stream: Unpin + Send + 'static,

+        <P as Backend<Request<J>, Res>>::Stream: Unpin + Send + 'static,
         E: Executor + Clone + Send + 'static + Sync,
         P::Layer: Layer<S>,
-        <<P as Backend<Request<J>>>::Layer as Layer<S>>::Service: Service<Request<J>>,
-        <<P as Backend<Request<J>>>::Layer as Layer<S>>::Service: Send,
-        <<<P as Backend<Request<J>>>::Layer as Layer<S>>::Service as Service<Request<J>>>::Future:
-            Send,
-        <<<P as Backend<Request<J>>>::Layer as Layer<S>>::Service as Service<Request<J>>>::Error:
-            Send + Into<BoxDynError> + Sync,
+        <<P as Backend<Request<J>, Res>>::Layer as Layer<S>>::Service: Service<Request<J>, Response = Res>,
+        <<P as Backend<Request<J>, Res>>::Layer as Layer<S>>::Service: Send,
+        <<<P as Backend<Request<J>, Res>>::Layer as Layer<S>>::Service as Service<
+            Request<J>,
+        >>::Future: Send,
+        <<<P as Backend<Request<J>, Res>>::Layer as Layer<S>>::Service as Service<
+            Request<J>,
+        >>::Error: Send + Into<BoxDynError> + Sync,
     {
         let worker_id = self.id.clone();
         let notifier = Notify::new();
         let service = self.state.service;
         let (service, poll_worker) = Buffer::pair(service, instances);
         let backend = self.state.backend;
-        let poller = backend.poll(worker_id.clone());
+        let poller = backend.poll::<S>(worker_id.clone());
         let polling = poller.heartbeat.shared();
         let worker_stream = WorkerStream::new(poller.stream, notifier.clone())
             .into_future()
@@ -406,7 +415,7 @@ impl<S, P> Worker<Ready<S, P>> {
         workers
     }
 
-    pub(crate) fn build_worker_instance<LS, E>(
+    pub(crate) fn build_worker_instance<LS, Res, E>(
         id: WorkerId,
         service: LS,
         executor: E,
@@ -416,11 +425,11 @@ impl<S, P> Worker<Ready<S, P>> {
         context: Option<MonitorContext>,
     ) -> Worker<Context<E>>
     where
-        LS: Service<Request<J>> + Send + 'static,
+        LS: Service<Request<J>, Response = Res> + Send + 'static,
         LS::Future: Send + 'static,
         LS::Response: 'static,
         LS::Error: Send + Sync + Into<BoxDynError> + 'static,
-        P: Backend<Request<J>>,
+        P: Backend<Request<J>, Res>,
         E: Executor + Send + Clone + 'static + Sync,
         J: Sync + Send + 'static,
         S: 'static,
@@ -445,17 +454,17 @@ impl<S, P> Worker<Ready<S, P>> {
         worker
     }
 
-    pub(crate) async fn build_instance<LS, E>(
+    pub(crate) async fn build_instance<LS, Res, E>(
         instance: usize,
         service: LS,
         worker: Worker<Context<E>>,
         notifier: WorkerNotify<Result<Option<Request<J>>, Error>>,
     ) where
-        LS: Service<Request<J>> + Send + 'static,
+        LS: Service<Request<J>, Response = Res> + Send + 'static,
         LS::Future: Send + 'static,
         LS::Response: 'static,
         LS::Error: Send + Sync + Into<BoxDynError> + 'static,
-        P: Backend<Request<J>>,
+        P: Backend<Request<J>, Res>,
         E: Executor + Send + Clone + 'static + Sync,
     {
         if let Some(ctx) = worker.state.context.as_ref() {
diff --git a/packages/apalis-cron/src/lib.rs b/packages/apalis-cron/src/lib.rs
index 3610d43a..0faef0e7 100644
--- a/packages/apalis-cron/src/lib.rs
+++ b/packages/apalis-cron/src/lib.rs
@@ -134,7 +134,7 @@ where
     }
 }
 
-impl<J, Tz> Backend<Request<J>> for CronStream<J, Tz>
+impl<J, Tz, Res> Backend<Request<J>, Res> for CronStream<J, Tz>
 where
     J: From<DateTime<Tz>> + Send + Sync + 'static,
     Tz: TimeZone + Send + Sync + 'static,
     Tz::Offset: Send + Sync,
@@ -144,7 +144,7 @@ where
 
     type Layer = Identity;
 
-    fn poll(self, _worker: WorkerId) -> Poller<Self::Stream> {
+    fn poll<Svc>(self, _worker: WorkerId) -> Poller<Self::Stream> {
         let stream = self.into_stream();
         Poller::new(stream, async {})
     }
diff --git a/packages/apalis-redis/Cargo.toml b/packages/apalis-redis/Cargo.toml
index 240aae75..f757a2e6 100644
--- a/packages/apalis-redis/Cargo.toml
+++ b/packages/apalis-redis/Cargo.toml
@@ -21,6 +21,7 @@ redis = { version = "0.25.3", default-features = false, features = [
     "connection-manager",
 ] }
 serde = "1"
+serde_json = "1"
 log = "0.4.21"
 chrono = { version = "0.4.38", default-features = false, features = [
     "clock",
@@ -31,6 +32,7 @@ futures = "0.3.30"
 tokio = { version = "1", features = ["rt", "net"], optional = true }
 async-std = { version = "1.12.0", optional = true }
 async-trait = "0.1.80"
+tower = "0.4"
 
 [dev-dependencies]
diff --git a/packages/apalis-redis/src/storage.rs b/packages/apalis-redis/src/storage.rs
index faad9a48..448da355 100644
--- a/packages/apalis-redis/src/storage.rs
+++ b/packages/apalis-redis/src/storage.rs
@@ -1,6 +1,7 @@
 use apalis_core::codec::json::JsonCodec;
 use apalis_core::data::Extensions;
-use apalis_core::layers::{Ack, AckLayer, AckResponse, AckStream};
+use apalis_core::error::Error;
+use apalis_core::layers::{Ack, AckLayer};
 use apalis_core::poller::controller::Controller;
 use apalis_core::poller::stream::BackendStream;
 use apalis_core::poller::Poller;
@@ -12,7 +13,7 @@ use apalis_core::task::task_id::TaskId;
 use apalis_core::worker::WorkerId;
 use apalis_core::{Backend, Codec};
 use chrono::Utc;
-use futures::channel::mpsc;
+use
futures::channel::mpsc::{self, Sender}; use futures::{select, FutureExt, SinkExt, StreamExt, TryFutureExt}; use log::*; use redis::aio::ConnectionLike; @@ -24,6 +25,7 @@ use std::fmt::{self, Debug}; use std::io; use std::num::TryFromIntError; use std::sync::Arc; +use std::time::SystemTime; use std::{marker::PhantomData, time::Duration}; /// Shorthand to create a client and connect @@ -145,8 +147,6 @@ impl RedisJob { impl From> for Request { fn from(val: RedisJob) -> Self { let mut data = Extensions::new(); - data.insert(val.ctx.id.clone()); - data.insert(Attempt::new_with_value(val.ctx.attempts)); data.insert(val.ctx); Request::new_with_data(val.job, data) } @@ -155,17 +155,13 @@ impl From> for Request { impl TryFrom> for RedisJob { type Error = RedisError; fn try_from(val: Request) -> Result { - let task_id = val - .get::() + let ctx = val + .get::() .cloned() - .ok_or((ErrorKind::IoError, "Missing TaskId"))?; - let attempts = val.get::().cloned().unwrap_or_default(); + .ok_or((ErrorKind::IoError, "Missing Context"))?; Ok(RedisJob { job: val.take(), - ctx: Context { - attempts: attempts.current(), - id: task_id, - }, + ctx, }) } } @@ -173,7 +169,10 @@ impl TryFrom> for RedisJob { #[derive(Clone, Debug, Serialize, Deserialize, Default)] pub struct Context { id: TaskId, - attempts: usize, + attempts: Attempt, + max_attempts: usize, + lock_by: Option, + run_at: Option, } /// Config for a [RedisStorage] @@ -455,17 +454,20 @@ impl RedisStorage { impl< T: Serialize + DeserializeOwned + Sync + Send + Unpin + 'static, Conn: ConnectionLike + Send + Sync + 'static, - > Backend> for RedisStorage + Res: Send + Serialize + Sync + 'static, + > Backend, Res> for RedisStorage { type Stream = BackendStream>>; - type Layer = AckLayer, T>; + type Layer = AckLayer)>, T, Res>; - fn poll(mut self, worker: WorkerId) -> Poller { + fn poll>>( + mut self, + worker: WorkerId, + ) -> Poller { let (mut tx, rx) = mpsc::channel(self.config.buffer_size); - let (ack_tx, ack_rx) = mpsc::channel(self.config.buffer_size); - let ack = AckStream(ack_tx); - let layer = AckLayer::new(ack, worker.clone()); + let (ack, ack_rx) = mpsc::channel(self.config.buffer_size); + let layer = AckLayer::new(ack); let controller = self.controller.clone(); let config = self.config.clone(); let stream: RequestStream> = Box::pin(rx); @@ -512,8 +514,8 @@ impl< } id_to_ack = ack_stream.next() => { - if let Some(res) = id_to_ack { - if let Err(e) = self.ack(res).await { + if let Some((ctx, res)) = id_to_ack { + if let Err(e) = self.ack(&ctx, &res).await { error!("AckError: {}", e); } } @@ -529,17 +531,27 @@ impl< } } -impl Ack +impl Ack for RedisStorage +where + Res: Serialize + Sync + Send + 'static, { - type Acknowledger = TaskId; - type Error = RedisError; - async fn ack(&mut self, res: AckResponse) -> Result<(), RedisError> { - let inflight_set = format!("{}:{}", self.config.inflight_jobs_set(), res.worker); + type Context = Context; + type AckError = RedisError; + async fn ack( + &mut self, + ctx: &Self::Context, + res: &Result, + ) -> Result<(), RedisError> { + let inflight_set = format!( + "{}:{}", + self.config.inflight_jobs_set(), + ctx.lock_by.clone().unwrap() + ); let now: i64 = Utc::now().timestamp(); - match res.result { + match res { Ok(success_res) => { let done_job = self.scripts.done_job.clone(); let done_jobs_set = &self.config.done_jobs_set(); @@ -547,27 +559,21 @@ impl Ack .key(inflight_set) .key(done_jobs_set) .key(self.config.job_data_hash()) - .arg(res.acknowledger.to_string()) + .arg(ctx.id.to_string()) .arg(now) - 
.arg(success_res) + .arg(serde_json::to_string(success_res).unwrap()) .invoke_async(&mut self.conn) .await } Err(e) => match e { - e if e.contains("BackoffRetry") => { - //do nothing, should be handled by BackoffLayer - Ok(()) - } - - // TODO: Just automatically retry - e if e.starts_with("RetryError") => { + Error::Abort(e) => { let retry_job = self.scripts.retry_job.clone(); let retry_jobs_set = &self.config.scheduled_jobs_set(); retry_job .key(inflight_set) .key(retry_jobs_set) .key(self.config.job_data_hash()) - .arg(res.acknowledger.to_string()) + .arg(ctx.id.to_string()) .arg(now) .arg(e) .invoke_async(&mut self.conn) @@ -581,9 +587,9 @@ impl Ack .key(inflight_set) .key(kill_jobs_set) .key(self.config.job_data_hash()) - .arg(res.acknowledger.to_string()) + .arg(ctx.id.to_string()) .arg(now) - .arg(e) + .arg(e.to_string()) .invoke_async(&mut self.conn) .await } @@ -623,14 +629,12 @@ impl< let mut processed = vec![]; for job in jobs { let bytes = deserialize_job(&job)?; - let request = codec + let mut request = codec .decode(bytes) - .map(Into::into) - .map(|mut req: Request| { - req.insert(Namespace(namespace.clone())); - req - }) .map_err(|e| build_error(&e.to_string()))?; + request.ctx_mut().lock_by = Some(worker_id.clone()); + let mut request: Request = request.into(); + request.insert(Namespace(namespace.clone())); processed.push(request) } Ok(processed) @@ -697,8 +701,8 @@ where let signal_list = self.config.signal_list(); let job_id = TaskId::new(); let ctx = Context { - attempts: 0, id: job_id.clone(), + ..Default::default() }; let job = self .codec @@ -721,8 +725,8 @@ where let scheduled_jobs_set = self.config.scheduled_jobs_set(); let job_id = TaskId::new(); let ctx = Context { - attempts: 0, id: job_id.clone(), + ..Default::default() }; let job = RedisJob { job, ctx }; let job = self @@ -1081,20 +1085,14 @@ mod tests { let worker_id = register_worker(&mut storage).await; let job = consume_one(&mut storage, &worker_id).await; - let job_id = &job.get::().unwrap().id; - let attempts = job.get::().unwrap().clone(); + let ctx = job.get::().unwrap(); storage - .ack(AckResponse { - acknowledger: job_id.clone(), - result: Ok("Success".to_string()), - worker: worker_id.clone(), - attempts, - }) + .ack(ctx, &Ok(())) .await .expect("failed to acknowledge the job"); - let _job = get_job(&mut storage, &job_id).await; + let _job = get_job(&mut storage, &ctx.id).await; } #[tokio::test] diff --git a/packages/apalis-sql/src/from_row.rs b/packages/apalis-sql/src/from_row.rs index 6c7886dd..bcc2a655 100644 --- a/packages/apalis-sql/src/from_row.rs +++ b/packages/apalis-sql/src/from_row.rs @@ -1,6 +1,6 @@ use apalis_core::task::task_id::TaskId; use apalis_core::{data::Extensions, request::Request, worker::WorkerId}; -use chrono::Utc; + use serde::{Deserialize, Serialize}; use sqlx::{Decode, Type}; @@ -129,6 +129,7 @@ impl<'r, T: Decode<'r, sqlx::Postgres> + Type> sqlx::FromRow<'r, sqlx::postgres::PgRow> for SqlRequest { fn from_row(row: &'r sqlx::postgres::PgRow) -> Result { + use chrono::Utc; use sqlx::Row; use std::str::FromStr; diff --git a/packages/apalis-sql/src/lib.rs b/packages/apalis-sql/src/lib.rs index 82d97197..776cd6a1 100644 --- a/packages/apalis-sql/src/lib.rs +++ b/packages/apalis-sql/src/lib.rs @@ -130,11 +130,11 @@ impl Config { } /// Calculates the status from a result -pub(crate) fn calculate_status(res: &Result) -> State { +pub fn calculate_status(res: &Result) -> State { match res { Ok(_) => State::Done, Err(e) => match &e { - _ if e.starts_with("AbortError") => 
State::Killed, + _ if e.to_string().starts_with("AbortError") => State::Killed, _ => State::Failed, }, } @@ -144,7 +144,8 @@ pub(crate) fn calculate_status(res: &Result) -> State { #[macro_export] macro_rules! sql_storage_tests { ($setup:path, $storage_type:ty, $job_type:ty) => { - async fn setup_test_wrapper() -> TestWrapper<$storage_type, $job_type> { + async fn setup_test_wrapper( + ) -> TestWrapper<$storage_type, $job_type, ()> { let (mut t, poller) = TestWrapper::new_with_service( $setup().await, apalis_core::service_fn::service_fn(email_service::send_email), diff --git a/packages/apalis-sql/src/mysql.rs b/packages/apalis-sql/src/mysql.rs index 0e39c1c5..e9030b04 100644 --- a/packages/apalis-sql/src/mysql.rs +++ b/packages/apalis-sql/src/mysql.rs @@ -1,6 +1,6 @@ use apalis_core::codec::json::JsonCodec; use apalis_core::error::Error; -use apalis_core::layers::{Ack, AckLayer, AckResponse}; +use apalis_core::layers::{Ack, AckLayer}; use apalis_core::notify::Notify; use apalis_core::poller::controller::Controller; use apalis_core::poller::stream::BackendStream; @@ -39,7 +39,10 @@ pub struct MysqlStorage { controller: Controller, config: Config, codec: BoxCodec, - ack_notify: Notify>, + ack_notify: Notify<( + SqlContext, + Result, + )>, } impl fmt::Debug for MysqlStorage { @@ -369,15 +372,15 @@ where } } -impl Backend> +impl Backend, Res> for MysqlStorage { type Stream = BackendStream>>; - type Layer = AckLayer, T>; + type Layer = AckLayer, T, Res>; - fn poll(self, worker: WorkerId) -> Poller { - let layer = AckLayer::new(self.clone(), worker.clone()); + fn poll(self, worker: WorkerId) -> Poller { + let layer = AckLayer::new(self.clone()); let config = self.config.clone(); let controller = self.controller.clone(); let pool = self.pool.clone(); @@ -395,15 +398,18 @@ impl Backend Backend Ack for MysqlStorage { - type Acknowledger = TaskId; - type Error = sqlx::Error; - async fn ack(&mut self, response: AckResponse) -> Result<(), sqlx::Error> { +impl Ack for MysqlStorage { + type Context = SqlContext; + type AckError = sqlx::Error; + async fn ack( + &mut self, + ctx: &Self::Context, + res: &Result, + ) -> Result<(), sqlx::Error> { self.ack_notify - .notify(response) + .notify(( + ctx.clone(), + res.as_ref() + .map(|r| serde_json::to_value(&r).unwrap()) + .map_err(|c| c.clone()), + )) .map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::BrokenPipe, e)))?; Ok(()) diff --git a/packages/apalis-sql/src/postgres.rs b/packages/apalis-sql/src/postgres.rs index e01941a4..c4e4b642 100644 --- a/packages/apalis-sql/src/postgres.rs +++ b/packages/apalis-sql/src/postgres.rs @@ -42,7 +42,7 @@ use crate::context::SqlContext; use crate::{calculate_status, Config}; use apalis_core::codec::json::JsonCodec; use apalis_core::error::Error; -use apalis_core::layers::{Ack, AckLayer, AckResponse}; +use apalis_core::layers::{Ack, AckLayer}; use apalis_core::notify::Notify; use apalis_core::poller::controller::Controller; use apalis_core::poller::stream::BackendStream; @@ -82,7 +82,7 @@ pub struct PostgresStorage { codec: BoxCodec, config: Config, controller: Controller, - ack_notify: Notify>, + ack_notify: Notify<(SqlContext, Result)>, subscription: Option, } @@ -116,15 +116,15 @@ impl fmt::Debug for PostgresStorage { } } -impl Backend> +impl Backend, Res> for PostgresStorage { type Stream = BackendStream>>; - type Layer = AckLayer, T>; + type Layer = AckLayer, T, Res>; - fn poll(mut self, worker: WorkerId) -> Poller { - let layer = AckLayer::new(self.clone(), worker.clone()); + fn poll(mut self, worker: 
WorkerId) -> Poller { + let layer = AckLayer::new(self.clone()); let subscription = self.subscription.clone(); let config = self.config.clone(); let controller = self.controller.clone(); @@ -174,8 +174,8 @@ impl Backend { if let Some(ids) = ids { - let ack_ids: Vec<(String, String, String, String, u64)> = ids.iter().map(|c| { - (c.acknowledger.to_string(), c.worker.to_string(), serde_json::to_string(&c.result).unwrap(), calculate_status(&c.result).to_string(), (c.attempts.current() + 1) as u64 ) + let ack_ids: Vec<(String, String, String, String, u64)> = ids.iter().map(|(ctx, res)| { + (ctx.id().to_string(), ctx.lock_by().clone().unwrap().to_string(), serde_json::to_string(&res.as_ref().map_err(|e| e.to_string())).unwrap(), calculate_status(res).to_string(), (ctx.attempts().current() + 1) as u64 ) }).collect(); let query = "UPDATE apalis.jobs @@ -546,12 +546,21 @@ where } } -impl Ack for PostgresStorage { - type Acknowledger = TaskId; - type Error = sqlx::Error; - async fn ack(&mut self, res: AckResponse) -> Result<(), sqlx::Error> { +impl Ack for PostgresStorage { + type Context = SqlContext; + type AckError = sqlx::Error; + async fn ack( + &mut self, + ctx: &Self::Context, + res: &Result, + ) -> Result<(), sqlx::Error> { self.ack_notify - .notify(res) + .notify(( + ctx.clone(), + res.as_ref() + .map(|r| serde_json::to_value(r).unwrap()) + .map_err(|e| e.clone()), + )) .map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::Interrupted, e)))?; Ok(()) @@ -618,7 +627,6 @@ mod tests { use crate::sql_storage_tests; use super::*; - use apalis_core::task::attempt::Attempt; use apalis_core::test_utils::DummyService; use chrono::Utc; use email_service::Email; diff --git a/packages/apalis-sql/src/sqlite.rs b/packages/apalis-sql/src/sqlite.rs index 5991e61c..42bda01f 100644 --- a/packages/apalis-sql/src/sqlite.rs +++ b/packages/apalis-sql/src/sqlite.rs @@ -1,9 +1,8 @@ use crate::context::SqlContext; use crate::{calculate_status, Config}; - use apalis_core::codec::json::JsonCodec; use apalis_core::error::Error; -use apalis_core::layers::{Ack, AckLayer, AckResponse}; +use apalis_core::layers::{Ack, AckLayer}; use apalis_core::poller::controller::Controller; use apalis_core::poller::stream::BackendStream; use apalis_core::poller::Poller; @@ -450,14 +449,14 @@ impl SqliteStorage { } } -impl Backend> +impl Backend, Res> for SqliteStorage { type Stream = BackendStream>>; - type Layer = AckLayer, T>; + type Layer = AckLayer, T, Res>; - fn poll(mut self, worker: WorkerId) -> Poller { - let layer = AckLayer::new(self.clone(), worker.clone()); + fn poll(mut self, worker: WorkerId) -> Poller { + let layer = AckLayer::new(self.clone()); let config = self.config.clone(); let controller = self.controller.clone(); let stream = self @@ -478,21 +477,25 @@ impl Backend Ack for SqliteStorage { - type Acknowledger = TaskId; - type Error = sqlx::Error; - async fn ack(&mut self, res: AckResponse) -> Result<(), sqlx::Error> { +impl Ack for SqliteStorage { + type Context = SqlContext; + type AckError = sqlx::Error; + async fn ack( + &mut self, + ctx: &Self::Context, + res: &Result, + ) -> Result<(), sqlx::Error> { let pool = self.pool.clone(); let query = "UPDATE Jobs SET status = ?4, done_at = strftime('%s','now'), last_error = ?3, attempts =?5 WHERE id = ?1 AND lock_by = ?2"; - let result = serde_json::to_string(&res.result) + let result = serde_json::to_string(&res.as_ref().map_err(|r| r.to_string())) .map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::InvalidData, e)))?; sqlx::query(query) - 
.bind(res.acknowledger.to_string()) - .bind(res.worker.to_string()) + .bind(ctx.id().to_string()) + .bind(ctx.lock_by().as_ref().unwrap().to_string()) .bind(result) - .bind(calculate_status(&res.result).to_string()) - .bind(res.attempts.current() as i64 + 1) + .bind(calculate_status(&res).to_string()) + .bind(ctx.attempts().current() as i64 + 1) .execute(&pool) .await?; Ok(()) @@ -506,7 +509,6 @@ mod tests { use crate::sql_storage_tests; use super::*; - use apalis_core::task::attempt::Attempt; use apalis_core::test_utils::DummyService; use chrono::Utc; use email_service::example_good_email; @@ -618,12 +620,7 @@ mod tests { let job_id = ctx.unwrap().id(); storage - .ack(AckResponse { - acknowledger: job_id.clone(), - result: Ok("Success".to_string()), - worker: worker_id.clone(), - attempts: Attempt::new_with_value(1), - }) + .ack(ctx.as_ref().unwrap(), &Ok(())) .await .expect("failed to acknowledge the job"); From b790332217c821c6afa418530f5b57a6ed933a28 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 25 Jul 2024 21:08:22 +0300 Subject: [PATCH 37/59] fix(deps): update rust crate sqlx to 0.8.0 (#380) * chore: fix typos (#346) * chore: Add repository to metadata (#345) * fix(deps): update rust crate sqlx to 0.8.0 * fix: sqlite example --------- Co-authored-by: John Vandenberg Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> Co-authored-by: geofmureithi Co-authored-by: Geoffrey Mureithi <95377562+geofmureithi@users.noreply.github.com> --- Cargo.toml | 10 +++++++--- examples/sqlite/Cargo.toml | 2 +- packages/apalis-core/Cargo.toml | 3 ++- packages/apalis-core/src/data.rs | 2 +- packages/apalis-cron/Cargo.toml | 3 ++- packages/apalis-redis/Cargo.toml | 3 ++- packages/apalis-sql/Cargo.toml | 5 +++-- packages/apalis-sql/src/sqlite.rs | 2 +- 8 files changed, 19 insertions(+), 11 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index ece12666..cb527542 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,15 +1,19 @@ +[workspace.package] +edition = "2021" +repository = "https://github.com/geofmureithi/apalis" + [package] name = "apalis" version = "0.6.0-rc.4" authors = ["Geoffrey Mureithi "] description = "Simple, extensible multithreaded background job processing for Rust" -repository = "https://github.com/geofmureithi/apalis" +edition.workspace = true +repository.workspace = true documentation = "https://docs.rs/apalis" readme = "README.md" license = "MIT OR Apache-2.0" keywords = ["job", "task", "scheduler", "worker", "cron"] categories = ["database"] -edition = "2021" [lib] bench = false @@ -85,7 +89,7 @@ redis = { version = "0.25.3", default-features = false, features = [ ] } [dev-dependencies.sqlx] -version = "0.7.4" +version = "0.8.0" default-features = false features = ["chrono", "mysql", "sqlite", "postgres"] diff --git a/examples/sqlite/Cargo.toml b/examples/sqlite/Cargo.toml index 646ab7d8..b3a4cf96 100644 --- a/examples/sqlite/Cargo.toml +++ b/examples/sqlite/Cargo.toml @@ -21,6 +21,6 @@ default-features = false version = "0.1" [dependencies.sqlx] -version = "0.7.0" +version = "0.8" default-features = false features = ["sqlite"] diff --git a/packages/apalis-core/Cargo.toml b/packages/apalis-core/Cargo.toml index 7afb7f59..6c949c84 100644 --- a/packages/apalis-core/Cargo.toml +++ b/packages/apalis-core/Cargo.toml @@ -2,7 +2,8 @@ name = "apalis-core" version = "0.6.0-rc.4" authors = ["Njuguna Mureithi "] -edition = "2021" +edition.workspace = true +repository.workspace = true license = "MIT" description = 
"Core for apalis: simple, extensible multithreaded background processing for Rust" categories = ["concurrency"] diff --git a/packages/apalis-core/src/data.rs b/packages/apalis-core/src/data.rs index f5eec311..33cd3f9e 100644 --- a/packages/apalis-core/src/data.rs +++ b/packages/apalis-core/src/data.rs @@ -161,7 +161,7 @@ impl Extensions { self.map.as_ref().map_or(true, |map| map.is_empty()) } - /// Get the numer of extensions available. + /// Get the number of extensions available. /// /// # Example /// diff --git a/packages/apalis-cron/Cargo.toml b/packages/apalis-cron/Cargo.toml index c79391e7..74e9efe8 100644 --- a/packages/apalis-cron/Cargo.toml +++ b/packages/apalis-cron/Cargo.toml @@ -1,7 +1,8 @@ [package] name = "apalis-cron" version = "0.6.0-rc.4" -edition = "2021" +edition.workspace = true +repository.workspace = true authors = ["Njuguna Mureithi "] license = "MIT" description = "A simple yet extensible library for cron-like job scheduling for rust." diff --git a/packages/apalis-redis/Cargo.toml b/packages/apalis-redis/Cargo.toml index f757a2e6..39d3491b 100644 --- a/packages/apalis-redis/Cargo.toml +++ b/packages/apalis-redis/Cargo.toml @@ -2,7 +2,8 @@ name = "apalis-redis" version = "0.6.0-rc.4" authors = ["Njuguna Mureithi "] -edition = "2021" +edition.workspace = true +repository.workspace = true readme = "../../README.md" license = "MIT" diff --git a/packages/apalis-sql/Cargo.toml b/packages/apalis-sql/Cargo.toml index 2498233f..a512d9b9 100644 --- a/packages/apalis-sql/Cargo.toml +++ b/packages/apalis-sql/Cargo.toml @@ -2,7 +2,8 @@ name = "apalis-sql" version = "0.6.0-rc.4" authors = ["Njuguna Mureithi "] -edition = "2021" +edition.workspace = true +repository.workspace = true readme = "../../README.md" license = "MIT" @@ -18,7 +19,7 @@ async-std-comp = ["async-std", "sqlx/runtime-async-std-rustls"] tokio-comp = ["tokio", "sqlx/runtime-tokio-rustls"] [dependencies.sqlx] -version = "0.7.4" +version = "0.8.0" default-features = false features = ["chrono"] diff --git a/packages/apalis-sql/src/sqlite.rs b/packages/apalis-sql/src/sqlite.rs index 42bda01f..402a8472 100644 --- a/packages/apalis-sql/src/sqlite.rs +++ b/packages/apalis-sql/src/sqlite.rs @@ -404,7 +404,7 @@ impl SqliteStorage { tx.commit().await?; Ok(()) } - + /// Add jobs that failed back to the queue if there are still remaining attemps pub async fn reenqueue_failed(&mut self) -> Result<(), sqlx::Error> { let job_type = self.config.namespace.clone(); From 649b384663bbd0dabd92c8d31dc9b0079ab3f5c2 Mon Sep 17 00:00:00 2001 From: Geoffrey Mureithi <95377562+geofmureithi@users.noreply.github.com> Date: Thu, 25 Jul 2024 21:16:32 +0300 Subject: [PATCH 38/59] bump: to v0.6.0-rc.5 (#385) --- Cargo.toml | 4 ++-- packages/apalis-core/Cargo.toml | 2 +- packages/apalis-cron/Cargo.toml | 4 ++-- packages/apalis-redis/Cargo.toml | 4 ++-- packages/apalis-sql/Cargo.toml | 4 ++-- packages/apalis-sql/src/lib.rs | 3 +-- packages/apalis-sql/src/sqlite.rs | 2 +- 7 files changed, 11 insertions(+), 12 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index cb527542..68e2cead 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -4,7 +4,7 @@ repository = "https://github.com/geofmureithi/apalis" [package] name = "apalis" -version = "0.6.0-rc.4" +version = "0.6.0-rc.5" authors = ["Geoffrey Mureithi "] description = "Simple, extensible multithreaded background job processing for Rust" edition.workspace = true @@ -58,7 +58,7 @@ layers = [ docsrs = ["document-features"] [dependencies.apalis-core] -version = "0.6.0-rc.4" +version = "0.6.0-rc.5" 
default-features = false path = "./packages/apalis-core" diff --git a/packages/apalis-core/Cargo.toml b/packages/apalis-core/Cargo.toml index 6c949c84..d3f36711 100644 --- a/packages/apalis-core/Cargo.toml +++ b/packages/apalis-core/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apalis-core" -version = "0.6.0-rc.4" +version = "0.6.0-rc.5" authors = ["Njuguna Mureithi "] edition.workspace = true repository.workspace = true diff --git a/packages/apalis-cron/Cargo.toml b/packages/apalis-cron/Cargo.toml index 74e9efe8..0a8ae9ad 100644 --- a/packages/apalis-cron/Cargo.toml +++ b/packages/apalis-cron/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apalis-cron" -version = "0.6.0-rc.4" +version = "0.6.0-rc.5" edition.workspace = true repository.workspace = true authors = ["Njuguna Mureithi "] @@ -10,7 +10,7 @@ description = "A simple yet extensible library for cron-like job scheduling for # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -apalis-core = { path = "../../packages/apalis-core", version = "0.6.0-rc.4", default-features = false, features = [ +apalis-core = { path = "../../packages/apalis-core", version = "0.6.0-rc.5", default-features = false, features = [ "sleep", ] } cron = "0.12.1" diff --git a/packages/apalis-redis/Cargo.toml b/packages/apalis-redis/Cargo.toml index 39d3491b..38dab7ff 100644 --- a/packages/apalis-redis/Cargo.toml +++ b/packages/apalis-redis/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apalis-redis" -version = "0.6.0-rc.4" +version = "0.6.0-rc.5" authors = ["Njuguna Mureithi "] edition.workspace = true repository.workspace = true @@ -12,7 +12,7 @@ description = "Redis Storage for apalis: use Redis for background jobs and messa # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -apalis-core = { path = "../../packages/apalis-core", version = "0.6.0-rc.4", default-features = false, features = [ +apalis-core = { path = "../../packages/apalis-core", version = "0.6.0-rc.5", default-features = false, features = [ "sleep", "json", ] } diff --git a/packages/apalis-sql/Cargo.toml b/packages/apalis-sql/Cargo.toml index a512d9b9..33400fbe 100644 --- a/packages/apalis-sql/Cargo.toml +++ b/packages/apalis-sql/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apalis-sql" -version = "0.6.0-rc.4" +version = "0.6.0-rc.5" authors = ["Njuguna Mureithi "] edition.workspace = true repository.workspace = true @@ -26,7 +26,7 @@ features = ["chrono"] [dependencies] serde = { version = "1", features = ["derive"] } serde_json = "1" -apalis-core = { path = "../../packages/apalis-core", version = "0.6.0-rc.4", default-features = false, features = [ +apalis-core = { path = "../../packages/apalis-core", version = "0.6.0-rc.5", default-features = false, features = [ "sleep", "json", ] } diff --git a/packages/apalis-sql/src/lib.rs b/packages/apalis-sql/src/lib.rs index 776cd6a1..03f186ec 100644 --- a/packages/apalis-sql/src/lib.rs +++ b/packages/apalis-sql/src/lib.rs @@ -144,8 +144,7 @@ pub fn calculate_status(res: &Result) -> St #[macro_export] macro_rules! 
sql_storage_tests { ($setup:path, $storage_type:ty, $job_type:ty) => { - async fn setup_test_wrapper( - ) -> TestWrapper<$storage_type, $job_type, ()> { + async fn setup_test_wrapper() -> TestWrapper<$storage_type, $job_type, ()> { let (mut t, poller) = TestWrapper::new_with_service( $setup().await, apalis_core::service_fn::service_fn(email_service::send_email), diff --git a/packages/apalis-sql/src/sqlite.rs b/packages/apalis-sql/src/sqlite.rs index 402a8472..42bda01f 100644 --- a/packages/apalis-sql/src/sqlite.rs +++ b/packages/apalis-sql/src/sqlite.rs @@ -404,7 +404,7 @@ impl SqliteStorage { tx.commit().await?; Ok(()) } - + /// Add jobs that failed back to the queue if there are still remaining attemps pub async fn reenqueue_failed(&mut self) -> Result<(), sqlx::Error> { let job_type = self.config.namespace.clone(); From e7a751c36e13205088fce014dfb5e46c07d93613 Mon Sep 17 00:00:00 2001 From: Geoffrey Mureithi <95377562+geofmureithi@users.noreply.github.com> Date: Fri, 2 Aug 2024 08:50:45 +0300 Subject: [PATCH 39/59] chore: standardize codec usage (#388) * bump: to v0.6.0-rc.5 * fix: standardize codec usage * lint: cargo fmt --- examples/redis-deadpool/src/main.rs | 2 +- examples/redis-mq-example/src/main.rs | 73 ++++++++---- examples/redis-with-msg-pack/Cargo.toml | 1 + examples/redis-with-msg-pack/src/main.rs | 23 ++-- packages/apalis-core/src/codec/json.rs | 59 ++++++---- packages/apalis-core/src/lib.rs | 31 +++-- packages/apalis-redis/Cargo.toml | 2 +- packages/apalis-redis/src/lib.rs | 1 - packages/apalis-redis/src/storage.rs | 113 +++++++++--------- packages/apalis-sql/src/mysql.rs | 90 ++++++++------ packages/apalis-sql/src/postgres.rs | 143 +++++++++++++---------- packages/apalis-sql/src/sqlite.rs | 62 +++++----- 12 files changed, 327 insertions(+), 273 deletions(-) diff --git a/examples/redis-deadpool/src/main.rs b/examples/redis-deadpool/src/main.rs index 0b538945..0d0d7561 100644 --- a/examples/redis-deadpool/src/main.rs +++ b/examples/redis-deadpool/src/main.rs @@ -26,8 +26,8 @@ async fn main() -> Result<()> { produce_jobs(&mut storage).await?; let worker = WorkerBuilder::new("rango-tango") - .backend(storage) .data(pool) + .backend(storage) .build_fn(send_email); Monitor::::new() diff --git a/examples/redis-mq-example/src/main.rs b/examples/redis-mq-example/src/main.rs index b9406ce2..4fd17daa 100644 --- a/examples/redis-mq-example/src/main.rs +++ b/examples/redis-mq-example/src/main.rs @@ -2,7 +2,7 @@ use std::{fmt::Debug, marker::PhantomData, time::Duration}; use apalis::{layers::tracing::TraceLayer, prelude::*}; -use apalis_redis::{self, Config, RedisCodec, RedisJob}; +use apalis_redis::{self, Config, RedisJob}; use apalis_core::{ codec::json::JsonCodec, @@ -11,18 +11,19 @@ use apalis_core::{ use email_service::{send_email, Email}; use futures::{channel::mpsc, SinkExt}; use rsmq_async::{Rsmq, RsmqConnection, RsmqError}; +use serde::{de::DeserializeOwned, Serialize}; use tokio::time::sleep; use tracing::{error, info}; -struct RedisMq { +struct RedisMq>> { conn: Rsmq, msg_type: PhantomData, config: Config, - codec: RedisCodec, + codec: PhantomData, } // Manually implement Clone for RedisMq -impl Clone for RedisMq { +impl Clone for RedisMq { fn clone(&self) -> Self { RedisMq { conn: self.conn.clone(), @@ -33,7 +34,11 @@ impl Clone for RedisMq { } } -impl Backend, Res> for RedisMq { +impl Backend, Res> for RedisMq +where + M: Send + DeserializeOwned + 'static, + C: Codec>, +{ type Stream = RequestStream>; type Layer = AckLayer; @@ -47,11 +52,14 @@ impl Backend, Res> for 
RedisMq { sleep(*self.config.get_poll_interval()).await; let msg: Option> = self .conn - .receive_message("email", None) + .receive_message(self.config.get_namespace(), None) .await .unwrap() .map(|r| { - let mut req: Request<_> = self.codec.decode(&r.message).unwrap().into(); + let mut req: Request = C::decode::>(r.message) + .map_err(Into::into) + .unwrap() + .into(); req.insert(r.id); req }); @@ -62,7 +70,12 @@ impl Backend, Res> for RedisMq { } } -impl Ack for RedisMq { +impl Ack for RedisMq +where + T: Send, + Res: Debug + Send + Sync, + C: Send, +{ type Context = String; type AckError = RsmqError; @@ -70,37 +83,49 @@ impl Ack for RedisMq { async fn ack( &mut self, ctx: &Self::Context, - res: &Result, + _res: &Result, ) -> Result<(), Self::AckError> { - println!("Attempting to ACK {:?}", res); - self.conn.delete_message("email", &ctx).await?; + self.conn + .delete_message(self.config.get_namespace(), &ctx) + .await?; Ok(()) } } -impl MessageQueue for RedisMq { +impl MessageQueue for RedisMq +where + Message: Send + Serialize + DeserializeOwned + 'static, + C: Codec> + Send, +{ type Error = RsmqError; async fn enqueue(&mut self, message: Message) -> Result<(), Self::Error> { - let bytes = self - .codec - .encode(&RedisJob::new(message, Default::default())) + let bytes = C::encode(&RedisJob::new(message, Default::default())) + .map_err(Into::into) .unwrap(); - self.conn.send_message("email", bytes, None).await?; + self.conn + .send_message(self.config.get_namespace(), bytes, None) + .await?; Ok(()) } async fn dequeue(&mut self) -> Result, Self::Error> { - let codec = self.codec.clone(); - Ok(self.conn.receive_message("email", None).await?.map(|r| { - let req: Request = codec.decode(&r.message).unwrap().into(); - req.take() - })) + Ok(self + .conn + .receive_message(self.config.get_namespace(), None) + .await? + .map(|r| { + let req: Request = C::decode::>(r.message) + .map_err(Into::into) + .unwrap() + .into(); + req.take() + })) } async fn size(&mut self) -> Result { self.conn - .get_queue_attributes("email") + .get_queue_attributes(self.config.get_namespace()) .await? 
.msgs .try_into() @@ -131,8 +156,8 @@ async fn main() -> anyhow::Result<()> { let mut mq = RedisMq { conn, msg_type: PhantomData, - codec: RedisCodec::new(Box::new(JsonCodec)), - config: Config::default(), + codec: PhantomData, + config: Config::default().set_namespace("email"), }; produce_jobs(&mut mq).await?; diff --git a/examples/redis-with-msg-pack/Cargo.toml b/examples/redis-with-msg-pack/Cargo.toml index ec248247..abbd795e 100644 --- a/examples/redis-with-msg-pack/Cargo.toml +++ b/examples/redis-with-msg-pack/Cargo.toml @@ -14,6 +14,7 @@ tracing-subscriber = "0.3.11" chrono = { version = "0.4", default-features = false, features = ["clock"] } email-service = { path = "../email-service" } rmp-serde = "1.3" +redis = "0.25" [dependencies.tracing] diff --git a/examples/redis-with-msg-pack/src/main.rs b/examples/redis-with-msg-pack/src/main.rs index 4afa89ad..1ac2e24d 100644 --- a/examples/redis-with-msg-pack/src/main.rs +++ b/examples/redis-with-msg-pack/src/main.rs @@ -5,19 +5,24 @@ use apalis::prelude::*; use apalis_redis::RedisStorage; use email_service::{send_email, Email}; -use serde::{de::DeserializeOwned, Serialize}; +use redis::aio::ConnectionManager; +use serde::{Deserialize, Serialize}; use tracing::info; struct MessagePack; -impl Codec> for MessagePack { +impl Codec for MessagePack { + type Compact = Vec; type Error = Error; - fn encode(&self, input: &T) -> Result, Self::Error> { - rmp_serde::to_vec(input).map_err(|e| Error::SourceError(Arc::new(Box::new(e)))) + fn encode(input: T) -> Result, Self::Error> { + rmp_serde::to_vec(&input).map_err(|e| Error::SourceError(Arc::new(Box::new(e)))) } - fn decode(&self, compact: &Vec) -> Result { - rmp_serde::from_slice(compact).map_err(|e| Error::SourceError(Arc::new(Box::new(e)))) + fn decode(compact: Vec) -> Result + where + O: for<'de> Deserialize<'de>, + { + rmp_serde::from_slice(&compact).map_err(|e| Error::SourceError(Arc::new(Box::new(e)))) } } @@ -31,7 +36,7 @@ async fn main() -> Result<()> { let config = apalis_redis::Config::default() .set_namespace("apalis_redis-with-msg-pack") .set_max_retries(5); - let storage = RedisStorage::new_with_codec(conn, config, MessagePack); + let storage = RedisStorage::new_with_codec::(conn, config); // This can be in another part of the program produce_jobs(storage.clone()).await?; @@ -52,7 +57,9 @@ async fn main() -> Result<()> { Ok(()) } -async fn produce_jobs(mut storage: RedisStorage) -> Result<()> { +async fn produce_jobs( + mut storage: RedisStorage, +) -> Result<()> { for index in 0..10 { storage .push(Email { diff --git a/packages/apalis-core/src/codec/json.rs b/packages/apalis-core/src/codec/json.rs index ef85854c..864f1603 100644 --- a/packages/apalis-core/src/codec/json.rs +++ b/packages/apalis-core/src/codec/json.rs @@ -1,43 +1,56 @@ -use std::sync::Arc; +use std::marker::PhantomData; -use crate::{error::Error, Codec}; -use serde::{de::DeserializeOwned, Serialize}; +use crate::Codec; +use serde::{Deserialize, Serialize}; use serde_json::Value; /// Json encoding and decoding #[derive(Debug, Clone, Default)] -pub struct JsonCodec; +pub struct JsonCodec { + _o: PhantomData, +} -impl Codec> for JsonCodec { - type Error = Error; - fn encode(&self, input: &T) -> Result, Self::Error> { - serde_json::to_vec(input).map_err(|e| Error::SourceError(Arc::new(Box::new(e)))) +impl Codec for JsonCodec> { + type Compact = Vec; + type Error = serde_json::Error; + fn encode(input: T) -> Result, Self::Error> { + serde_json::to_vec(&input) } - fn decode(&self, compact: &Vec) -> Result { - 
serde_json::from_slice(compact).map_err(|e| Error::SourceError(Arc::new(Box::new(e)))) + fn decode(compact: Vec) -> Result + where + O: for<'de> Deserialize<'de>, + { + serde_json::from_slice(&compact) } } -impl Codec for JsonCodec { - type Error = Error; - fn encode(&self, input: &T) -> Result { - serde_json::to_string(input).map_err(|e| Error::SourceError(Arc::new(Box::new(e)))) +impl Codec for JsonCodec { + type Compact = String; + type Error = serde_json::Error; + fn encode(input: T) -> Result { + serde_json::to_string(&input) } - fn decode(&self, compact: &String) -> Result { - serde_json::from_str(compact).map_err(|e| Error::SourceError(Arc::new(Box::new(e)))) + fn decode(compact: String) -> Result + where + O: for<'de> Deserialize<'de>, + { + serde_json::from_str(&compact) } } -impl Codec for JsonCodec { - type Error = Error; - fn encode(&self, input: &T) -> Result { - serde_json::to_value(input).map_err(|e| Error::SourceError(Arc::new(Box::new(e)))) +impl Codec for JsonCodec { + type Compact = Value; + type Error = serde_json::Error; + fn encode(input: T) -> Result { + serde_json::to_value(input) } - fn decode(&self, compact: &Value) -> Result { - serde_json::from_value(compact.clone()) - .map_err(|e| Error::SourceError(Arc::new(Box::new(e)))) + fn decode(compact: Value) -> Result + where + O: for<'de> Deserialize<'de>, + { + serde_json::from_value(compact) } } diff --git a/packages/apalis-core/src/lib.rs b/packages/apalis-core/src/lib.rs index caa9fc6a..0e7157ec 100644 --- a/packages/apalis-core/src/lib.rs +++ b/packages/apalis-core/src/lib.rs @@ -22,10 +22,10 @@ #![cfg_attr(docsrs, feature(doc_cfg))] //! # apalis-core //! Utilities for building job and message processing tools. -use std::sync::Arc; - +use error::BoxDynError; use futures::Stream; use poller::Poller; +use serde::{Deserialize, Serialize}; use tower::Service; use worker::WorkerId; @@ -86,23 +86,22 @@ pub trait Backend { worker: WorkerId, ) -> Poller; } - -/// This allows encoding and decoding of requests in different backends -pub trait Codec { +/// A codec allows backends to encode and decode data +pub trait Codec { + /// The mode of storage by the codec + type Compact; /// Error encountered by the codec - type Error; - - /// Convert to the compact version - fn encode(&self, input: &T) -> Result; - - /// Decode back to our request type - fn decode(&self, compact: &Compact) -> Result; + type Error: Into; + /// The encoding method + fn encode(input: I) -> Result + where + I: Serialize; + /// The decoding method + fn decode(input: Self::Compact) -> Result + where + O: for<'de> Deserialize<'de>; } -/// A boxed codec -pub type BoxCodec = - Arc + Sync + Send + 'static>>; - /// Sleep utilities #[cfg(feature = "sleep")] pub async fn sleep(duration: std::time::Duration) { diff --git a/packages/apalis-redis/Cargo.toml b/packages/apalis-redis/Cargo.toml index 38dab7ff..35b8711c 100644 --- a/packages/apalis-redis/Cargo.toml +++ b/packages/apalis-redis/Cargo.toml @@ -16,7 +16,7 @@ apalis-core = { path = "../../packages/apalis-core", version = "0.6.0-rc.5", def "sleep", "json", ] } -redis = { version = "0.25.3", default-features = false, features = [ +redis = { version = "0.25.4", default-features = false, features = [ "script", "aio", "connection-manager", diff --git a/packages/apalis-redis/src/lib.rs b/packages/apalis-redis/src/lib.rs index 86815734..8257be3b 100644 --- a/packages/apalis-redis/src/lib.rs +++ b/packages/apalis-redis/src/lib.rs @@ -30,7 +30,6 @@ mod storage; pub use storage::connect; pub use storage::Config; -pub 
use storage::RedisCodec; pub use storage::RedisJob; pub use storage::RedisQueueInfo; pub use storage::RedisStorage; diff --git a/packages/apalis-redis/src/storage.rs b/packages/apalis-redis/src/storage.rs index 448da355..780ef1c6 100644 --- a/packages/apalis-redis/src/storage.rs +++ b/packages/apalis-redis/src/storage.rs @@ -24,7 +24,6 @@ use std::any::type_name; use std::fmt::{self, Debug}; use std::io; use std::num::TryFromIntError; -use std::sync::Arc; use std::time::SystemTime; use std::{marker::PhantomData, time::Duration}; @@ -348,22 +347,17 @@ impl Config { } } -/// The codec used by redis to encode and decode jobs -pub type RedisCodec = Arc< - Box, Vec, Error = apalis_core::error::Error> + Sync + Send + 'static>, ->; - /// Represents a [Storage] that uses Redis for storage. -pub struct RedisStorage { +pub struct RedisStorage>> { conn: Conn, job_type: PhantomData, scripts: RedisScript, controller: Controller, config: Config, - codec: RedisCodec, + codec: PhantomData, } -impl fmt::Debug for RedisStorage { +impl fmt::Debug for RedisStorage { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("RedisStorage") .field("conn", &"ConnectionManager") @@ -374,7 +368,7 @@ impl fmt::Debug for RedisStorage { } } -impl Clone for RedisStorage { +impl Clone for RedisStorage { fn clone(&self) -> Self { Self { conn: self.conn.clone(), @@ -390,29 +384,28 @@ impl Clone for RedisStorage { impl RedisStorage { /// Start a new connection pub fn new(conn: Conn) -> Self { - Self::new_with_codec( + Self::new_with_codec::>>( conn, Config::default().set_namespace(type_name::()), - JsonCodec, ) } /// Start a connection with a custom config pub fn new_with_config(conn: Conn, config: Config) -> Self { - Self::new_with_codec(conn, config, JsonCodec) + Self::new_with_codec::>>(conn, config) } /// Start a new connection providing custom config and a codec - pub fn new_with_codec(conn: Conn, config: Config, codec: C) -> Self + pub fn new_with_codec(conn: Conn, config: Config) -> RedisStorage where - C: Codec, Vec, Error = apalis_core::error::Error> + Sync + Send + 'static, + C: Codec + Sync + Send + 'static, { RedisStorage { conn, job_type: PhantomData, controller: Controller::new(), config, - codec: Arc::new(Box::new(codec)), + codec: PhantomData::, scripts: RedisScript { done_job: redis::Script::new(include_str!("../lua/done_job.lua")), push_job: redis::Script::new(include_str!("../lua/push_job.lua")), @@ -444,18 +437,21 @@ impl RedisStorage { pub fn get_config(&self) -> &Config { &self.config } +} +impl RedisStorage { /// Get the underlying codec details - pub fn get_codec(&self) -> &RedisCodec { + pub fn get_codec(&self) -> &PhantomData { &self.codec } } -impl< - T: Serialize + DeserializeOwned + Sync + Send + Unpin + 'static, - Conn: ConnectionLike + Send + Sync + 'static, - Res: Send + Serialize + Sync + 'static, - > Backend, Res> for RedisStorage +impl Backend, Res> for RedisStorage +where + T: Serialize + DeserializeOwned + Sync + Send + Unpin + 'static, + Conn: ConnectionLike + Send + Sync + 'static, + Res: Send + Serialize + Sync + 'static, + C: Codec> + Send + 'static, { type Stream = BackendStream>>; @@ -531,10 +527,12 @@ impl< } } -impl Ack - for RedisStorage +impl Ack for RedisStorage where Res: Serialize + Sync + Send + 'static, + T: Sync + Send, + Conn: ConnectionLike + Send + Sync + 'static, + C: Codec> + Send, { type Context = Context; type AckError = RedisError; @@ -561,7 +559,7 @@ where .key(self.config.job_data_hash()) .arg(ctx.id.to_string()) .arg(now) - 
.arg(serde_json::to_string(success_res).unwrap()) + .arg(C::encode(success_res).map_err(Into::into).unwrap()) .invoke_async(&mut self.conn) .await } @@ -598,10 +596,11 @@ where } } -impl< - T: DeserializeOwned + Send + Unpin + Send + Sync + 'static, - Conn: ConnectionLike + Send + Sync + 'static, - > RedisStorage +impl RedisStorage +where + T: DeserializeOwned + Send + Unpin + Send + Sync + 'static, + Conn: ConnectionLike + Send + Sync + 'static, + C: Codec>, { async fn fetch_next(&mut self, worker_id: &WorkerId) -> Result>, RedisError> { let fetch_jobs = self.scripts.get_jobs.clone(); @@ -610,7 +609,6 @@ impl< let job_data_hash = self.config.job_data_hash(); let inflight_set = format!("{}:{}", self.config.inflight_jobs_set(), worker_id); let signal_list = self.config.signal_list(); - let codec = self.codec.clone(); let namespace = self.config.namespace.clone(); let result = fetch_jobs @@ -629,9 +627,8 @@ impl< let mut processed = vec![]; for job in jobs { let bytes = deserialize_job(&job)?; - let mut request = codec - .decode(bytes) - .map_err(|e| build_error(&e.to_string()))?; + let mut request: RedisJob = C::decode(bytes.to_vec()) + .map_err(|e| build_error(&e.into().to_string()))?; request.ctx_mut().lock_by = Some(worker_id.clone()); let mut request: Request = request.into(); request.insert(Namespace(namespace.clone())); @@ -668,7 +665,7 @@ fn deserialize_job(job: &Value) -> Result<&Vec, RedisError> { } } -impl RedisStorage { +impl RedisStorage { async fn keep_alive(&mut self, worker_id: &WorkerId) -> Result<(), RedisError> { let register_consumer = self.scripts.register_consumer.clone(); let inflight_set = format!("{}:{}", self.config.inflight_jobs_set(), worker_id); @@ -685,9 +682,11 @@ impl RedisStorage { } } -impl Storage for RedisStorage +impl Storage for RedisStorage where T: Serialize + DeserializeOwned + Send + 'static + Unpin + Sync, + Conn: ConnectionLike + Send + Sync + 'static, + C: Codec> + Send + 'static, { type Job = T; type Error = RedisError; @@ -704,10 +703,8 @@ where id: job_id.clone(), ..Default::default() }; - let job = self - .codec - .encode(&RedisJob { ctx, job }) - .map_err(|e| (ErrorKind::IoError, "Encode error", e.to_string()))?; + let job = C::encode(&RedisJob { ctx, job }) + .map_err(|e| (ErrorKind::IoError, "Encode error", e.into().to_string()))?; push_job .key(job_data_hash) .key(active_jobs_list) @@ -729,10 +726,8 @@ where ..Default::default() }; let job = RedisJob { job, ctx }; - let job = self - .codec - .encode(&job) - .map_err(|e| (ErrorKind::IoError, "Encode error", e.to_string()))?; + let job = C::encode(&job) + .map_err(|e| (ErrorKind::IoError, "Encode error", e.into().to_string()))?; schedule_job .key(job_data_hash) .key(scheduled_jobs_set) @@ -769,18 +764,14 @@ where .await?; let bytes = deserialize_job(&data)?; - let inner = self - .codec - .decode(bytes) - .map_err(|e| (ErrorKind::IoError, "Decode error", e.to_string()))?; + let inner: RedisJob = C::decode(bytes.to_vec()) + .map_err(|e| (ErrorKind::IoError, "Decode error", e.into().to_string()))?; Ok(Some(inner.into())) } async fn update(&mut self, job: Request) -> Result<(), RedisError> { - let job = job.try_into()?; - let bytes = self - .codec - .encode(&job) - .map_err(|e| (ErrorKind::IoError, "Encode error", e.to_string()))?; + let job: RedisJob = job.try_into()?; + let bytes = C::encode(&job) + .map_err(|e| (ErrorKind::IoError, "Encode error", e.into().to_string()))?; let _: i64 = redis::cmd("HSET") .arg(&self.config.job_data_hash()) .arg(job.ctx.id.to_string()) @@ -800,10 +791,8 @@ where 
.get::() .cloned() .ok_or((ErrorKind::IoError, "Missing WorkerId"))?; - let job = self - .codec - .encode(&(job.try_into()?)) - .map_err(|e| (ErrorKind::IoError, "Encode error", e.to_string()))?; + let job = C::encode::>(job.try_into()?) + .map_err(|e| (ErrorKind::IoError, "Encode error", e.into().to_string()))?; let job_data_hash = self.config.job_data_hash(); let scheduled_jobs_set = self.config.scheduled_jobs_set(); let on: i64 = Utc::now().timestamp(); @@ -847,7 +836,11 @@ where } } -impl RedisStorage { +impl RedisStorage +where + Conn: ConnectionLike + Send + Sync + 'static, + C: Codec> + Send + 'static, +{ /// Attempt to retry a job pub async fn retry(&mut self, worker_id: &WorkerId, task_id: &TaskId) -> Result where @@ -875,10 +868,8 @@ impl RedisStorage { self.kill(worker_id, task_id).await?; return Ok(1); } - let job = self - .codec - .encode(&(job.try_into()?)) - .map_err(|e| (ErrorKind::IoError, "Encode error", e.to_string()))?; + let job = C::encode::>(job.try_into()?) + .map_err(|e| (ErrorKind::IoError, "Encode error", e.into().to_string()))?; let res: Result = retry_job .key(inflight_set) diff --git a/packages/apalis-sql/src/mysql.rs b/packages/apalis-sql/src/mysql.rs index e9030b04..2a1c2f71 100644 --- a/packages/apalis-sql/src/mysql.rs +++ b/packages/apalis-sql/src/mysql.rs @@ -10,7 +10,7 @@ use apalis_core::storage::Storage; use apalis_core::task::namespace::Namespace; use apalis_core::task::task_id::TaskId; use apalis_core::worker::WorkerId; -use apalis_core::{Backend, BoxCodec}; +use apalis_core::{Backend, Codec}; use async_stream::try_stream; use chrono::{DateTime, Utc}; use futures::{Stream, StreamExt, TryStreamExt}; @@ -21,6 +21,7 @@ use sqlx::mysql::MySqlRow; use sqlx::{MySql, Pool, Row}; use std::any::type_name; use std::convert::TryInto; +use std::fmt::Debug; use std::sync::Arc; use std::{fmt, io}; use std::{marker::PhantomData, ops::Add, time::Duration}; @@ -33,35 +34,39 @@ pub use sqlx::mysql::MySqlPool; /// Represents a [Storage] that persists to MySQL -pub struct MysqlStorage { +pub struct MysqlStorage> +where + C: Codec, +{ pool: Pool, job_type: PhantomData, controller: Controller, config: Config, - codec: BoxCodec, - ack_notify: Notify<( - SqlContext, - Result, - )>, + codec: PhantomData, + ack_notify: Notify<(SqlContext, Result)>, } -impl fmt::Debug for MysqlStorage { +impl fmt::Debug for MysqlStorage +where + C: Debug + Codec, + C::Compact: Debug, +{ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("MysqlStorage") .field("pool", &self.pool) .field("job_type", &"PhantomData") .field("controller", &self.controller) .field("config", &self.config) - .field( - "codec", - &"Arc + Sync + Send + 'static>>", - ) + .field("codec", &self.codec) .field("ack_notify", &self.ack_notify) .finish() } } -impl Clone for MysqlStorage { +impl Clone for MysqlStorage +where + C: Debug + Codec, +{ fn clone(&self) -> Self { let pool = self.pool.clone(); MysqlStorage { @@ -75,7 +80,7 @@ impl Clone for MysqlStorage { } } -impl MysqlStorage<()> { +impl MysqlStorage<(), JsonCodec> { /// Get mysql migrations without running them #[cfg(feature = "migrate")] pub fn migrations() -> sqlx::migrate::Migrator { @@ -90,7 +95,11 @@ impl MysqlStorage<()> { } } -impl MysqlStorage { +impl MysqlStorage +where + T: Serialize + DeserializeOwned, + C: Codec, +{ /// Create a new instance from a pool pub fn new(pool: MySqlPool) -> Self { Self::new_with_config(pool, Config::new(type_name::())) @@ -103,8 +112,8 @@ impl MysqlStorage { job_type: PhantomData, controller: 
Controller::new(), config, - codec: Arc::new(Box::new(JsonCodec)), ack_notify: Notify::new(), + codec: PhantomData, } } @@ -114,7 +123,8 @@ impl MysqlStorage { } /// Expose the codec - pub fn codec(&self) -> &BoxCodec { + #[doc(hidden)] + pub fn codec(&self) -> &PhantomData { &self.codec } @@ -124,7 +134,11 @@ impl MysqlStorage { } } -impl MysqlStorage { +impl MysqlStorage +where + T: DeserializeOwned + Send + Unpin + Sync + 'static, + C: Codec + Send + 'static, +{ fn stream_jobs( self, worker_id: &WorkerId, @@ -174,9 +188,7 @@ impl MysqlStorage { for job in jobs { yield { let (req, ctx) = job.into_tuple(); - let req = self - .codec - .decode(&req) + let req = C::decode(req) .map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::InvalidData, e))) .unwrap(); let req = SqlRequest::new(req, ctx); @@ -214,9 +226,10 @@ impl MysqlStorage { } } -impl Storage for MysqlStorage +impl Storage for MysqlStorage where T: Serialize + DeserializeOwned + Send + 'static + Unpin + Sync, + C: Codec + Send, { type Job = T; @@ -230,9 +243,7 @@ where "INSERT INTO jobs VALUES (?, ?, ?, 'Pending', 0, 25, now(), NULL, NULL, NULL, NULL)"; let pool = self.pool.clone(); - let job = self - .codec - .encode(&job) + let job = C::encode(job) .map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::InvalidData, e)))?; let job_type = self.config.namespace.clone(); sqlx::query(query) @@ -250,9 +261,7 @@ where let pool = self.pool.clone(); let id = TaskId::new(); - let job = self - .codec - .encode(&job) + let job = C::encode(job) .map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::InvalidData, e)))?; let job_type = self.config.namespace.clone(); @@ -281,9 +290,7 @@ where None => Ok(None), Some(job) => Ok(Some({ let (req, ctx) = job.into_tuple(); - let req = self - .codec - .decode(&req) + let req = C::decode(req) .map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::InvalidData, e)))?; let req = SqlRequest::new(req, ctx); let mut req: Request = req.into(); @@ -372,12 +379,14 @@ where } } -impl Backend, Res> - for MysqlStorage +impl Backend, Res> for MysqlStorage +where + T: Serialize + DeserializeOwned + Sync + Send + Unpin + 'static, + C: Debug + Codec + Clone + Send + 'static, { type Stream = BackendStream>>; - type Layer = AckLayer, T, Res>; + type Layer = AckLayer, T, Res>; fn poll(self, worker: WorkerId) -> Poller { let layer = AckLayer::new(self.clone()); @@ -438,7 +447,12 @@ impl Backe } } -impl Ack for MysqlStorage { +impl Ack for MysqlStorage +where + T: Sync + Send, + Res: Serialize + Send + 'static + Sync, + C: Codec + Send, +{ type Context = SqlContext; type AckError = sqlx::Error; async fn ack( @@ -450,8 +464,8 @@ impl Ack for Mys .notify(( ctx.clone(), res.as_ref() - .map(|r| serde_json::to_value(&r).unwrap()) - .map_err(|c| c.clone()), + .map_err(|c| c.clone()) + .and_then(|r| C::encode(r).map_err(|e| Error::SourceError(Arc::new(e.into())))), )) .map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::BrokenPipe, e)))?; @@ -459,7 +473,7 @@ impl Ack for Mys } } -impl MysqlStorage { +impl MysqlStorage { /// Kill a job pub async fn kill(&mut self, worker_id: &WorkerId, job_id: &TaskId) -> Result<(), sqlx::Error> { let pool = self.pool.clone(); diff --git a/packages/apalis-sql/src/postgres.rs b/packages/apalis-sql/src/postgres.rs index c4e4b642..5f575cad 100644 --- a/packages/apalis-sql/src/postgres.rs +++ b/packages/apalis-sql/src/postgres.rs @@ -52,13 +52,14 @@ use apalis_core::storage::Storage; use apalis_core::task::namespace::Namespace; use apalis_core::task::task_id::TaskId; use 
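The same refactor lands in MysqlStorage: the codec becomes part of the storage type, and the migrations impl above suggests JsonCodec<Value> is the default type parameter, so existing callers keep compiling unchanged. A sketch under that assumption, with an illustrative Email job type:

use apalis_sql::mysql::{MySqlPool, MysqlStorage};
use apalis_sql::Config;
use serde::{Deserialize, Serialize};

#[derive(Debug, Serialize, Deserialize)]
struct Email {
    to: String,
}

// Equivalent to MysqlStorage<Email, JsonCodec<Value>>, the default.
fn email_storage(pool: MySqlPool) -> MysqlStorage<Email> {
    MysqlStorage::new_with_config(pool, Config::new("apalis::Email"))
}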
apalis_core::worker::WorkerId; -use apalis_core::{Backend, BoxCodec}; +use apalis_core::{Backend, Codec}; use chrono::{DateTime, Utc}; use futures::channel::mpsc; use futures::StreamExt; use futures::{select, stream, SinkExt}; use log::error; use serde::{de::DeserializeOwned, Serialize}; +use serde_json::Value; use sqlx::postgres::PgListener; use sqlx::{Pool, Postgres, Row}; use std::any::type_name; @@ -76,22 +77,25 @@ use crate::from_row::SqlRequest; /// Represents a [Storage] that persists to Postgres // #[derive(Debug)] -pub struct PostgresStorage { +pub struct PostgresStorage> +where + C: Codec, +{ pool: PgPool, job_type: PhantomData, - codec: BoxCodec, + codec: PhantomData, config: Config, controller: Controller, - ack_notify: Notify<(SqlContext, Result)>, + ack_notify: Notify<(SqlContext, Result)>, subscription: Option, } -impl Clone for PostgresStorage { +impl Clone for PostgresStorage { fn clone(&self) -> Self { PostgresStorage { pool: self.pool.clone(), job_type: PhantomData, - codec: self.codec.clone(), + codec: PhantomData, config: self.config.clone(), controller: self.controller.clone(), ack_notify: self.ack_notify.clone(), @@ -100,28 +104,27 @@ impl Clone for PostgresStorage { } } -impl fmt::Debug for PostgresStorage { +impl fmt::Debug for PostgresStorage { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("PostgresStorage") .field("pool", &self.pool) .field("job_type", &"PhantomData") .field("controller", &self.controller) .field("config", &self.config) - .field( - "codec", - &"Arc + Sync + Send + 'static>>", - ) - .field("ack_notify", &self.ack_notify) + .field("codec", &std::any::type_name::()) + .field("ack_notify", &std::any::type_name_of_val(&self.ack_notify)) .finish() } } -impl Backend, Res> - for PostgresStorage +impl Backend, Res> for PostgresStorage +where + T: Serialize + DeserializeOwned + Sync + Send + Unpin + 'static, + C: Codec + Send + 'static, { type Stream = BackendStream>>; - type Layer = AckLayer, T, Res>; + type Layer = AckLayer, T, Res>; fn poll(mut self, worker: WorkerId) -> Poller { let layer = AckLayer::new(self.clone()); @@ -141,8 +144,11 @@ impl Backe .map(|stm| stm.notify.boxed().fuse()) .unwrap_or(stream::iter(vec![]).boxed().fuse()); - async fn fetch_next_batch( - storage: &mut PostgresStorage, + async fn fetch_next_batch< + T: Unpin + DeserializeOwned + Send + 'static, + C: Codec, + >( + storage: &mut PostgresStorage, worker: &WorkerId, tx: &mut mpsc::Sender>, Error>>, ) -> Result<(), Error> { @@ -238,7 +244,7 @@ impl PostgresStorage<()> { } } -impl PostgresStorage { +impl PostgresStorage { /// New Storage from [PgPool] pub fn new(pool: PgPool) -> Self { Self::new_with_config(pool, Config::new(type_name::())) @@ -248,7 +254,7 @@ impl PostgresStorage { Self { pool, job_type: PhantomData, - codec: Arc::new(Box::new(JsonCodec)), + codec: PhantomData, config, controller: Controller::new(), ack_notify: Notify::new(), @@ -265,11 +271,38 @@ impl PostgresStorage { pub fn config(&self) -> &Config { &self.config } +} +impl PostgresStorage { /// Expose the codec - pub fn codec(&self) -> &BoxCodec { + pub fn codec(&self) -> &PhantomData { &self.codec } + + async fn keep_alive_at( + &mut self, + worker_id: &WorkerId, + last_seen: Timestamp, + ) -> Result<(), sqlx::Error> { + let last_seen = DateTime::from_timestamp(last_seen, 0).ok_or(sqlx::Error::Io( + io::Error::new(io::ErrorKind::InvalidInput, "Invalid Timestamp"), + ))?; + let worker_type = self.config.namespace.clone(); + let storage_name = std::any::type_name::(); + let query = 
"INSERT INTO apalis.workers (id, worker_type, storage_name, layers, last_seen) + VALUES ($1, $2, $3, $4, $5) + ON CONFLICT (id) DO + UPDATE SET last_seen = EXCLUDED.last_seen"; + sqlx::query(query) + .bind(worker_id.to_string()) + .bind(worker_type) + .bind(storage_name) + .bind(std::any::type_name::()) + .bind(last_seen) + .execute(&self.pool) + .await?; + Ok(()) + } } /// A listener that listens to Postgres notifications @@ -331,7 +364,11 @@ impl PgListen { } } -impl PostgresStorage { +impl PostgresStorage +where + T: DeserializeOwned + Send + Unpin + 'static, + C: Codec, +{ async fn fetch_next(&mut self, worker_id: &WorkerId) -> Result>, sqlx::Error> { let config = &self.config; let job_type = &config.namespace; @@ -350,9 +387,7 @@ impl PostgresStorage { .into_iter() .map(|job| { let (req, ctx) = job.into_tuple(); - let req = self - .codec - .decode(&req) + let req = C::decode(req) .map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::InvalidData, e))) .unwrap(); let req = SqlRequest::new(req, ctx); @@ -363,36 +398,12 @@ impl PostgresStorage { .collect(); Ok(jobs) } - - async fn keep_alive_at( - &mut self, - worker_id: &WorkerId, - last_seen: Timestamp, - ) -> Result<(), sqlx::Error> { - let last_seen = DateTime::from_timestamp(last_seen, 0).ok_or(sqlx::Error::Io( - io::Error::new(io::ErrorKind::InvalidInput, "Invalid Timestamp"), - ))?; - let worker_type = self.config.namespace.clone(); - let storage_name = std::any::type_name::(); - let query = "INSERT INTO apalis.workers (id, worker_type, storage_name, layers, last_seen) - VALUES ($1, $2, $3, $4, $5) - ON CONFLICT (id) DO - UPDATE SET last_seen = EXCLUDED.last_seen"; - sqlx::query(query) - .bind(worker_id.to_string()) - .bind(worker_type) - .bind(storage_name) - .bind(std::any::type_name::()) - .bind(last_seen) - .execute(&self.pool) - .await?; - Ok(()) - } } -impl Storage for PostgresStorage +impl Storage for PostgresStorage where T: Serialize + DeserializeOwned + Send + 'static + Unpin + Sync, + C: Codec + Send + 'static, { type Job = T; @@ -411,9 +422,7 @@ where let id = TaskId::new(); let query = "INSERT INTO apalis.jobs VALUES ($1, $2, $3, 'Pending', 0, 25, NOW() , NULL, NULL, NULL, NULL)"; - let job = self - .codec - .encode(&job) + let job = C::encode(&job) .map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::InvalidData, e)))?; let job_type = self.config.namespace.clone(); sqlx::query(query) @@ -431,9 +440,7 @@ where let id = TaskId::new(); let on = DateTime::from_timestamp(on, 0); - let job = self - .codec - .encode(&job) + let job = C::encode(&job) .map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::InvalidInput, e)))?; let job_type = self.config.namespace.clone(); sqlx::query(query) @@ -460,11 +467,8 @@ where None => Ok(None), Some(job) => Ok(Some({ let (req, ctx) = job.into_tuple(); - let req = self - .codec - .decode(&req) - .map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::InvalidData, e))) - .unwrap(); + let req = C::decode(req) + .map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::InvalidData, e)))?; let req = SqlRequest::new(req, ctx); let mut req: Request = req.into(); req.insert(Namespace(self.config.namespace.clone())); @@ -546,7 +550,12 @@ where } } -impl Ack for PostgresStorage { +impl Ack for PostgresStorage +where + T: Sync + Send, + Res: Serialize + Sync, + C: Codec + Send, +{ type Context = SqlContext; type AckError = sqlx::Error; async fn ack( @@ -558,7 +567,13 @@ impl Ack for PostgresStorage { .notify(( ctx.clone(), res.as_ref() - .map(|r| serde_json::to_value(r).unwrap()) 
+ .map(|r| { + C::encode(r) + .map_err(|e| { + sqlx::Error::Io(io::Error::new(io::ErrorKind::Interrupted, e)) + }) + .unwrap() + }) .map_err(|e| e.clone()), )) .map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::Interrupted, e)))?; @@ -567,7 +582,7 @@ impl Ack for PostgresStorage { } } -impl PostgresStorage { +impl PostgresStorage { /// Kill a job pub async fn kill( &mut self, diff --git a/packages/apalis-sql/src/sqlite.rs b/packages/apalis-sql/src/sqlite.rs index 42bda01f..8b726e23 100644 --- a/packages/apalis-sql/src/sqlite.rs +++ b/packages/apalis-sql/src/sqlite.rs @@ -11,7 +11,7 @@ use apalis_core::storage::Storage; use apalis_core::task::namespace::Namespace; use apalis_core::task::task_id::TaskId; use apalis_core::worker::WorkerId; -use apalis_core::{Backend, BoxCodec}; +use apalis_core::{Backend, Codec}; use async_stream::try_stream; use chrono::Utc; use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; @@ -27,32 +27,24 @@ use crate::from_row::SqlRequest; pub use sqlx::sqlite::SqlitePool; -/// The code used to encode Sqlite jobs. -/// -/// Currently uses JSON -pub type SqliteCodec = BoxCodec; - /// Represents a [Storage] that persists to Sqlite // #[derive(Debug)] -pub struct SqliteStorage { +pub struct SqliteStorage> { pool: Pool, job_type: PhantomData, controller: Controller, config: Config, - codec: SqliteCodec, + codec: PhantomData, } -impl fmt::Debug for SqliteStorage { +impl fmt::Debug for SqliteStorage { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("MysqlStorage") .field("pool", &self.pool) .field("job_type", &"PhantomData") .field("controller", &self.controller) .field("config", &self.config) - .field( - "codec", - &"Arc + Sync + Send + 'static>>", - ) + .field("codec", &std::any::type_name::()) .finish() } } @@ -102,7 +94,7 @@ impl SqliteStorage { job_type: PhantomData, controller: Controller::new(), config: Config::new(type_name::()), - codec: Arc::new(Box::new(JsonCodec)), + codec: PhantomData, } } @@ -113,7 +105,7 @@ impl SqliteStorage { job_type: PhantomData, controller: Controller::new(), config, - codec: Arc::new(Box::new(JsonCodec)), + codec: PhantomData, } } /// Keeps a storage notified that the worker is still alive manually @@ -144,17 +136,19 @@ impl SqliteStorage { &self.pool } - /// Expose the code used - pub fn codec(&self) -> &SqliteCodec { - &self.codec - } - /// Get the config used by the storage pub fn get_config(&self) -> &Config { &self.config } } +impl SqliteStorage { + /// Expose the code used + pub fn codec(&self) -> &PhantomData { + &self.codec + } +} + async fn fetch_next( pool: &Pool, worker_id: &WorkerId, @@ -174,7 +168,11 @@ async fn fetch_next( Ok(job) } -impl SqliteStorage { +impl SqliteStorage +where + T: DeserializeOwned + Send + Unpin, + C: Codec, +{ fn stream_jobs( &self, worker_id: &WorkerId, @@ -183,7 +181,6 @@ impl SqliteStorage { ) -> impl Stream>, sqlx::Error>> { let pool = self.pool.clone(); let worker_id = worker_id.clone(); - let codec = self.codec.clone(); let config = self.config.clone(); try_stream! 
{ loop { @@ -205,8 +202,7 @@ impl SqliteStorage { None => None::>, Some(job) => { let (req, ctx) = job.into_tuple(); - let req = codec - .decode(&req) + let req = C::decode(req) .map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::InvalidData, e)))?; let req = SqlRequest::new(req, ctx); let mut req: Request = req.into(); @@ -221,9 +217,10 @@ impl SqliteStorage { } } -impl Storage for SqliteStorage +impl Storage for SqliteStorage where T: Serialize + DeserializeOwned + Send + 'static + Unpin + Sync, + C: Codec + Send, { type Job = T; @@ -235,9 +232,7 @@ where let id = TaskId::new(); let query = "INSERT INTO Jobs VALUES (?1, ?2, ?3, 'Pending', 0, 25, strftime('%s','now'), NULL, NULL, NULL, NULL)"; - let job = self - .codec - .encode(&job) + let job = C::encode(&job) .map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::InvalidData, e)))?; let job_type = self.config.namespace.clone(); sqlx::query(query) @@ -254,9 +249,7 @@ where "INSERT INTO Jobs VALUES (?1, ?2, ?3, 'Pending', 0, 25, ?4, NULL, NULL, NULL, NULL)"; let id = TaskId::new(); - let job = self - .codec - .encode(&job) + let job = C::encode(&job) .map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::InvalidData, e)))?; let job_type = self.config.namespace.clone(); sqlx::query(query) @@ -282,11 +275,8 @@ where None => Ok(None), Some(job) => Ok(Some({ let (req, ctx) = job.into_tuple(); - let req = self - .codec - .decode(&req) - .map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::InvalidData, e))) - .unwrap(); + let req = C::decode(req) + .map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::InvalidData, e)))?; let req = SqlRequest::new(req, ctx); let mut req: Request = req.into(); req.insert(Namespace(self.config.namespace.clone())); From 39fd007a4ba3bfaaeb9734ee95afc738b10961cf Mon Sep 17 00:00:00 2001 From: Geoffrey Mureithi <95377562+geofmureithi@users.noreply.github.com> Date: Sat, 3 Aug 2024 15:40:18 +0300 Subject: [PATCH 40/59] Chore/more examples (#389) * add: catch-panic example * add: graceful shutdown example * add: unmonitored example * add: arguments example * fix: minor updates * fix: sql tests * fix: minor updates --- Cargo.toml | 5 +- examples/catch-panic/Cargo.toml | 24 +++++++++ examples/catch-panic/src/main.rs | 54 +++++++++++++++++++ examples/email-service/src/lib.rs | 4 +- examples/fn-args/Cargo.toml | 22 ++++++++ examples/fn-args/src/main.rs | 71 +++++++++++++++++++++++++ examples/graceful-shutdown/Cargo.toml | 20 +++++++ examples/graceful-shutdown/src/main.rs | 48 +++++++++++++++++ examples/redis-mq-example/src/main.rs | 4 +- examples/redis/src/main.rs | 17 +----- examples/unmonitored-worker/Cargo.toml | 22 ++++++++ examples/unmonitored-worker/src/main.rs | 41 ++++++++++++++ packages/apalis-core/src/error.rs | 2 +- packages/apalis-core/src/worker/mod.rs | 10 +++- packages/apalis-redis/src/storage.rs | 2 +- packages/apalis-sql/src/context.rs | 4 +- packages/apalis-sql/src/lib.rs | 7 +-- packages/apalis-sql/src/mysql.rs | 2 +- packages/apalis-sql/src/postgres.rs | 2 +- packages/apalis-sql/src/sqlite.rs | 6 +-- src/layers/catch_panic/mod.rs | 15 +++--- 21 files changed, 336 insertions(+), 46 deletions(-) create mode 100644 examples/catch-panic/Cargo.toml create mode 100644 examples/catch-panic/src/main.rs create mode 100644 examples/fn-args/Cargo.toml create mode 100644 examples/fn-args/src/main.rs create mode 100644 examples/graceful-shutdown/Cargo.toml create mode 100644 examples/graceful-shutdown/src/main.rs create mode 100644 examples/unmonitored-worker/Cargo.toml create mode 100644 
examples/unmonitored-worker/src/main.rs diff --git a/Cargo.toml b/Cargo.toml index 68e2cead..caf534e6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -38,7 +38,7 @@ limit = ["tower/limit"] ## Support filtering jobs based on a predicate filter = ["tower/filter"] ## Captures panics in executions and convert them to errors -catch-panic = ["dep:backtrace"] +catch-panic = [] ## Compatibility with async-std and smol runtimes async-std-comp = ["async-std"] ## Compatibility with tokio and actix runtimes @@ -120,7 +120,7 @@ members = [ "examples/redis-with-msg-pack", "examples/redis-deadpool", "examples/redis-mq-example", - "examples/cron", + "examples/cron", "examples/catch-panic", "examples/graceful-shutdown", "examples/unmonitored-worker", "examples/fn-args", ] @@ -141,7 +141,6 @@ pin-project-lite = "0.2.14" uuid = { version = "1.8", optional = true } ulid = { version = "1", optional = true } serde = { version = "1.0", features = ["derive"] } -backtrace = { version = "0.3", optional = true } [dependencies.tracing] default-features = false diff --git a/examples/catch-panic/Cargo.toml b/examples/catch-panic/Cargo.toml new file mode 100644 index 00000000..0e6fa537 --- /dev/null +++ b/examples/catch-panic/Cargo.toml @@ -0,0 +1,24 @@ +[package] +name = "catch-panic" +version = "0.1.0" +edition.workspace = true +repository.workspace = true + +[dependencies] +anyhow = "1" +tokio = { version = "1", features = ["full"] } +apalis = { path = "../../", features = ["limit", "tracing", "tokio-comp", "catch-panic"] } +apalis-sql = { path = "../../packages/apalis-sql", features = ["sqlite"] } +serde = { version = "1", features = ["derive"] } +tracing-subscriber = "0.3.11" +email-service = { path = "../email-service" } + + +[dependencies.tracing] +default-features = false +version = "0.1" + +[dependencies.sqlx] +version = "0.8" +default-features = false +features = ["sqlite", "runtime-tokio"] diff --git a/examples/catch-panic/src/main.rs b/examples/catch-panic/src/main.rs new file mode 100644 index 00000000..4eca4e0a --- /dev/null +++ b/examples/catch-panic/src/main.rs @@ -0,0 +1,54 @@ +use anyhow::Result; +use apalis::layers::catch_panic::CatchPanicLayer; +use apalis::utils::TokioExecutor; +use apalis::{layers::tracing::TraceLayer, prelude::*}; +use apalis_sql::sqlite::SqliteStorage; + +use email_service::Email; +use sqlx::SqlitePool; + +async fn produce_emails(storage: &mut SqliteStorage) -> Result<()> { + for i in 0..2 { + storage + .push(Email { + to: format!("test{i}@example.com"), + text: "Test background job from apalis".to_string(), + subject: "Background email job".to_string(), + }) + .await?; + } + Ok(()) +} + +async fn send_email(_: Email) { + unimplemented!("panic from unimplemented") +} + +#[tokio::main] +async fn main() -> Result<()> { + std::env::set_var("RUST_LOG", "debug,sqlx::query=info"); + tracing_subscriber::fmt::init(); + + let pool = SqlitePool::connect("sqlite::memory:").await?; + // Do migrations: Mainly for "sqlite::memory:" + SqliteStorage::setup(&pool) + .await + .expect("unable to run migrations for sqlite"); + + let mut email_storage: SqliteStorage = SqliteStorage::new(pool.clone()); + + produce_emails(&mut email_storage).await?; + + Monitor::::new() + .register_with_count(2, { + WorkerBuilder::new("tasty-banana") + .layer(CatchPanicLayer::new()) + .layer(TraceLayer::new()) + .backend(email_storage) + .build_fn(send_email) + }) + .on_event(|e| tracing::info!("{e:?}")) + .run() + .await?; + Ok(()) +} diff --git a/examples/email-service/src/lib.rs b/examples/email-service/src/lib.rs index 
467de899..252fee6e 100644 --- a/examples/email-service/src/lib.rs +++ b/examples/email-service/src/lib.rs @@ -20,7 +20,9 @@ pub async fn send_email(job: Email) -> Result<(), Error> { } Err(email_address::Error::InvalidCharacter) => { log::error!("Killed send email job. Invalid character {}", job.to); - Err(Error::Abort(String::from("Invalid character. Job killed"))) + Err(Error::Abort(Arc::new(Box::new( + email_address::Error::InvalidCharacter, + )))) } Err(e) => Err(Error::Failed(Arc::new(Box::new(e)))), } diff --git a/examples/fn-args/Cargo.toml b/examples/fn-args/Cargo.toml new file mode 100644 index 00000000..268e2dc8 --- /dev/null +++ b/examples/fn-args/Cargo.toml @@ -0,0 +1,22 @@ +[package] +name = "fn-args" +version = "0.1.0" +edition.workspace = true +repository.workspace = true + +[dependencies] +tokio = { version = "1", features = ["full"] } +apalis = { path = "../../", features = ["limit", "tokio-comp", "catch-panic"] } +apalis-sql = { path = "../../packages/apalis-sql", features = [ + "sqlite", + "tokio-comp", +] } +serde = "1" +tracing-subscriber = "0.3.11" +futures = "0.3" +tower = "0.4" + + +[dependencies.tracing] +default-features = false +version = "0.1" diff --git a/examples/fn-args/src/main.rs b/examples/fn-args/src/main.rs new file mode 100644 index 00000000..5fd614cb --- /dev/null +++ b/examples/fn-args/src/main.rs @@ -0,0 +1,71 @@ +use std::{ + ops::Deref, + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, +}; + +use apalis::{prelude::*, utils::TokioExecutor}; +use apalis_sql::{ + context::SqlContext, + sqlite::{SqlitePool, SqliteStorage}, +}; +use serde::{Deserialize, Serialize}; +use tracing::info; + +#[derive(Debug, Serialize, Deserialize)] +struct SimpleJob {} + +// A task can have up to 16 arguments +async fn simple_job( + _: SimpleJob, // Required, must be of the type of the job/message + worker_id: WorkerId, // The worker running the job, added by worker + _worker_ctx: Context, // The worker context, added by worker + _sqlite: Data>, // The source, added by storage + task_id: Data, // The task id, added by storage + ctx: Data, // The task context, added by storage + count: Data, // Our custom data added via layer +) { + // increment the counter + let current = count.fetch_add(1, Ordering::Relaxed); + info!("worker: {worker_id}; task_id: {task_id:?}, ctx: {ctx:?}, count: {current:?}"); +} + +async fn produce_jobs(storage: &mut SqliteStorage) { + for _ in 0..10 { + storage.push(SimpleJob {}).await.unwrap(); + } +} + +#[derive(Clone, Debug, Default)] +struct Count(Arc); + +impl Deref for Count { + type Target = Arc; + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +#[tokio::main] +async fn main() -> Result<(), std::io::Error> { + std::env::set_var("RUST_LOG", "debug,sqlx::query=error"); + tracing_subscriber::fmt::init(); + let pool = SqlitePool::connect("sqlite::memory:").await.unwrap(); + SqliteStorage::setup(&pool) + .await + .expect("unable to run migrations for sqlite"); + let mut sqlite: SqliteStorage = SqliteStorage::new(pool); + produce_jobs(&mut sqlite).await; + Monitor::::new() + .register_with_count(2, { + WorkerBuilder::new("tasty-banana") + .data(Count::default()) + .backend(sqlite) + .build_fn(simple_job) + }) + .run() + .await?; + Ok(()) +} diff --git a/examples/graceful-shutdown/Cargo.toml b/examples/graceful-shutdown/Cargo.toml new file mode 100644 index 00000000..cb096bf1 --- /dev/null +++ b/examples/graceful-shutdown/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "graceful-shutdown" +version = "0.1.0" +edition.workspace = true 
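The email-service hunk above reflects the new Abort contract: Error::Abort now wraps a typed, boxed error instead of a String, so a handler that wants to kill a job returns the error value itself. A sketch with an illustrative InvalidRecipient error type, assuming the thiserror crate:

use std::sync::Arc;

use apalis::prelude::Error;
use serde::{Deserialize, Serialize};

#[derive(Debug, Serialize, Deserialize)]
struct Email {
    to: String,
}

#[derive(Debug, thiserror::Error)]
#[error("invalid recipient address")]
struct InvalidRecipient;

async fn send_email(job: Email) -> Result<(), Error> {
    if !job.to.contains('@') {
        // Abort marks the job as dead; it will not be retried.
        return Err(Error::Abort(Arc::new(Box::new(InvalidRecipient))));
    }
    Ok(())
}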
+repository.workspace = true + +[dependencies] +thiserror = "1" +tokio = { version = "1", features = ["full"] } +apalis = { path = "../../", features = ["limit", "tokio-comp", "catch-panic"] } +apalis-sql = { path = "../../packages/apalis-sql", features = ["sqlite", "tokio-comp"]} +serde = "1" +tracing-subscriber = "0.3.11" +futures = "0.3" +tower = "0.4" + + +[dependencies.tracing] +default-features = false +version = "0.1" diff --git a/examples/graceful-shutdown/src/main.rs b/examples/graceful-shutdown/src/main.rs new file mode 100644 index 00000000..3a2006a5 --- /dev/null +++ b/examples/graceful-shutdown/src/main.rs @@ -0,0 +1,48 @@ +use std::time::Duration; + +use apalis::{prelude::*, utils::TokioExecutor}; +use apalis_sql::sqlite::{SqlitePool, SqliteStorage}; +use serde::{Deserialize, Serialize}; +use tracing::info; + +#[derive(Debug, Serialize, Deserialize)] +struct LongRunningJob {} + +async fn long_running_task(_task: LongRunningJob, worker_ctx: Context) { + loop { + tokio::time::sleep(Duration::from_secs(1)).await; // Do some hard thing + info!("is_shutting_down: {}", worker_ctx.is_shutting_down(),); + if worker_ctx.is_shutting_down() { + info!("saving the job state"); + break; + } + } +} + +async fn produce_jobs(storage: &mut SqliteStorage) { + storage.push(LongRunningJob {}).await.unwrap(); +} + +#[tokio::main] +async fn main() -> Result<(), std::io::Error> { + std::env::set_var("RUST_LOG", "debug,sqlx::query=error"); + tracing_subscriber::fmt::init(); + let pool = SqlitePool::connect("sqlite::memory:").await.unwrap(); + SqliteStorage::setup(&pool) + .await + .expect("unable to run migrations for sqlite"); + let mut sqlite: SqliteStorage = SqliteStorage::new(pool); + produce_jobs(&mut sqlite).await; + Monitor::::new() + .register_with_count(2, { + WorkerBuilder::new("tasty-banana") + .backend(sqlite) + .build_fn(long_running_task) + }) + // Wait 10 seconds after shutdown is triggered to allow any incomplete jobs to complete + .shutdown_timeout(Duration::from_secs(10)) + // Use .run() if you don't want to wait for a shutdown signal + .run_with_signal(tokio::signal::ctrl_c()) // This will wait for ctrl+c then gracefully shutdown + .await?; + Ok(()) +} diff --git a/examples/redis-mq-example/src/main.rs b/examples/redis-mq-example/src/main.rs index 4fd17daa..1b3f2456 100644 --- a/examples/redis-mq-example/src/main.rs +++ b/examples/redis-mq-example/src/main.rs @@ -29,7 +29,7 @@ impl Clone for RedisMq { conn: self.conn.clone(), msg_type: PhantomData, config: self.config.clone(), - codec: self.codec.clone(), + codec: self.codec, } } } @@ -86,7 +86,7 @@ where _res: &Result, ) -> Result<(), Self::AckError> { self.conn - .delete_message(self.config.get_namespace(), &ctx) + .delete_message(self.config.get_namespace(), ctx) .await?; Ok(()) } diff --git a/examples/redis/src/main.rs b/examples/redis/src/main.rs index b0c9dd9d..5e0723e2 100644 --- a/examples/redis/src/main.rs +++ b/examples/redis/src/main.rs @@ -1,8 +1,4 @@ -use std::{ - ops::Deref, - sync::{atomic::AtomicUsize, Arc}, - time::Duration, -}; +use std::{sync::Arc, time::Duration}; use anyhow::Result; use apalis::layers::limit::RateLimitLayer; @@ -25,16 +21,6 @@ async fn produce_jobs(mut storage: RedisStorage) -> Result<()> { Ok(()) } -#[derive(Clone, Debug, Default)] -struct Count(Arc); - -impl Deref for Count { - type Target = Arc; - fn deref(&self) -> &Self::Target { - &self.0 - } -} - #[tokio::main] async fn main() -> Result<()> { std::env::set_var("RUST_LOG", "debug"); @@ -50,7 +36,6 @@ async fn main() -> Result<()> { .chain(|svc| 
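The ctrl_c usage in the graceful-shutdown example above suggests run_with_signal accepts any future resolving to io::Result<()>, so the trigger can be swapped for another signal. A sketch listening for SIGTERM instead, e.g. under Kubernetes, assuming a Unix target and tokio's "signal" feature:

use std::time::Duration;

use apalis::prelude::Monitor;
use apalis::utils::TokioExecutor;
use tokio::signal::unix::{signal, SignalKind};

async fn run(monitor: Monitor<TokioExecutor>) -> std::io::Result<()> {
    let mut sigterm = signal(SignalKind::terminate())?;
    monitor
        // Give in-flight jobs ten seconds to wind down after the signal.
        .shutdown_timeout(Duration::from_secs(10))
        .run_with_signal(async move {
            sigterm.recv().await;
            Ok(())
        })
        .await
}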
svc.map_err(|e| Error::Failed(Arc::new(e)))) .layer(RateLimitLayer::new(5, Duration::from_secs(1))) .layer(TimeoutLayer::new(Duration::from_millis(500))) - .data(Count::default()) .backend(storage) .build_fn(send_email); diff --git a/examples/unmonitored-worker/Cargo.toml b/examples/unmonitored-worker/Cargo.toml new file mode 100644 index 00000000..926aba98 --- /dev/null +++ b/examples/unmonitored-worker/Cargo.toml @@ -0,0 +1,22 @@ +[package] +name = "unmonitored-worker" +version = "0.1.0" +edition.workspace = true +repository.workspace = true + +[dependencies] +tokio = { version = "1", features = ["full"] } +apalis = { path = "../../", features = ["limit", "tokio-comp", "catch-panic"] } +apalis-sql = { path = "../../packages/apalis-sql", features = [ + "sqlite", + "tokio-comp", +] } +serde = "1" +tracing-subscriber = "0.3.11" +futures = "0.3" +tower = "0.4" + + +[dependencies.tracing] +default-features = false +version = "0.1" diff --git a/examples/unmonitored-worker/src/main.rs b/examples/unmonitored-worker/src/main.rs new file mode 100644 index 00000000..7c14a1bc --- /dev/null +++ b/examples/unmonitored-worker/src/main.rs @@ -0,0 +1,41 @@ +use std::time::Duration; + +use apalis::{prelude::*, utils::TokioExecutor}; +use apalis_sql::sqlite::{SqlitePool, SqliteStorage}; +use serde::{Deserialize, Serialize}; +use tracing::info; + +#[derive(Debug, Serialize, Deserialize)] +struct SelfMonitoringJob {} + +async fn self_monitoring_task(task: SelfMonitoringJob, worker_ctx: Context) { + info!("task: {:?}, {:?}", task, worker_ctx); + tokio::time::sleep(Duration::from_secs(5)).await; // Do some hard thing + info!("done with task, stopping worker gracefully"); + // use worker_ctx.force_stop() to stop immediately + worker_ctx.stop(); +} + +async fn produce_jobs(storage: &mut SqliteStorage) { + storage.push(SelfMonitoringJob {}).await.unwrap(); +} + +#[tokio::main] +async fn main() -> Result<(), std::io::Error> { + std::env::set_var("RUST_LOG", "debug,sqlx::query=error"); + tracing_subscriber::fmt::init(); + let pool = SqlitePool::connect("sqlite::memory:").await.unwrap(); + SqliteStorage::setup(&pool) + .await + .expect("unable to run migrations for sqlite"); + let mut sqlite: SqliteStorage = SqliteStorage::new(pool); + produce_jobs(&mut sqlite).await; + + WorkerBuilder::new("tasty-banana") + .backend(sqlite) + .build_fn(self_monitoring_task) + .with_executor(TokioExecutor) + .run() + .await; + Ok(()) +} diff --git a/packages/apalis-core/src/error.rs b/packages/apalis-core/src/error.rs index 6c812863..64f1f666 100644 --- a/packages/apalis-core/src/error.rs +++ b/packages/apalis-core/src/error.rs @@ -24,7 +24,7 @@ pub enum Error { /// Execution was aborted #[error("AbortError: {0}")] - Abort(String), + Abort(#[source] Arc), /// Encountered an error during worker execution #[error("WorkerError: {0}")] diff --git a/packages/apalis-core/src/worker/mod.rs b/packages/apalis-core/src/worker/mod.rs index e5eb6b88..a4c1f95a 100644 --- a/packages/apalis-core/src/worker/mod.rs +++ b/packages/apalis-core/src/worker/mod.rs @@ -609,7 +609,13 @@ impl Context { } } - /// Calling this function triggers shutting down the worker + /// Calling this function triggers shutting down the worker without waiting for any tasks to complete + pub fn force_stop(&self) { + self.task_count.store(WORKER_FUTURES, Ordering::Relaxed); + self.stop(); + } + + /// Calling this function triggers shutting down the worker while waiting for any tasks to complete pub fn stop(&self) { self.running.store(false, Ordering::Relaxed); self.wake() @@ 
-643,7 +649,7 @@ impl Context { self.context .as_ref() .map(|s| s.shutdown().is_shutting_down()) - .unwrap_or(false) + .unwrap_or(!self.is_running()) } fn add_waker(&self, cx: &mut TaskCtx<'_>) { diff --git a/packages/apalis-redis/src/storage.rs b/packages/apalis-redis/src/storage.rs index 780ef1c6..34f10243 100644 --- a/packages/apalis-redis/src/storage.rs +++ b/packages/apalis-redis/src/storage.rs @@ -573,7 +573,7 @@ where .key(self.config.job_data_hash()) .arg(ctx.id.to_string()) .arg(now) - .arg(e) + .arg(e.to_string()) .invoke_async(&mut self.conn) .await } diff --git a/packages/apalis-sql/src/context.rs b/packages/apalis-sql/src/context.rs index 5aed23ef..fbbf77ee 100644 --- a/packages/apalis-sql/src/context.rs +++ b/packages/apalis-sql/src/context.rs @@ -117,8 +117,8 @@ impl SqlContext { } /// Set the last error - pub fn set_last_error(&mut self, error: String) { - self.last_error = Some(error); + pub fn set_last_error(&mut self, error: Option) { + self.last_error = error; } /// Record an attempt to execute the request diff --git a/packages/apalis-sql/src/lib.rs b/packages/apalis-sql/src/lib.rs index 03f186ec..b671bb1b 100644 --- a/packages/apalis-sql/src/lib.rs +++ b/packages/apalis-sql/src/lib.rs @@ -164,10 +164,7 @@ macro_rules! sql_storage_tests { .unwrap(); let (job_id, res) = storage.execute_next().await; - assert_eq!( - res, - Err("AbortError: Invalid character. Job killed".to_owned()) - ); + assert_eq!(res, Err("AbortError: Invalid character.".to_owned())); apalis_core::sleep(Duration::from_secs(1)).await; let job = storage.fetch_by_id(&job_id).await.unwrap().unwrap(); let ctx = job.get::().unwrap(); @@ -175,7 +172,7 @@ macro_rules! sql_storage_tests { assert!(ctx.done_at().is_some()); assert_eq!( ctx.last_error().clone().unwrap(), - "{\"Err\":\"AbortError: Invalid character. 
Job killed\"}" + "{\"Err\":\"AbortError: Invalid character.\"}" ); } diff --git a/packages/apalis-sql/src/mysql.rs b/packages/apalis-sql/src/mysql.rs index 2a1c2f71..2939f3e6 100644 --- a/packages/apalis-sql/src/mysql.rs +++ b/packages/apalis-sql/src/mysql.rs @@ -74,7 +74,7 @@ where job_type: PhantomData, controller: self.controller.clone(), config: self.config.clone(), - codec: self.codec.clone(), + codec: self.codec, ack_notify: self.ack_notify.clone(), } } diff --git a/packages/apalis-sql/src/postgres.rs b/packages/apalis-sql/src/postgres.rs index 5f575cad..53b2bcb1 100644 --- a/packages/apalis-sql/src/postgres.rs +++ b/packages/apalis-sql/src/postgres.rs @@ -112,7 +112,7 @@ impl fmt::Debug for PostgresStorage { .field("controller", &self.controller) .field("config", &self.config) .field("codec", &std::any::type_name::()) - .field("ack_notify", &std::any::type_name_of_val(&self.ack_notify)) + // .field("ack_notify", &std::any::type_name_of_val(&self.ack_notify)) .finish() } } diff --git a/packages/apalis-sql/src/sqlite.rs b/packages/apalis-sql/src/sqlite.rs index 8b726e23..27dcb2af 100644 --- a/packages/apalis-sql/src/sqlite.rs +++ b/packages/apalis-sql/src/sqlite.rs @@ -56,7 +56,7 @@ impl Clone for SqliteStorage { job_type: PhantomData, controller: self.controller.clone(), config: self.config.clone(), - codec: self.codec.clone(), + codec: self.codec, } } } @@ -484,7 +484,7 @@ impl Ack for SqliteStorage { .bind(ctx.id().to_string()) .bind(ctx.lock_by().as_ref().unwrap().to_string()) .bind(result) - .bind(calculate_status(&res).to_string()) + .bind(calculate_status(res).to_string()) .bind(ctx.attempts().current() as i64 + 1) .execute(&pool) .await?; @@ -667,7 +667,7 @@ mod tests { assert!(ctx.done_at().is_none()); assert!(ctx.lock_by().is_some()); assert!(ctx.lock_at().is_some()); - assert_eq!(*ctx.last_error(), Some("".to_string())); //TODO: Fix this + assert_eq!(*ctx.last_error(), None); } #[tokio::test] diff --git a/src/layers/catch_panic/mod.rs b/src/layers/catch_panic/mod.rs index 6fe95879..f4b39d75 100644 --- a/src/layers/catch_panic/mod.rs +++ b/src/layers/catch_panic/mod.rs @@ -7,7 +7,6 @@ use std::task::{Context, Poll}; use apalis_core::error::Error; use apalis_core::request::Request; -use backtrace::Backtrace; use tower::Layer; use tower::Service; @@ -66,19 +65,18 @@ pin_project_lite::pin_project! 
{ pub struct CatchPanicFuture { #[pin] future: F, - } } /// An error generated from a panic #[derive(Debug, Clone)] -pub struct PanicError(pub String, pub Backtrace); +pub struct PanicError(pub String); impl std::error::Error for PanicError {} impl fmt::Display for PanicError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "PanicError: {}, Backtrace: {:?}", self.0, self.1) + write!(f, "PanicError: {}", self.0) } } @@ -88,8 +86,8 @@ where { type Output = Result; - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let this = self.project(); + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let this = self.as_mut().project(); match catch_unwind(AssertUnwindSafe(|| this.future.poll(cx))) { Ok(res) => res, @@ -101,9 +99,10 @@ where } else { "Unknown panic".to_string() }; - Poll::Ready(Err(Error::Failed(Arc::new(Box::new(PanicError( + // apalis assumes service functions are pure + // therefore a panic should ideally abort + Poll::Ready(Err(Error::Abort(Arc::new(Box::new(PanicError( panic_info, - Backtrace::new(), )))))) } } From 914bb514921246cabb7bb5e44d7ca69e709646f9 Mon Sep 17 00:00:00 2001 From: Geoffrey Mureithi <95377562+geofmureithi@users.noreply.github.com> Date: Mon, 5 Aug 2024 21:52:27 +0300 Subject: [PATCH 41/59] fix: improve on benches (#379) * fix: improve on benches * fix: bench trigger * fix: include tokio for sqlx * fix: improve the benching approach * fix: mysql api * fix: redis api * fix: improve bench approach, remove counter * remove: setup * remove: pg * fix: pg * fix: pg --- .github/workflows/bench.yaml | 1 + Cargo.toml | 14 +++- benches/storages.rs | 125 +++++++++++++++++-------------- packages/apalis-sql/src/mysql.rs | 9 +-- src/layers/mod.rs | 2 + 5 files changed, 83 insertions(+), 68 deletions(-) diff --git a/.github/workflows/bench.yaml b/.github/workflows/bench.yaml index e5d6f3a2..943a0e95 100644 --- a/.github/workflows/bench.yaml +++ b/.github/workflows/bench.yaml @@ -3,6 +3,7 @@ on: paths: - 'packages/**' - '.github/workflows/bench.yaml' + - 'benches/**' name: Benchmark jobs: storageBenchmark: diff --git a/Cargo.toml b/Cargo.toml index caf534e6..0cdfbbaa 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -80,7 +80,13 @@ pprof = { version = "0.13", features = ["flamegraph"] } paste = "1.0.14" serde = "1" tokio = { version = "1", features = ["macros", "rt-multi-thread"] } +apalis = { path = ".", features = ["limit"]} apalis-redis = { path = "./packages/apalis-redis" } +apalis-sql = { path = "./packages/apalis-sql", features = [ + "postgres", + "mysql", + "sqlite", +] } redis = { version = "0.25.3", default-features = false, features = [ "tokio-comp", "script", @@ -91,7 +97,7 @@ redis = { version = "0.25.3", default-features = false, features = [ [dev-dependencies.sqlx] version = "0.8.0" default-features = false -features = ["chrono", "mysql", "sqlite", "postgres"] +features = ["chrono", "mysql", "sqlite", "postgres", "runtime-tokio"] [[bench]] @@ -120,7 +126,11 @@ members = [ "examples/redis-with-msg-pack", "examples/redis-deadpool", "examples/redis-mq-example", - "examples/cron", "examples/catch-panic", "examples/graceful-shutdown", "examples/unmonitored-worker", "examples/fn-args", + "examples/cron", + "examples/catch-panic", + "examples/graceful-shutdown", + "examples/unmonitored-worker", + "examples/fn-args", ] diff --git a/benches/storages.rs b/benches/storages.rs index bf6a8397..2ee420a1 100644 --- a/benches/storages.rs +++ b/benches/storages.rs @@ -1,19 +1,18 @@ use apalis::prelude::*; - -use apalis::{ - 
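The CatchPanicLayer change above means a panicking handler is now aborted rather than queued for retry: the unwind payload comes back as Error::Abort wrapping a PanicError. A sketch of a worker guarded by the layer; the job type and storage are illustrative:

use apalis::layers::catch_panic::CatchPanicLayer;
use apalis::prelude::*;
use apalis_sql::sqlite::SqliteStorage;
use serde::{Deserialize, Serialize};

#[derive(Debug, Serialize, Deserialize)]
struct Email {
    to: String,
}

async fn flaky(_job: Email) {
    // Surfaces as `AbortError: PanicError: boom`; the job is not retried.
    panic!("boom");
}

fn build(storage: SqliteStorage<Email>) {
    let _worker = WorkerBuilder::new("panicky")
        .layer(CatchPanicLayer::new())
        .backend(storage)
        .build_fn(flaky);
}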
mysql::{MySqlPool, MysqlStorage}, - postgres::{PgPool, PostgresStorage}, - sqlite::{SqlitePool, SqliteStorage}, -}; use apalis_redis::RedisStorage; +use apalis_sql::mysql::MysqlStorage; +use apalis_sql::postgres::PostgresStorage; +use apalis_sql::sqlite::SqliteStorage; +use apalis_sql::Config; use criterion::*; use futures::Future; use paste::paste; use serde::{Deserialize, Serialize}; -use std::sync::atomic::AtomicUsize; -use std::sync::atomic::Ordering; -use std::sync::Arc; -use std::time::{Duration, Instant}; +use sqlx::MySqlPool; +use sqlx::PgPool; +use sqlx::SqlitePool; +use std::time::Duration; +use std::time::Instant; use tokio::runtime::Runtime; macro_rules! define_bench { ($name:expr, $setup:expr ) => { @@ -23,50 +22,46 @@ macro_rules! define_bench { let mut group = c.benchmark_group($name); group.sample_size(10); - group.bench_with_input(BenchmarkId::new("consume", size), &size, |b, &s| { + group.bench_with_input(BenchmarkId::new("consume", size), &size, |b, &size| { b.to_async(Runtime::new().unwrap()) - .iter_custom(|iters| async move { - let mut interval = tokio::time::interval(Duration::from_millis(150)); - let storage = { $setup }; - let mut s1 = storage.clone(); - let counter = Counter::default(); - let c = counter.clone(); - tokio::spawn(async move { - Monitor::::new() - .register({ - let worker = - WorkerBuilder::new(format!("{}-bench", $name)) - .data(c) - .backend(storage) - .build_fn(handle_test_job); - worker - }) - .run() - .await - .unwrap(); - }); + .iter(|| async move { - let start = Instant::now(); - for _ in 0..iters { - for _i in 0..s { - let _ = s1.push(TestJob).await; + let mut storage = { $setup }; + let mut s = storage.clone(); + storage.cleanup().await; + tokio::spawn(async move { + for i in 0..=size { + let _ = s.push(TestJob(i)).await; } - while (counter.0.load(Ordering::Relaxed) != s) || (s1.len().await.unwrap_or(-1) != 0) { - interval.tick().await; + }); + async fn handle_test_job( + req: TestJob, + size: Data, + wrk: Context, + ) -> Result<(), Error> { + if req.0 == *size { + wrk.stop(); } - counter.0.store(0, Ordering::Relaxed); + Ok(()) } - let elapsed = start.elapsed(); - s1.cleanup().await; - elapsed + let start = Instant::now(); + WorkerBuilder::new(format!("{}-bench", $name)) + .data(size as usize) + .backend(storage.clone()) + .build_fn(handle_test_job) + .with_executor(TokioExecutor) + .run() + .await; + storage.cleanup().await; + start.elapsed() }) }); group.bench_with_input(BenchmarkId::new("push", size), &size, |b, &s| { b.to_async(Runtime::new().unwrap()).iter(|| async move { let mut storage = { $setup }; let start = Instant::now(); - for _i in 0..s { - let _ = black_box(storage.push(TestJob).await); + for i in 0..s { + let _ = black_box(storage.push(TestJob(i)).await); } start.elapsed() }); @@ -76,15 +71,7 @@ macro_rules! 
define_bench { } #[derive(Serialize, Deserialize, Debug)] -struct TestJob; -#[derive(Debug, Default, Clone)] -struct Counter(Arc); - -async fn handle_test_job(_req: TestJob, counter: Data) -> Result<(), Error> { - counter.0.fetch_add(1, std::sync::atomic::Ordering::Relaxed); - Ok(()) -} - +struct TestJob(usize); trait CleanUp { fn cleanup(&mut self) -> impl Future + Send; } @@ -110,7 +97,9 @@ impl CleanUp for PostgresStorage { impl CleanUp for MysqlStorage { async fn cleanup(&mut self) { let pool = self.pool(); - let query = "DELETE FROM jobs; DELETE from workers;"; + let query = "DELETE FROM jobs;"; + sqlx::query(query).execute(pool).await.unwrap(); + let query = "DELETE from workers;"; sqlx::query(query).execute(pool).await.unwrap(); } } @@ -128,26 +117,46 @@ impl CleanUp for RedisStorage { define_bench!("sqlite_in_memory", { let pool = SqlitePool::connect("sqlite::memory:").await.unwrap(); let _ = SqliteStorage::setup(&pool).await; - SqliteStorage::new(pool) + SqliteStorage::new_with_config( + pool, + Config::default() + .set_buffer_size(100) + .set_poll_interval(Duration::from_millis(50)), + ) }); define_bench!("redis", { let conn = apalis_redis::connect(env!("REDIS_URL")).await.unwrap(); - let redis = RedisStorage::new(conn); + let redis = RedisStorage::new_with_config( + conn, + apalis_redis::Config::default() + .set_namespace("redis-bench") + .set_buffer_size(100), + ); redis }); define_bench!("postgres", { let pool = PgPool::connect(env!("POSTGRES_URL")).await.unwrap(); let _ = PostgresStorage::setup(&pool).await.unwrap(); - PostgresStorage::new(pool) + PostgresStorage::new_with_config( + pool, + Config::new("postgres:bench") + .set_buffer_size(100) + .set_poll_interval(Duration::from_millis(50)), + ) }); define_bench!("mysql", { let pool = MySqlPool::connect(env!("MYSQL_URL")).await.unwrap(); let _ = MysqlStorage::setup(&pool).await.unwrap(); - MysqlStorage::new(pool) + MysqlStorage::new_with_config( + pool, + Config::new("mysql:bench") + .set_buffer_size(100) + .set_poll_interval(Duration::from_millis(50)), + ) }); -criterion_group!(benches, sqlite_in_memory); +criterion_group!(benches, sqlite_in_memory, redis, postgres, mysql); criterion_main!(benches); diff --git a/packages/apalis-sql/src/mysql.rs b/packages/apalis-sql/src/mysql.rs index 2939f3e6..1a88cf50 100644 --- a/packages/apalis-sql/src/mysql.rs +++ b/packages/apalis-sql/src/mysql.rs @@ -95,10 +95,9 @@ impl MysqlStorage<(), JsonCodec> { } } -impl MysqlStorage +impl MysqlStorage where T: Serialize + DeserializeOwned, - C: Codec, { /// Create a new instance from a pool pub fn new(pool: MySqlPool) -> Self { @@ -122,12 +121,6 @@ where &self.pool } - /// Expose the codec - #[doc(hidden)] - pub fn codec(&self) -> &PhantomData { - &self.codec - } - /// Get the config used by the storage pub fn get_config(&self) -> &Config { &self.config diff --git a/src/layers/mod.rs b/src/layers/mod.rs index f990573a..0e28e943 100644 --- a/src/layers/mod.rs +++ b/src/layers/mod.rs @@ -18,6 +18,8 @@ pub mod tracing; #[cfg(feature = "limit")] #[cfg_attr(docsrs, doc(cfg(feature = "limit")))] pub mod limit { + pub use tower::limit::ConcurrencyLimitLayer; + pub use tower::limit::GlobalConcurrencyLimitLayer; pub use tower::limit::RateLimitLayer; } From 6b85ac4d1bf54a3937e5fd265a2b4b31aa2c6e70 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 2 Sep 2024 15:45:17 +0300 Subject: [PATCH 42/59] fix(deps): update rust crate sqlx to 0.8.1 [security] (#400) Co-authored-by: renovate[bot] 
<29139614+renovate[bot]@users.noreply.github.com> Co-authored-by: Geoffrey Mureithi <95377562+geofmureithi@users.noreply.github.com> --- Cargo.toml | 2 +- packages/apalis-sql/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 0cdfbbaa..32c19f5b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -95,7 +95,7 @@ redis = { version = "0.25.3", default-features = false, features = [ ] } [dev-dependencies.sqlx] -version = "0.8.0" +version = "0.8.1" default-features = false features = ["chrono", "mysql", "sqlite", "postgres", "runtime-tokio"] diff --git a/packages/apalis-sql/Cargo.toml b/packages/apalis-sql/Cargo.toml index 33400fbe..7fd8f4e1 100644 --- a/packages/apalis-sql/Cargo.toml +++ b/packages/apalis-sql/Cargo.toml @@ -19,7 +19,7 @@ async-std-comp = ["async-std", "sqlx/runtime-async-std-rustls"] tokio-comp = ["tokio", "sqlx/runtime-tokio-rustls"] [dependencies.sqlx] -version = "0.8.0" +version = "0.8.1" default-features = false features = ["chrono"] From 83f0e81c37772ac94be0d3a901d558c7b9a60eb8 Mon Sep 17 00:00:00 2001 From: Geoffrey Mureithi <95377562+geofmureithi@users.noreply.github.com> Date: Thu, 5 Sep 2024 20:40:07 +0300 Subject: [PATCH 43/59] fix: add some missing data required for dependency injection (#409) * fix: add some missing data required for dependency injection * lint: clippy and fmt --- packages/apalis-core/src/request.rs | 7 ++++++- packages/apalis-cron/src/lib.rs | 3 ++- packages/apalis-redis/src/storage.rs | 2 ++ 3 files changed, 10 insertions(+), 2 deletions(-) diff --git a/packages/apalis-core/src/request.rs b/packages/apalis-core/src/request.rs index ca0dc63b..11bea89f 100644 --- a/packages/apalis-core/src/request.rs +++ b/packages/apalis-core/src/request.rs @@ -5,7 +5,11 @@ use tower::layer::util::Identity; use std::{fmt::Debug, pin::Pin}; use crate::{ - data::Extensions, error::Error, poller::Poller, task::task_id::TaskId, worker::WorkerId, + data::Extensions, + error::Error, + poller::Poller, + task::{attempt::Attempt, task_id::TaskId}, + worker::WorkerId, Backend, }; @@ -24,6 +28,7 @@ impl Request { let id = TaskId::new(); let mut data = Extensions::new(); data.insert(id); + data.insert(Attempt::default()); Self::new_with_data(req, data) } diff --git a/packages/apalis-cron/src/lib.rs b/packages/apalis-cron/src/lib.rs index 0faef0e7..afe5402b 100644 --- a/packages/apalis-cron/src/lib.rs +++ b/packages/apalis-cron/src/lib.rs @@ -64,7 +64,7 @@ use apalis_core::request::RequestStream; use apalis_core::task::task_id::TaskId; use apalis_core::worker::WorkerId; use apalis_core::Backend; -use apalis_core::{error::Error, request::Request}; +use apalis_core::{error::Error, request::Request, task::attempt::Attempt}; use chrono::{DateTime, TimeZone, Utc}; pub use cron::Schedule; use std::marker::PhantomData; @@ -122,6 +122,7 @@ where apalis_core::sleep(to_sleep).await; let mut data = Extensions::new(); data.insert(TaskId::new()); + data.insert(Attempt::default()); yield Ok(Some(Request::new_with_data(J::from(timezone.from_utc_datetime(&Utc::now().naive_utc())), data))); }, None => { diff --git a/packages/apalis-redis/src/storage.rs b/packages/apalis-redis/src/storage.rs index 34f10243..e5e2cf0a 100644 --- a/packages/apalis-redis/src/storage.rs +++ b/packages/apalis-redis/src/storage.rs @@ -146,6 +146,8 @@ impl RedisJob { impl From> for Request { fn from(val: RedisJob) -> Self { let mut data = Extensions::new(); + data.insert(val.ctx.id.clone()); + data.insert(val.ctx.attempts.clone()); data.insert(val.ctx); 
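Since TaskId and Attempt are now inserted into the request data by every backend, handlers can extract them uniformly in the same Data<T> style as the fn-args example. A sketch, assuming the prelude re-exports Attempt as it does TaskId:

use apalis::prelude::*;
use serde::{Deserialize, Serialize};
use tracing::info;

#[derive(Debug, Serialize, Deserialize)]
struct Email {
    to: String,
}

async fn send_email(
    _job: Email,
    task_id: Data<TaskId>,   // added by the backend
    attempt: Data<Attempt>,  // now added by default too
) -> Result<(), Error> {
    info!("task {} is on attempt #{}", *task_id, attempt.current());
    Ok(())
}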
Request::new_with_data(val.job, data) } From b1f79bd79533f126fa1ae3b1248a36a9844d4e36 Mon Sep 17 00:00:00 2001 From: Geoffrey Mureithi <95377562+geofmureithi@users.noreply.github.com> Date: Thu, 5 Sep 2024 20:41:25 +0300 Subject: [PATCH 44/59] remove: benchmarks (#410) They will be moved to https://github.com/geofmureithi/apalis-benchmarks --- .github/workflows/bench.yaml | 40 --------- Cargo.toml | 7 +- benches/storages.rs | 162 ----------------------------------- 3 files changed, 1 insertion(+), 208 deletions(-) delete mode 100644 .github/workflows/bench.yaml delete mode 100644 benches/storages.rs diff --git a/.github/workflows/bench.yaml b/.github/workflows/bench.yaml deleted file mode 100644 index 943a0e95..00000000 --- a/.github/workflows/bench.yaml +++ /dev/null @@ -1,40 +0,0 @@ -on: - pull_request: - paths: - - 'packages/**' - - '.github/workflows/bench.yaml' - - 'benches/**' -name: Benchmark -jobs: - storageBenchmark: - name: Storage Benchmarks - runs-on: ubuntu-latest - services: - redis: - image: redis - ports: - - 6379:6379 - postgres: - image: postgres:16 - env: - POSTGRES_PASSWORD: postgres - ports: - - 5432:5432 - mysql: - image: mysql:8 - env: - MYSQL_DATABASE: test - MYSQL_USER: test - MYSQL_PASSWORD: test - MYSQL_ROOT_PASSWORD: root - ports: - - 3306:3306 - env: - POSTGRES_URL: postgres://postgres:postgres@localhost/postgres - MYSQL_URL: mysql://test:test@localhost/test - REDIS_URL: redis://127.0.0.1/ - steps: - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 - - uses: boa-dev/criterion-compare-action@v3 - with: - branchName: ${{ github.base_ref }} diff --git a/Cargo.toml b/Cargo.toml index 32c19f5b..75472b5e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -80,7 +80,7 @@ pprof = { version = "0.13", features = ["flamegraph"] } paste = "1.0.14" serde = "1" tokio = { version = "1", features = ["macros", "rt-multi-thread"] } -apalis = { path = ".", features = ["limit"]} +apalis = { path = ".", features = ["limit"] } apalis-redis = { path = "./packages/apalis-redis" } apalis-sql = { path = "./packages/apalis-sql", features = [ "postgres", @@ -99,11 +99,6 @@ version = "0.8.1" default-features = false features = ["chrono", "mysql", "sqlite", "postgres", "runtime-tokio"] - -[[bench]] -name = "storages" -harness = false - [workspace] members = [ "packages/apalis-core", diff --git a/benches/storages.rs b/benches/storages.rs deleted file mode 100644 index 2ee420a1..00000000 --- a/benches/storages.rs +++ /dev/null @@ -1,162 +0,0 @@ -use apalis::prelude::*; -use apalis_redis::RedisStorage; -use apalis_sql::mysql::MysqlStorage; -use apalis_sql::postgres::PostgresStorage; -use apalis_sql::sqlite::SqliteStorage; -use apalis_sql::Config; -use criterion::*; -use futures::Future; -use paste::paste; -use serde::{Deserialize, Serialize}; -use sqlx::MySqlPool; -use sqlx::PgPool; -use sqlx::SqlitePool; -use std::time::Duration; -use std::time::Instant; -use tokio::runtime::Runtime; -macro_rules! define_bench { - ($name:expr, $setup:expr ) => { - paste! 
{ - fn [<$name>](c: &mut Criterion) { - let size: usize = 1000; - - let mut group = c.benchmark_group($name); - group.sample_size(10); - group.bench_with_input(BenchmarkId::new("consume", size), &size, |b, &size| { - b.to_async(Runtime::new().unwrap()) - .iter(|| async move { - - let mut storage = { $setup }; - let mut s = storage.clone(); - storage.cleanup().await; - tokio::spawn(async move { - for i in 0..=size { - let _ = s.push(TestJob(i)).await; - } - }); - async fn handle_test_job( - req: TestJob, - size: Data, - wrk: Context, - ) -> Result<(), Error> { - if req.0 == *size { - wrk.stop(); - } - Ok(()) - } - let start = Instant::now(); - WorkerBuilder::new(format!("{}-bench", $name)) - .data(size as usize) - .backend(storage.clone()) - .build_fn(handle_test_job) - .with_executor(TokioExecutor) - .run() - .await; - storage.cleanup().await; - start.elapsed() - }) - }); - group.bench_with_input(BenchmarkId::new("push", size), &size, |b, &s| { - b.to_async(Runtime::new().unwrap()).iter(|| async move { - let mut storage = { $setup }; - let start = Instant::now(); - for i in 0..s { - let _ = black_box(storage.push(TestJob(i)).await); - } - start.elapsed() - }); - }); - }} - }; -} - -#[derive(Serialize, Deserialize, Debug)] -struct TestJob(usize); -trait CleanUp { - fn cleanup(&mut self) -> impl Future + Send; -} - -impl CleanUp for SqliteStorage { - async fn cleanup(&mut self) { - let pool = self.pool(); - let query = "DELETE FROM Jobs; DELETE from Workers;"; - sqlx::query(query).execute(pool).await.unwrap(); - } -} - -impl CleanUp for PostgresStorage { - async fn cleanup(&mut self) { - let pool = self.pool(); - let query = "DELETE FROM apalis.jobs;"; - sqlx::query(query).execute(pool).await.unwrap(); - let query = "DELETE from apalis.workers;"; - sqlx::query(query).execute(pool).await.unwrap(); - } -} - -impl CleanUp for MysqlStorage { - async fn cleanup(&mut self) { - let pool = self.pool(); - let query = "DELETE FROM jobs;"; - sqlx::query(query).execute(pool).await.unwrap(); - let query = "DELETE from workers;"; - sqlx::query(query).execute(pool).await.unwrap(); - } -} - -impl CleanUp for RedisStorage { - async fn cleanup(&mut self) { - let mut conn = self.get_connection().clone(); - let _resp: String = redis::cmd("FLUSHDB") - .query_async(&mut conn) - .await - .expect("failed to Flushdb"); - } -} - -define_bench!("sqlite_in_memory", { - let pool = SqlitePool::connect("sqlite::memory:").await.unwrap(); - let _ = SqliteStorage::setup(&pool).await; - SqliteStorage::new_with_config( - pool, - Config::default() - .set_buffer_size(100) - .set_poll_interval(Duration::from_millis(50)), - ) -}); - -define_bench!("redis", { - let conn = apalis_redis::connect(env!("REDIS_URL")).await.unwrap(); - let redis = RedisStorage::new_with_config( - conn, - apalis_redis::Config::default() - .set_namespace("redis-bench") - .set_buffer_size(100), - ); - redis -}); - -define_bench!("postgres", { - let pool = PgPool::connect(env!("POSTGRES_URL")).await.unwrap(); - let _ = PostgresStorage::setup(&pool).await.unwrap(); - PostgresStorage::new_with_config( - pool, - Config::new("postgres:bench") - .set_buffer_size(100) - .set_poll_interval(Duration::from_millis(50)), - ) -}); - -define_bench!("mysql", { - let pool = MySqlPool::connect(env!("MYSQL_URL")).await.unwrap(); - let _ = MysqlStorage::setup(&pool).await.unwrap(); - MysqlStorage::new_with_config( - pool, - Config::new("mysql:bench") - .set_buffer_size(100) - .set_poll_interval(Duration::from_millis(50)), - ) -}); - -criterion_group!(benches, 
sqlite_in_memory, redis, postgres, mysql); -criterion_main!(benches); From 1c19c7ab6c9bdce1d8826e50eacfb47e77692b08 Mon Sep 17 00:00:00 2001 From: Geoffrey Mureithi <95377562+geofmureithi@users.noreply.github.com> Date: Fri, 6 Sep 2024 11:40:12 +0300 Subject: [PATCH 45/59] bump: to 0.6.0-rc.6 (#412) --- Cargo.toml | 4 ++-- packages/apalis-core/Cargo.toml | 2 +- packages/apalis-cron/Cargo.toml | 4 ++-- packages/apalis-redis/Cargo.toml | 4 ++-- packages/apalis-sql/Cargo.toml | 4 ++-- 5 files changed, 9 insertions(+), 9 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 75472b5e..ae3d60f1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -4,7 +4,7 @@ repository = "https://github.com/geofmureithi/apalis" [package] name = "apalis" -version = "0.6.0-rc.5" +version = "0.6.0-rc.6" authors = ["Geoffrey Mureithi "] description = "Simple, extensible multithreaded background job processing for Rust" edition.workspace = true @@ -58,7 +58,7 @@ layers = [ docsrs = ["document-features"] [dependencies.apalis-core] -version = "0.6.0-rc.5" +version = "0.6.0-rc.6" default-features = false path = "./packages/apalis-core" diff --git a/packages/apalis-core/Cargo.toml b/packages/apalis-core/Cargo.toml index d3f36711..ffb31f91 100644 --- a/packages/apalis-core/Cargo.toml +++ b/packages/apalis-core/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apalis-core" -version = "0.6.0-rc.5" +version = "0.6.0-rc.6" authors = ["Njuguna Mureithi "] edition.workspace = true repository.workspace = true diff --git a/packages/apalis-cron/Cargo.toml b/packages/apalis-cron/Cargo.toml index 0a8ae9ad..b7e57a1f 100644 --- a/packages/apalis-cron/Cargo.toml +++ b/packages/apalis-cron/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apalis-cron" -version = "0.6.0-rc.5" +version = "0.6.0-rc.6" edition.workspace = true repository.workspace = true authors = ["Njuguna Mureithi "] @@ -10,7 +10,7 @@ description = "A simple yet extensible library for cron-like job scheduling for # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -apalis-core = { path = "../../packages/apalis-core", version = "0.6.0-rc.5", default-features = false, features = [ +apalis-core = { path = "../../packages/apalis-core", version = "0.6.0-rc.6", default-features = false, features = [ "sleep", ] } cron = "0.12.1" diff --git a/packages/apalis-redis/Cargo.toml b/packages/apalis-redis/Cargo.toml index 35b8711c..fa60afce 100644 --- a/packages/apalis-redis/Cargo.toml +++ b/packages/apalis-redis/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apalis-redis" -version = "0.6.0-rc.5" +version = "0.6.0-rc.6" authors = ["Njuguna Mureithi "] edition.workspace = true repository.workspace = true @@ -12,7 +12,7 @@ description = "Redis Storage for apalis: use Redis for background jobs and messa # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -apalis-core = { path = "../../packages/apalis-core", version = "0.6.0-rc.5", default-features = false, features = [ +apalis-core = { path = "../../packages/apalis-core", version = "0.6.0-rc.6", default-features = false, features = [ "sleep", "json", ] } diff --git a/packages/apalis-sql/Cargo.toml b/packages/apalis-sql/Cargo.toml index 7fd8f4e1..ab2da357 100644 --- a/packages/apalis-sql/Cargo.toml +++ b/packages/apalis-sql/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apalis-sql" -version = "0.6.0-rc.5" +version = "0.6.0-rc.6" authors = ["Njuguna Mureithi "] edition.workspace = true repository.workspace = true @@ -26,7 +26,7 @@ features = 
["chrono"] [dependencies] serde = { version = "1", features = ["derive"] } serde_json = "1" -apalis-core = { path = "../../packages/apalis-core", version = "0.6.0-rc.5", default-features = false, features = [ +apalis-core = { path = "../../packages/apalis-core", version = "0.6.0-rc.6", default-features = false, features = [ "sleep", "json", ] } From 4ec676f6de00a5054fea32709fe3e2e536746585 Mon Sep 17 00:00:00 2001 From: John Vandenberg Date: Mon, 9 Sep 2024 12:41:51 +0800 Subject: [PATCH 46/59] Update async-std to 1.13 (#413) --- Cargo.toml | 2 +- examples/async-std-runtime/Cargo.toml | 2 +- packages/apalis-cron/Cargo.toml | 2 +- packages/apalis-redis/Cargo.toml | 2 +- packages/apalis-sql/Cargo.toml | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index ae3d60f1..1c0f3723 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -133,7 +133,7 @@ members = [ tokio = { version = "1", features = [ "rt", ], default-features = false, optional = true } -async-std = { version = "1", optional = true } +async-std = { version = "1.13.0", optional = true } tower = { version = "0.4", features = ["util"], default-features = false } tracing-futures = { version = "0.2.5", optional = true, default-features = false } sentry-core = { version = "0.34.0", optional = true, default-features = false } diff --git a/examples/async-std-runtime/Cargo.toml b/examples/async-std-runtime/Cargo.toml index 18701e17..ef801784 100644 --- a/examples/async-std-runtime/Cargo.toml +++ b/examples/async-std-runtime/Cargo.toml @@ -14,7 +14,7 @@ apalis = { path = "../../", default-features = false, features = [ ] } apalis-cron = { path = "../../packages/apalis-cron" } apalis-core = { path = "../../packages/apalis-core", default-features = false } -async-std = { version = "1.12.0", features = ["attributes"] } +async-std = { version = "1.13.0", features = ["attributes"] } serde = "1" tracing-subscriber = "0.3.11" chrono = { version = "0.4", default-features = false, features = ["clock"] } diff --git a/packages/apalis-cron/Cargo.toml b/packages/apalis-cron/Cargo.toml index b7e57a1f..02fa8260 100644 --- a/packages/apalis-cron/Cargo.toml +++ b/packages/apalis-cron/Cargo.toml @@ -21,7 +21,7 @@ chrono = { version = "0.4.38", default-features = false, features = [ "serde", ] } async-stream = "0.3.5" -async-std = { version = "1.12.0", optional = true } +async-std = { version = "1.13.0", optional = true } [dev-dependencies] tokio = { version = "1", features = ["macros"] } diff --git a/packages/apalis-redis/Cargo.toml b/packages/apalis-redis/Cargo.toml index fa60afce..d9205e20 100644 --- a/packages/apalis-redis/Cargo.toml +++ b/packages/apalis-redis/Cargo.toml @@ -31,7 +31,7 @@ chrono = { version = "0.4.38", default-features = false, features = [ async-stream = "0.3.5" futures = "0.3.30" tokio = { version = "1", features = ["rt", "net"], optional = true } -async-std = { version = "1.12.0", optional = true } +async-std = { version = "1.13.0", optional = true } async-trait = "0.1.80" tower = "0.4" diff --git a/packages/apalis-sql/Cargo.toml b/packages/apalis-sql/Cargo.toml index ab2da357..80851d2f 100644 --- a/packages/apalis-sql/Cargo.toml +++ b/packages/apalis-sql/Cargo.toml @@ -35,7 +35,7 @@ futures = "0.3.30" async-stream = "0.3.5" tokio = { version = "1", features = ["rt", "net"], optional = true } futures-lite = "2.3.0" -async-std = { version = "1.12.0", optional = true } +async-std = { version = "1.13.0", optional = true } chrono = { version = "0.4", features = ["serde"] } From 
7a496ad5259f23e7c4bbee7fd17f71f3b90fb9a0 Mon Sep 17 00:00:00 2001 From: Geoffrey Mureithi <95377562+geofmureithi@users.noreply.github.com> Date: Tue, 17 Sep 2024 09:09:23 +0300 Subject: [PATCH 47/59] Feature: Introducing Request Context (#416) * wip: introduce context to request * fix: get request context working * lint: cargo fmt * fix: get tests compiling * add: push_request and shedule_request * fix: task_id for Testwrapper * fix: minor checks and fixes on postgres tests * fix: bug on postgres fetch_next --- examples/actix-web/src/main.rs | 3 +- examples/async-std-runtime/src/main.rs | 12 +- examples/axum/src/main.rs | 6 +- examples/basics/Cargo.toml | 2 +- examples/basics/src/layer.rs | 14 +- examples/basics/src/main.rs | 27 +- examples/cron/Cargo.toml | 1 + examples/cron/src/main.rs | 4 +- examples/fn-args/src/main.rs | 6 +- examples/prometheus/src/main.rs | 8 +- examples/redis-deadpool/src/main.rs | 2 +- examples/redis-mq-example/src/main.rs | 57 ++-- examples/redis-with-msg-pack/src/main.rs | 2 +- examples/redis/src/main.rs | 12 +- examples/sqlite/Cargo.toml | 2 +- examples/sqlite/src/main.rs | 6 +- packages/apalis-core/src/builder.rs | 62 ++-- packages/apalis-core/src/data.rs | 23 ++ packages/apalis-core/src/error.rs | 105 +++++- packages/apalis-core/src/layers.rs | 91 +++--- packages/apalis-core/src/lib.rs | 44 ++- packages/apalis-core/src/memory.rs | 10 +- packages/apalis-core/src/monitor/mod.rs | 57 ++-- packages/apalis-core/src/request.rs | 87 +++-- packages/apalis-core/src/response.rs | 114 ++++++- packages/apalis-core/src/service_fn.rs | 64 ++-- packages/apalis-core/src/storage/mod.rs | 45 ++- packages/apalis-core/src/task/attempt.rs | 8 + packages/apalis-core/src/task/namespace.rs | 4 +- packages/apalis-core/src/worker/mod.rs | 359 ++++++++++----------- packages/apalis-cron/src/lib.rs | 26 +- packages/apalis-redis/src/lib.rs | 2 +- packages/apalis-redis/src/storage.rs | 242 +++++--------- packages/apalis-sql/src/context.rs | 37 +-- packages/apalis-sql/src/from_row.rs | 94 +++--- packages/apalis-sql/src/lib.rs | 24 +- packages/apalis-sql/src/mysql.rs | 157 +++++---- packages/apalis-sql/src/postgres.rs | 167 +++++----- packages/apalis-sql/src/sqlite.rs | 145 +++++---- src/layers/catch_panic/mod.rs | 98 +++--- src/layers/mod.rs | 2 + src/layers/prometheus/mod.rs | 25 +- src/layers/retry/mod.rs | 29 +- src/layers/sentry/mod.rs | 44 +-- src/layers/tracing/make_span.rs | 18 +- src/layers/tracing/mod.rs | 26 +- src/layers/tracing/on_failure.rs | 22 +- src/layers/tracing/on_request.rs | 18 +- src/lib.rs | 4 +- 49 files changed, 1327 insertions(+), 1090 deletions(-) diff --git a/examples/actix-web/src/main.rs b/examples/actix-web/src/main.rs index a4b73b64..472eec8e 100644 --- a/examples/actix-web/src/main.rs +++ b/examples/actix-web/src/main.rs @@ -17,7 +17,7 @@ async fn push_email( let mut storage = storage.clone(); let res = storage.push(email.into_inner()).await; match res { - Ok(jid) => HttpResponse::Ok().body(format!("Email with job_id [{jid}] added to queue")), + Ok(ctx) => HttpResponse::Ok().json(ctx), Err(e) => HttpResponse::InternalServerError().body(format!("{e}")), } } @@ -46,7 +46,6 @@ async fn main() -> Result<()> { WorkerBuilder::new("tasty-avocado") .layer(TraceLayer::new()) .backend(storage) - // .chain(|svc|svc.map_err(|e| Box::new(e))) .build_fn(send_email) }) .run_with_signal(signal::ctrl_c()); diff --git a/examples/async-std-runtime/src/main.rs b/examples/async-std-runtime/src/main.rs index 58f2afae..0b9c7ad3 100644 --- a/examples/async-std-runtime/src/main.rs 
+++ b/examples/async-std-runtime/src/main.rs @@ -9,7 +9,7 @@ use apalis_cron::{CronStream, Schedule}; use chrono::{DateTime, Utc}; use tracing::{debug, info, Instrument, Level, Span}; -type WorkerCtx = Context; +type WorkerCtx = Data>; #[derive(Default, Debug, Clone)] struct Reminder(DateTime); @@ -48,7 +48,7 @@ async fn main() -> Result<()> { .build_fn(send_reminder); Monitor::::new() - .register_with_count(2, worker) + .register(worker) .on_event(|e| debug!("Worker event: {e:?}")) .run_with_signal(async { ctrl_c.recv().await.ok(); @@ -95,10 +95,10 @@ impl ReminderSpan { } } -impl MakeSpan for ReminderSpan { - fn make_span(&mut self, req: &Request) -> Span { - let task_id: &TaskId = req.get().unwrap(); - let attempts: Attempt = req.get().cloned().unwrap_or_default(); +impl MakeSpan for ReminderSpan { + fn make_span(&mut self, req: &Request) -> Span { + let task_id: &TaskId = &req.parts.task_id; + let attempts: &Attempt = &req.parts.attempt; let span = Span::current(); macro_rules! make_span { ($level:expr) => { diff --git a/examples/axum/src/main.rs b/examples/axum/src/main.rs index a3d7774a..3e0e4da2 100644 --- a/examples/axum/src/main.rs +++ b/examples/axum/src/main.rs @@ -36,9 +36,9 @@ where let new_job = storage.push(input).await; match new_job { - Ok(id) => ( + Ok(ctx) => ( StatusCode::CREATED, - format!("Job [{id}] was successfully added"), + format!("Job [{ctx:?}] was successfully added"), ), Err(e) => ( StatusCode::INTERNAL_SERVER_ERROR, @@ -74,7 +74,7 @@ async fn main() -> Result<()> { }; let monitor = async { Monitor::::new() - .register_with_count(2, { + .register({ WorkerBuilder::new("tasty-pear") .layer(TraceLayer::new()) .backend(storage.clone()) diff --git a/examples/basics/Cargo.toml b/examples/basics/Cargo.toml index feade0b9..30ab4923 100644 --- a/examples/basics/Cargo.toml +++ b/examples/basics/Cargo.toml @@ -9,7 +9,7 @@ license = "MIT OR Apache-2.0" thiserror = "1" tokio = { version = "1", features = ["full"] } apalis = { path = "../../", features = ["limit", "tokio-comp", "catch-panic"] } -apalis-sql = { path = "../../packages/apalis-sql" } +apalis-sql = { path = "../../packages/apalis-sql", features = ["sqlite"] } serde = "1" tracing-subscriber = "0.3.11" email-service = { path = "../email-service" } diff --git a/examples/basics/src/layer.rs b/examples/basics/src/layer.rs index 2918346f..d8da32a1 100644 --- a/examples/basics/src/layer.rs +++ b/examples/basics/src/layer.rs @@ -1,4 +1,7 @@ -use std::task::{Context, Poll}; +use std::{ + fmt::Debug, + task::{Context, Poll}, +}; use apalis::prelude::Request; use tower::{Layer, Service}; @@ -34,10 +37,11 @@ pub struct LogService { service: S, } -impl Service> for LogService +impl Service> for LogService where - S: Service> + Clone, - Req: std::fmt::Debug, + S: Service> + Clone, + Req: Debug, + Ctx: Debug, { type Response = S::Response; type Error = S::Error; @@ -47,7 +51,7 @@ where self.service.poll_ready(cx) } - fn call(&mut self, request: Request) -> Self::Future { + fn call(&mut self, request: Request) -> Self::Future { // Use service to apply middleware before or(and) after a request info!("request = {:?}, target = {:?}", request, self.target); self.service.call(request) diff --git a/examples/basics/src/main.rs b/examples/basics/src/main.rs index e6b1f0cb..ff492443 100644 --- a/examples/basics/src/main.rs +++ b/examples/basics/src/main.rs @@ -2,7 +2,7 @@ mod cache; mod layer; mod service; -use std::time::Duration; +use std::{sync::Arc, time::Duration}; use apalis::{ layers::{catch_panic::CatchPanicLayer, 
tracing::TraceLayer}, @@ -35,7 +35,7 @@ async fn produce_jobs(storage: &SqliteStorage) { } #[derive(thiserror::Error, Debug)] -pub enum Error { +pub enum ServiceError { #[error("data store disconnected")] Disconnect(#[from] std::io::Error), #[error("the data for key `{0}` is not available")] @@ -46,15 +46,21 @@ pub enum Error { Unknown, } +#[derive(thiserror::Error, Debug)] +pub enum PanicError { + #[error("{0}")] + Panic(String), +} + /// Quick solution to prevent spam. /// If email in cache, then send email else complete the job but let a validation process run in the background, async fn send_email( email: Email, svc: Data, worker_ctx: Data, - worker_id: WorkerId, + worker_id: Data, cache: Data, -) -> Result<(), Error> { +) -> Result<(), ServiceError> { info!("Job started in worker {:?}", worker_id); let cache_clone = cache.clone(); let email_to = email.to.clone(); @@ -97,10 +103,19 @@ async fn main() -> Result<(), std::io::Error> { produce_jobs(&sqlite).await; Monitor::::new() - .register_with_count(2, { + .register({ WorkerBuilder::new("tasty-banana") // This handles any panics that may occur in any of the layers below - .layer(CatchPanicLayer::new()) + .layer(CatchPanicLayer::with_panic_handler(|e| { + let panic_info = if let Some(s) = e.downcast_ref::<&str>() { + s.to_string() + } else if let Some(s) = e.downcast_ref::() { + s.clone() + } else { + "Unknown panic".to_string() + }; + Error::Abort(Arc::new(Box::new(PanicError::Panic(panic_info)))) + })) .layer(TraceLayer::new()) .layer(LogLayer::new("some-log-example")) // Add shared context to all jobs executed by this worker diff --git a/examples/cron/Cargo.toml b/examples/cron/Cargo.toml index 070ac495..ab3218d1 100644 --- a/examples/cron/Cargo.toml +++ b/examples/cron/Cargo.toml @@ -9,6 +9,7 @@ apalis = { path = "../../", default-features = false, features = [ "tokio-comp", "tracing", "limit", + "catch-panic" ] } apalis-cron = { path = "../../packages/apalis-cron" } tokio = { version = "1", features = ["full"] } diff --git a/examples/cron/src/main.rs b/examples/cron/src/main.rs index 4a8fb74c..4a22dfe5 100644 --- a/examples/cron/src/main.rs +++ b/examples/cron/src/main.rs @@ -1,3 +1,4 @@ +use apalis::layers::tracing::TraceLayer; use apalis::prelude::*; use apalis::utils::TokioExecutor; use apalis_cron::CronStream; @@ -31,13 +32,14 @@ async fn send_reminder(job: Reminder, svc: Data) { async fn main() { let schedule = Schedule::from_str("1/1 * * * * *").unwrap(); let worker = WorkerBuilder::new("morning-cereal") + .layer(TraceLayer::new()) .layer(LoadShedLayer::new()) // Important when you have layers that block the service .layer(RateLimitLayer::new(1, Duration::from_secs(2))) .data(FakeService) .backend(CronStream::new(schedule)) .build_fn(send_reminder); Monitor::::new() - .register(worker) + .register_with_count(2, worker) .run() .await .unwrap(); diff --git a/examples/fn-args/src/main.rs b/examples/fn-args/src/main.rs index 5fd614cb..4a28d285 100644 --- a/examples/fn-args/src/main.rs +++ b/examples/fn-args/src/main.rs @@ -20,16 +20,16 @@ struct SimpleJob {} // A task can have up to 16 arguments async fn simple_job( _: SimpleJob, // Required, must be of the type of the job/message - worker_id: WorkerId, // The worker running the job, added by worker + worker_id: Data, // The worker running the job, added by worker _worker_ctx: Context, // The worker context, added by worker _sqlite: Data>, // The source, added by storage task_id: Data, // The task id, added by storage - ctx: Data, // The task context, added by storage + ctx: 
SqlContext, // The task context count: Data, // Our custom data added via layer ) { // increment the counter let current = count.fetch_add(1, Ordering::Relaxed); - info!("worker: {worker_id}; task_id: {task_id:?}, ctx: {ctx:?}, count: {current:?}"); + info!("worker: {worker_id:?}; task_id: {task_id:?}, ctx: {ctx:?}, count: {current:?}"); } async fn produce_jobs(storage: &mut SqliteStorage) { diff --git a/examples/prometheus/src/main.rs b/examples/prometheus/src/main.rs index 160dfc58..eaa334a2 100644 --- a/examples/prometheus/src/main.rs +++ b/examples/prometheus/src/main.rs @@ -49,9 +49,9 @@ async fn main() -> Result<()> { }; let monitor = async { Monitor::::new() - .register_with_count(2, { + .register({ WorkerBuilder::new("tasty-banana") - .layer(PrometheusLayer) + .layer(PrometheusLayer::default()) .backend(storage.clone()) .build_fn(send_email) }) @@ -94,9 +94,9 @@ where let new_job = storage.push(input).await; match new_job { - Ok(jid) => ( + Ok(ctx) => ( StatusCode::CREATED, - format!("Job [{jid}] was successfully added"), + format!("Job [{ctx:?}] was successfully added"), ), Err(e) => ( StatusCode::INTERNAL_SERVER_ERROR, diff --git a/examples/redis-deadpool/src/main.rs b/examples/redis-deadpool/src/main.rs index 0d0d7561..9d625934 100644 --- a/examples/redis-deadpool/src/main.rs +++ b/examples/redis-deadpool/src/main.rs @@ -31,7 +31,7 @@ async fn main() -> Result<()> { .build_fn(send_email); Monitor::::new() - .register_with_count(2, worker) + .register(worker) .shutdown_timeout(Duration::from_millis(5000)) .run_with_signal(async { tokio::signal::ctrl_c().await?; diff --git a/examples/redis-mq-example/src/main.rs b/examples/redis-mq-example/src/main.rs index 1b3f2456..5f8f7408 100644 --- a/examples/redis-mq-example/src/main.rs +++ b/examples/redis-mq-example/src/main.rs @@ -2,16 +2,17 @@ use std::{fmt::Debug, marker::PhantomData, time::Duration}; use apalis::{layers::tracing::TraceLayer, prelude::*}; -use apalis_redis::{self, Config, RedisJob}; +use apalis_redis::{self, Config}; use apalis_core::{ codec::json::JsonCodec, layers::{Ack, AckLayer}, + response::Response, }; use email_service::{send_email, Email}; use futures::{channel::mpsc, SinkExt}; use rsmq_async::{Rsmq, RsmqConnection, RsmqError}; -use serde::{de::DeserializeOwned, Serialize}; +use serde::{de::DeserializeOwned, Deserialize, Serialize}; use tokio::time::sleep; use tracing::{error, info}; @@ -22,6 +23,18 @@ struct RedisMq>> { codec: PhantomData, } +#[derive(Clone, Debug, Serialize, Deserialize, Default)] +pub struct RedisMqContext { + max_attempts: usize, + message_id: String, +} + +impl FromRequest> for RedisMqContext { + fn from_request(req: &Request) -> Result { + Ok(req.parts.context.clone()) + } +} + // Manually implement Clone for RedisMq impl Clone for RedisMq { fn clone(&self) -> Self { @@ -34,32 +47,30 @@ impl Clone for RedisMq { } } -impl Backend, Res> for RedisMq +impl Backend, Res> for RedisMq where - M: Send + DeserializeOwned + 'static, + Req: Send + DeserializeOwned + 'static, C: Codec>, { - type Stream = RequestStream>; + type Stream = RequestStream>; - type Layer = AckLayer; + type Layer = AckLayer; fn poll(mut self, _worker_id: WorkerId) -> Poller { let (mut tx, rx) = mpsc::channel(self.config.get_buffer_size()); - let stream: RequestStream> = Box::pin(rx); + let stream: RequestStream> = Box::pin(rx); let layer = AckLayer::new(self.clone()); let heartbeat = async move { loop { sleep(*self.config.get_poll_interval()).await; - let msg: Option> = self + let msg: Option> = self .conn 
.receive_message(self.config.get_namespace(), None) .await .unwrap() .map(|r| { - let mut req: Request = C::decode::>(r.message) - .map_err(Into::into) - .unwrap() - .into(); + let mut req: Request = + C::decode(r.message).map_err(Into::into).unwrap(); req.insert(r.id); req }); @@ -76,18 +87,20 @@ where Res: Debug + Send + Sync, C: Send, { - type Context = String; + type Context = RedisMqContext; type AckError = RsmqError; async fn ack( &mut self, ctx: &Self::Context, - _res: &Result, + res: &Response, ) -> Result<(), Self::AckError> { - self.conn - .delete_message(self.config.get_namespace(), ctx) - .await?; + if res.is_success() || res.attempt.current() >= ctx.max_attempts { + self.conn + .delete_message(self.config.get_namespace(), &ctx.message_id) + .await?; + } Ok(()) } } @@ -100,7 +113,7 @@ where type Error = RsmqError; async fn enqueue(&mut self, message: Message) -> Result<(), Self::Error> { - let bytes = C::encode(&RedisJob::new(message, Default::default())) + let bytes = C::encode(&Request::::new(message)) .map_err(Into::into) .unwrap(); self.conn @@ -115,11 +128,9 @@ where .receive_message(self.config.get_namespace(), None) .await? .map(|r| { - let req: Request = C::decode::>(r.message) - .map_err(Into::into) - .unwrap() - .into(); - req.take() + let req: Request = + C::decode(r.message).map_err(Into::into).unwrap(); + req.args })) } diff --git a/examples/redis-with-msg-pack/src/main.rs b/examples/redis-with-msg-pack/src/main.rs index 1ac2e24d..ce5e57a5 100644 --- a/examples/redis-with-msg-pack/src/main.rs +++ b/examples/redis-with-msg-pack/src/main.rs @@ -45,7 +45,7 @@ async fn main() -> Result<()> { .build_fn(send_email); Monitor::::new() - .register_with_count(2, worker) + .register(worker) .shutdown_timeout(Duration::from_millis(5000)) .run_with_signal(async { tokio::signal::ctrl_c().await?; diff --git a/examples/redis/src/main.rs b/examples/redis/src/main.rs index 5e0723e2..32a16a3f 100644 --- a/examples/redis/src/main.rs +++ b/examples/redis/src/main.rs @@ -1,7 +1,9 @@ -use std::{sync::Arc, time::Duration}; +use std::time::Duration; use anyhow::Result; -use apalis::layers::limit::RateLimitLayer; +use apalis::layers::limit::{ConcurrencyLimitLayer, RateLimitLayer}; +use apalis::layers::tracing::TraceLayer; +use apalis::layers::ErrorHandlingLayer; use apalis::{layers::TimeoutLayer, prelude::*}; use apalis_redis::RedisStorage; @@ -33,14 +35,16 @@ async fn main() -> Result<()> { produce_jobs(storage.clone()).await?; let worker = WorkerBuilder::new("rango-tango") - .chain(|svc| svc.map_err(|e| Error::Failed(Arc::new(e)))) + .layer(ErrorHandlingLayer::new()) + .layer(TraceLayer::new()) .layer(RateLimitLayer::new(5, Duration::from_secs(1))) .layer(TimeoutLayer::new(Duration::from_millis(500))) + .layer(ConcurrencyLimitLayer::new(2)) .backend(storage) .build_fn(send_email); Monitor::::new() - .register_with_count(2, worker) + .register(worker) .on_event(|e| { let worker_id = e.id(); match e.inner() { diff --git a/examples/sqlite/Cargo.toml b/examples/sqlite/Cargo.toml index b3a4cf96..5a58b2d6 100644 --- a/examples/sqlite/Cargo.toml +++ b/examples/sqlite/Cargo.toml @@ -9,7 +9,7 @@ license = "MIT OR Apache-2.0" anyhow = "1" tokio = { version = "1", features = ["full"] } apalis = { path = "../../", features = ["limit", "tracing", "tokio-comp"] } -apalis-sql = { path = "../../packages/apalis-sql", features = ["sqlite"] } +apalis-sql = { path = "../../packages/apalis-sql", features = ["sqlite", "tokio-comp"] } serde = { version = "1", features = ["derive"] } tracing-subscriber = 
"0.3.11" chrono = { version = "0.4", default-features = false, features = ["clock"] } diff --git a/examples/sqlite/src/main.rs b/examples/sqlite/src/main.rs index 282898b3..802a4ff7 100644 --- a/examples/sqlite/src/main.rs +++ b/examples/sqlite/src/main.rs @@ -59,15 +59,15 @@ async fn main() -> Result<()> { produce_notifications(¬ification_storage).await?; Monitor::::new() - .register_with_count(2, { + .register({ WorkerBuilder::new("tasty-banana") .layer(TraceLayer::new()) .backend(email_storage) .build_fn(send_email) }) - .register_with_count(10, { + .register({ WorkerBuilder::new("tasty-mango") - .layer(TraceLayer::new()) + // .layer(TraceLayer::new()) .backend(notification_storage) .build_fn(job::notify) }) diff --git a/packages/apalis-core/src/builder.rs b/packages/apalis-core/src/builder.rs index 6ed8d3f3..bc9bd21f 100644 --- a/packages/apalis-core/src/builder.rs +++ b/packages/apalis-core/src/builder.rs @@ -18,16 +18,16 @@ use crate::{ /// Allows building a [`Worker`]. /// Usually the output is [`Worker`] -pub struct WorkerBuilder { +pub struct WorkerBuilder { id: WorkerId, - request: PhantomData, + request: PhantomData>, layer: ServiceBuilder, source: Source, service: PhantomData, } -impl std::fmt::Debug - for WorkerBuilder +impl std::fmt::Debug + for WorkerBuilder { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("WorkerBuilder") @@ -39,10 +39,10 @@ impl std::fmt::Debug } } -impl WorkerBuilder<(), (), Identity, Serv> { +impl WorkerBuilder<(), (), (), Identity, Serv> { /// Build a new [`WorkerBuilder`] instance with a name for the worker to build - pub fn new>(name: T) -> WorkerBuilder<(), (), Identity, Serv> { - let job: PhantomData<()> = PhantomData; + pub fn new>(name: T) -> WorkerBuilder<(), (), (), Identity, Serv> { + let job: PhantomData> = PhantomData; WorkerBuilder { request: job, layer: ServiceBuilder::new(), @@ -53,13 +53,17 @@ impl WorkerBuilder<(), (), Identity, Serv> { } } -impl WorkerBuilder { +impl WorkerBuilder<(), (), (), M, Serv> { /// Consume a stream directly #[deprecated(since = "0.6.0", note = "Consider using the `.backend`")] - pub fn stream>, Error>> + Send + 'static, NJ>( + pub fn stream< + NS: Stream>, Error>> + Send + 'static, + NJ, + Ctx, + >( self, stream: NS, - ) -> WorkerBuilder { + ) -> WorkerBuilder { WorkerBuilder { request: PhantomData, layer: self.layer, @@ -70,12 +74,12 @@ impl WorkerBuilder { } /// Set the source to a backend that implements [Backend] - pub fn backend, Res>, NJ, Res: Send>( + pub fn backend, Res>, NJ, Res: Send, Ctx>( self, backend: NB, - ) -> WorkerBuilder + ) -> WorkerBuilder where - Serv: Service, Response = Res>, + Serv: Service, Response = Res>, { WorkerBuilder { request: PhantomData, @@ -87,13 +91,13 @@ impl WorkerBuilder { } } -impl WorkerBuilder { +impl WorkerBuilder { /// Allows of decorating the service that consumes jobs. 
/// Allows adding multiple [`tower`] middleware pub fn chain( self, f: impl Fn(ServiceBuilder) -> ServiceBuilder, - ) -> WorkerBuilder { + ) -> WorkerBuilder { let middleware = f(self.layer); WorkerBuilder { @@ -105,7 +109,7 @@ impl WorkerBuilder { } } /// Allows adding a single layer [tower] middleware - pub fn layer(self, layer: U) -> WorkerBuilder, Serv> + pub fn layer(self, layer: U) -> WorkerBuilder, Serv> where M: Layer, { @@ -120,7 +124,7 @@ impl WorkerBuilder { /// Adds data to the context /// This will be shared by all requests - pub fn data(self, data: D) -> WorkerBuilder, M>, Serv> + pub fn data(self, data: D) -> WorkerBuilder, M>, Serv> where M: Layer>, { @@ -134,23 +138,22 @@ impl WorkerBuilder { } } -impl< - Req: Send + 'static + Sync, - P: Backend, S::Response> + 'static, - M: 'static, - S, - > WorkerFactory for WorkerBuilder +impl WorkerFactory for WorkerBuilder where - S: Service> + Send + 'static + Clone + Sync, + S: Service> + Send + 'static + Sync, S::Future: Send, S::Response: 'static, M: Layer, + Req: Send + 'static + Sync, + P: Backend, S::Response> + 'static, + M: 'static, { type Source = P; type Service = M::Service; - fn build(self, service: S) -> Worker> { + + fn build(self, service: S) -> Worker> { let worker_id = self.id; let poller = self.source; let middleware = self.layer; @@ -159,9 +162,8 @@ where Worker::new(worker_id, Ready::new(service, poller)) } } - /// Helper trait for building new Workers from [`WorkerBuilder`] -pub trait WorkerFactory { +pub trait WorkerFactory { /// The request source for the worker type Source; @@ -180,7 +182,7 @@ pub trait WorkerFactory { /// Helper trait for building new Workers from [`WorkerBuilder`] -pub trait WorkerFactoryFn { +pub trait WorkerFactoryFn { /// The request source for the [`Worker`] type Source; @@ -219,9 +221,9 @@ pub trait WorkerFactoryFn { fn build_fn(self, f: F) -> Worker>; } -impl WorkerFactoryFn for W +impl WorkerFactoryFn for W where - W: WorkerFactory>, + W: WorkerFactory>, { type Source = W::Source; diff --git a/packages/apalis-core/src/data.rs b/packages/apalis-core/src/data.rs index 33cd3f9e..e2829c8a 100644 --- a/packages/apalis-core/src/data.rs +++ b/packages/apalis-core/src/data.rs @@ -5,6 +5,8 @@ use std::collections::HashMap; use std::fmt; use std::hash::{BuildHasherDefault, Hasher}; +use crate::error::Error; + type AnyMap = HashMap, BuildHasherDefault>; // With TypeIds as keys, there's no need to hash them. They are already hashes @@ -87,6 +89,27 @@ impl Extensions { .and_then(|boxed| (**boxed).as_any().downcast_ref()) } + /// Get a checked reference to a type previously inserted on this `Extensions`. + /// + /// # Example + /// + /// ``` + /// # use apalis_core::data::Extensions; + /// let mut ext = Extensions::new(); + /// assert!(ext.get_checked::().is_err()); + /// ext.insert(5i32); + /// + /// assert_eq!(ext.get_checked::(), Ok(&5i32)); + /// ``` + pub fn get_checked(&self) -> Result<&T, Error> { + self.get() + .ok_or({ + let type_name = std::any::type_name::(); + Error::MissingData( + format!("Missing the an entry for `{type_name}`. Did you forget to add `.data(<{type_name}>)", )) + }) + } + /// Get a mutable reference to a type previously inserted on this `Extensions`. 
///
    /// # Example
diff --git a/packages/apalis-core/src/error.rs b/packages/apalis-core/src/error.rs
index 64f1f666..aa27412e 100644
--- a/packages/apalis-core/src/error.rs
+++ b/packages/apalis-core/src/error.rs
@@ -1,5 +1,13 @@
-use std::{error::Error as StdError, sync::Arc};
+use std::{
+    error::Error as StdError,
+    future::Future,
+    marker::PhantomData,
+    pin::Pin,
+    sync::Arc,
+    task::{Context, Poll},
+};
 use thiserror::Error;
+use tower::Service;

 use crate::worker::WorkerError;

@@ -14,22 +22,21 @@ pub enum Error {
     #[error("FailedError: {0}")]
     Failed(#[source] Arc<BoxDynError>),

-    /// A generic IO error
-    #[error("IoError: {0}")]
-    Io(#[from] Arc<std::io::Error>),
-
-    /// Missing some context and yet it was requested during execution.
-    #[error("MissingContextError: {0}")]
-    MissingContext(String),
-
     /// Execution was aborted
     #[error("AbortError: {0}")]
     Abort(#[source] Arc<BoxDynError>),

+    #[doc(hidden)]
     /// Encountered an error during worker execution
+    /// This should not be used inside a task function
     #[error("WorkerError: {0}")]
     WorkerError(WorkerError),

+    /// Missing some data and yet it was requested during execution.
+    /// This should not be used inside a task function
+    #[error("MissingDataError: {0}")]
+    MissingData(String),
+
+    #[doc(hidden)]
     /// Encountered an error during service execution
     /// This should not be used inside a task function
@@ -42,3 +49,83 @@ pub enum Error {
     #[error("Encountered an error during streaming")]
     SourceError(#[source] Arc<BoxDynError>),
 }
+
+impl From<BoxDynError> for Error {
+    fn from(err: BoxDynError) -> Self {
+        if let Some(e) = err.downcast_ref::<Error>() {
+            e.clone()
+        } else {
+            Error::Failed(Arc::new(err))
+        }
+    }
+}
+
+/// A Tower layer for handling and converting service errors into a custom `Error` type.
+///
+/// This layer wraps a service and intercepts any errors returned by the service.
+/// It attempts to downcast the error into the custom `Error` enum. If the downcast
+/// succeeds, it returns the downcasted `Error`. If the downcast fails, the original
+/// error is wrapped in `Error::Failed`.
+///
+/// The service's error type must implement `Into<BoxDynError>`, allowing for flexible
+/// error handling, especially when dealing with trait objects or complex error chains.
+#[derive(Clone, Debug)] +pub struct ErrorHandlingLayer { + _p: PhantomData<()>, +} + +impl ErrorHandlingLayer { + /// Create a new ErrorHandlingLayer + pub fn new() -> Self { + Self { _p: PhantomData } + } +} + +impl Default for ErrorHandlingLayer { + fn default() -> Self { + Self::new() + } +} + +impl tower::layer::Layer for ErrorHandlingLayer { + type Service = ErrorHandlingService; + + fn layer(&self, service: S) -> Self::Service { + ErrorHandlingService { service } + } +} + +/// The underlying service +#[derive(Clone, Debug)] +pub struct ErrorHandlingService { + service: S, +} + +impl Service for ErrorHandlingService +where + S: Service, + S::Error: Into, + S::Future: Send + 'static, +{ + type Response = S::Response; + type Error = Error; + type Future = Pin> + Send>>; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.service.poll_ready(cx).map_err(|e| { + let boxed_error: BoxDynError = e.into(); + boxed_error.into() + }) + } + + fn call(&mut self, req: Request) -> Self::Future { + let fut = self.service.call(req); + + Box::pin(async move { + fut.await.map_err(|e| { + let boxed_error: BoxDynError = e.into(); + boxed_error.into() + }) + }) + } +} diff --git a/packages/apalis-core/src/layers.rs b/packages/apalis-core/src/layers.rs index 891be4a5..2d9c9713 100644 --- a/packages/apalis-core/src/layers.rs +++ b/packages/apalis-core/src/layers.rs @@ -1,5 +1,6 @@ use crate::error::{BoxDynError, Error}; use crate::request::Request; +use crate::response::Response; use futures::channel::mpsc::{SendError, Sender}; use futures::SinkExt; use futures::{future::BoxFuture, Future, FutureExt}; @@ -133,9 +134,9 @@ pub mod extensions { value: T, } - impl Service> for AddExtension + impl Service> for AddExtension where - S: Service>, + S: Service>, T: Clone + Send + Sync + 'static, { type Response = S::Response; @@ -147,8 +148,8 @@ pub mod extensions { self.inner.poll_ready(cx) } - fn call(&mut self, mut req: Request) -> Self::Future { - req.data.insert(self.value.clone()); + fn call(&mut self, mut req: Request) -> Self::Future { + req.parts.data.insert(self.value.clone()); self.inner.call(req) } } @@ -157,7 +158,7 @@ pub mod extensions { /// A trait for acknowledging successful processing /// This trait is called even when a task fails. 
/// This is a way of a [`Backend`] to save the result of a job or message -pub trait Ack { +pub trait Ack { /// The data to fetch from context to allow acknowledgement type Context; /// The error returned by the ack @@ -167,19 +168,19 @@ pub trait Ack { fn ack( &mut self, ctx: &Self::Context, - result: &Result, + response: &Response, ) -> impl Future> + Send; } impl Ack - for Sender<(Ctx, Result)> + for Sender<(Ctx, Response)> { type AckError = SendError; type Context = Ctx; async fn ack( &mut self, ctx: &Self::Context, - result: &Result, + result: &Response, ) -> Result<(), Self::AckError> { let ctx = ctx.clone(); self.send((ctx, result.clone())).await.unwrap(); @@ -189,13 +190,13 @@ impl Ack /// A layer that acknowledges a job completed successfully #[derive(Debug)] -pub struct AckLayer { +pub struct AckLayer { ack: A, - job_type: PhantomData, + job_type: PhantomData>, res: PhantomData, } -impl AckLayer { +impl AckLayer { /// Build a new [AckLayer] for a job pub fn new(ack: A) -> Self { Self { @@ -206,14 +207,14 @@ impl AckLayer { } } -impl Layer for AckLayer +impl Layer for AckLayer where - S: Service> + Send + 'static, + S: Service> + Send + 'static, S::Error: std::error::Error + Send + Sync + 'static, S::Future: Send + 'static, - A: Ack + Clone + Send + Sync + 'static, + A: Ack + Clone + Send + Sync + 'static, { - type Service = AckService; + type Service = AckService; fn layer(&self, service: S) -> Self::Service { AckService { @@ -227,14 +228,14 @@ where /// The underlying service for an [AckLayer] #[derive(Debug)] -pub struct AckService { +pub struct AckService { service: SV, ack: A, - job_type: PhantomData, + job_type: PhantomData>, res: PhantomData, } -impl Clone for AckService { +impl Clone for AckService { fn clone(&self) -> Self { Self { ack: self.ack.clone(), @@ -245,15 +246,22 @@ impl Clone for AckService { } } -impl Service> for AckService +impl Service> for AckService where - SV: Service> + Send + Sync + 'static, - >>::Error: Into + Send + Sync + 'static, - >>::Future: std::marker::Send + 'static, - A: Ack>>::Response> + Send + 'static + Clone + Send + Sync, - T: 'static + Send, - >>::Response: std::marker::Send + fmt::Debug + Sync + Serialize, - >::Context: Sync + Send + Clone, + SV: Service> + Send + Sync + 'static, + >>::Error: Into + Send + Sync + 'static, + >>::Future: std::marker::Send + 'static, + A: Ack>>::Response, Context = Ctx> + + Send + + 'static + + Clone + + Send + + Sync, + Req: 'static + Send, + >>::Response: std::marker::Send + fmt::Debug + Sync + Serialize, + >::Context: Sync + Send + Clone, + >>::Response>>::Context: 'static, + Ctx: Clone, { type Response = SV::Response; type Error = Error; @@ -268,12 +276,11 @@ where .map_err(|e| Error::Failed(Arc::new(e.into()))) } - fn call(&mut self, request: Request) -> Self::Future { + fn call(&mut self, request: Request) -> Self::Future { let mut ack = self.ack.clone(); - let data = request - .get::<>::Context>() - .cloned(); - + let ctx = request.parts.context.clone(); + let attempt = request.parts.attempt.clone(); + let task_id = request.parts.task_id.clone(); let fut = self.service.call(request); let fut_with_ack = async move { let res = fut.await.map_err(|err| { @@ -284,19 +291,17 @@ where } Error::Failed(Arc::new(e)) }); - - if let Some(ctx) = data { - if let Err(_e) = ack.ack(&ctx, &res).await { - // TODO: Implement tracing in apalis core - // tracing::error!("Acknowledgement Failed: {}", e); - } - } else { - // tracing::error!( - // "Acknowledgement could not be called due to missing ack data in context : 
{}", - // &std::any::type_name::<>::Acknowledger>() - // ); + let response = Response { + attempt, + inner: res, + task_id, + _priv: (), + }; + if let Err(_e) = ack.ack(&ctx, &response).await { + // TODO: Implement tracing in apalis core + // tracing::error!("Acknowledgement Failed: {}", e); } - res + response.inner }; fut_with_ack.boxed() } diff --git a/packages/apalis-core/src/lib.rs b/packages/apalis-core/src/lib.rs index 0e7157ec..6f01618f 100644 --- a/packages/apalis-core/src/lib.rs +++ b/packages/apalis-core/src/lib.rs @@ -251,28 +251,29 @@ pub mod test_utils { /// } ///} /// ```` - impl TestWrapper + impl TestWrapper, Res> where - B: Backend, Res> + Send + Sync + 'static + Clone, + B: Backend, Res> + Send + Sync + 'static + Clone, Req: Send + 'static, + Ctx: Send, B::Stream: Send + 'static, - B::Stream: Stream>, crate::error::Error>> + Unpin, + B::Stream: Stream>, crate::error::Error>> + Unpin, { /// Build a new instance provided a custom service pub fn new_with_service(backend: B, service: S) -> (Self, BoxFuture<'static, ()>) where - S: Service, Response = Res> + Send + 'static, + S: Service, Response = Res> + Send + 'static, B::Layer: Layer, - <, Res>>::Layer as Layer>::Service: - Service> + Send + 'static, - <<, Res>>::Layer as Layer>::Service as Service< - Request, + <, Res>>::Layer as Layer>::Service: + Service> + Send + 'static, + <<, Res>>::Layer as Layer>::Service as Service< + Request, >>::Response: Send + Debug, - <<, Res>>::Layer as Layer>::Service as Service< - Request, + <<, Res>>::Layer as Layer>::Service as Service< + Request, >>::Error: Send + Into + Sync, - <<, Res>>::Layer as Layer>::Service as Service< - Request, + <<, Res>>::Layer as Layer>::Service as Service< + Request, >>::Future: Send + 'static, { let worker_id = WorkerId::new("test-worker"); @@ -291,10 +292,7 @@ pub mod test_utils { item = poller.stream.next().fuse() => match item { Some(Ok(Some(req))) => { - - let task_id = req.get::().cloned().unwrap_or_default(); - // .expect("Request does not contain Task_ID"); - // handle request + let task_id = req.parts.task_id.clone(); match service.call(req).await { Ok(res) => { res_tx.send((task_id, Ok(format!("{res:?}")))).await.unwrap(); @@ -340,9 +338,9 @@ pub mod test_utils { } } - impl Deref for TestWrapper + impl Deref for TestWrapper, Res> where - B: Backend, Res>, + B: Backend, Res>, { type Target = B; @@ -351,9 +349,9 @@ pub mod test_utils { } } - impl DerefMut for TestWrapper + impl DerefMut for TestWrapper, Res> where - B: Backend, Res>, + B: Backend, Res>, { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.backend @@ -369,7 +367,7 @@ pub mod test_utils { #[tokio::test] async fn it_works_as_an_mq_backend() { let backend = $backend_instance; - let service = apalis_test_service_fn(|request: Request| async { + let service = apalis_test_service_fn(|request: Request| async { Ok::<_, io::Error>(request) }); let (mut t, poller) = TestWrapper::new_with_service(backend, service); @@ -388,8 +386,8 @@ pub mod test_utils { #[tokio::test] async fn integration_test_storage_push_and_consume() { let backend = $setup().await; - let service = apalis_test_service_fn(|request: Request| async move { - Ok::<_, io::Error>(request.take()) + let service = apalis_test_service_fn(|request: Request| async move { + Ok::<_, io::Error>(request.args) }); let (mut t, poller) = TestWrapper::new_with_service(backend, service); tokio::spawn(poller); diff --git a/packages/apalis-core/src/memory.rs b/packages/apalis-core/src/memory.rs index b8f08b45..731c4505 100644 --- 
a/packages/apalis-core/src/memory.rs
+++ b/packages/apalis-core/src/memory.rs
@@ -52,8 +52,8 @@ impl<T> Clone for MemoryStorage<T> {
 /// In-memory queue that implements [Stream]
 #[derive(Debug)]
 pub struct MemoryWrapper<T> {
-    sender: Sender<Request<T>>,
-    receiver: Arc<futures::lock::Mutex<Receiver<Request<T>>>>,
+    sender: Sender<Request<T, ()>>,
+    receiver: Arc<futures::lock::Mutex<Receiver<Request<T, ()>>>>,
 }

 impl<T> Clone for MemoryWrapper<T> {
@@ -84,7 +84,7 @@ impl<T> Default for MemoryWrapper<T> {
 }

 impl<T> Stream for MemoryWrapper<T> {
-    type Item = Request<T>;
+    type Item = Request<T, ()>;

     fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
         if let Some(mut receiver) = self.receiver.try_lock() {
@@ -96,8 +96,8 @@ impl<T> Stream for MemoryWrapper<T> {
 }

 // MemoryStorage as a Backend
-impl<T: Send + 'static + Sync, Res> Backend<Request<T>, Res> for MemoryStorage<T> {
-    type Stream = BackendStream<RequestStream<Request<T>>>;
+impl<T: Send + 'static + Sync, Res> Backend<Request<T, ()>, Res> for MemoryStorage<T> {
+    type Stream = BackendStream<RequestStream<Request<T, ()>>>;

     type Layer = Identity;

diff --git a/packages/apalis-core/src/monitor/mod.rs b/packages/apalis-core/src/monitor/mod.rs
index 82d19c4a..92e0e453 100644
--- a/packages/apalis-core/src/monitor/mod.rs
+++ b/packages/apalis-core/src/monitor/mod.rs
@@ -79,27 +79,24 @@ impl<E: Executor + Clone + Send + 'static + Sync> Monitor<E> {
     /// Registers a single instance of a [Worker]
-    pub fn register<
-        J: Send + Sync + 'static,
-        S: Service<Request<J>> + Send + 'static,
-        P: Backend<Request<J>, Res> + 'static,
-        Res: 'static
-    >(
-        mut self,
-        worker: Worker<Ready<S, P>>,
-    ) -> Self
+    pub fn register<Req, Ctx, S, P, Res>(mut self, worker: Worker<Ready<S, P>>) -> Self
     where
         S::Future: Send,
         S::Response: 'static + Send + Sync + Serialize,
         S::Error: Send + Sync + 'static + Into<BoxDynError>,
-        <P as Backend<Request<J>, Res>>::Stream: Unpin + Send + 'static,
+        P::Stream: Unpin + Send + 'static,
         P::Layer: Layer<S>,
-        <<P as Backend<Request<J>, Res>>::Layer as Layer<S>>::Service: Service<Request<J>, Response = Res>,
-        <<P as Backend<Request<J>, Res>>::Layer as Layer<S>>::Service: Send,
-        <<<P as Backend<Request<J>, Res>>::Layer as Layer<S>>::Service as Service<Request<J>>>::Future:
-            Send,
-        <<<P as Backend<Request<J>, Res>>::Layer as Layer<S>>::Service as Service<Request<J>>>::Error:
-            Send + Into<BoxDynError> + Sync,
+        <P::Layer as Layer<S>>::Service: Service<Request<Req, Ctx>, Response = Res>,
+        <P::Layer as Layer<S>>::Service: Send,
+        <<P::Layer as Layer<S>>::Service as Service<Request<Req, Ctx>>>::Future: Send,
+        <<P::Layer as Layer<S>>::Service as Service<Request<Req, Ctx>>>::Error: Send + Into<BoxDynError> + Sync,
+        S: Service<Request<Req, Ctx>, Response = Res> + Send + 'static,
+        Ctx: Send + Sync + 'static,
+        Req: Send + Sync + 'static,
+        P: Backend<Request<Req, Ctx>, Res> + 'static,
+        Res: 'static,
+        Ctx: Send + Sync + 'static,
     {
         self.workers.push(worker.with_monitor(&self));

@@ -116,12 +113,7 @@ impl<E: Executor + Clone + Send + 'static + Sync> Monitor<E> {
     /// # Returns
     ///
     /// The monitor instance, with all workers added to the collection.
-    pub fn register_with_count<
-        J: Send + Sync + 'static,
-        S: Service<Request<J>> + Send + 'static,
-        P: Backend<Request<J>, Res> + 'static,
-        Res: 'static + Send,
-    >(
+    pub fn register_with_count<Req, Ctx, S, P, Res>(
         mut self,
         count: usize,
         worker: Worker<Ready<S, P>>,
     ) -> Self
     where
         S::Future: Send,
         S::Response: 'static + Send + Sync + Serialize,
         S::Error: Send + Sync + 'static + Into<BoxDynError>,
-        <P as Backend<Request<J>, Res>>::Stream: Unpin + Send + 'static,
+        P::Stream: Unpin + Send + 'static,
         P::Layer: Layer<S>,
-        <<P as Backend<Request<J>, Res>>::Layer as Layer<S>>::Service: Service<Request<J>, Response = Res>,
-        <<P as Backend<Request<J>, Res>>::Layer as Layer<S>>::Service: Send,
-        <<<P as Backend<Request<J>, Res>>::Layer as Layer<S>>::Service as Service<Request<J>>>::Future:
-            Send,
-        <<<P as Backend<Request<J>, Res>>::Layer as Layer<S>>::Service as Service<Request<J>>>::Error:
-            Send + Into<BoxDynError> + Sync,
+        S: Service<Request<Req, Ctx>, Response = Res> + Send + 'static,
+        Ctx: Send + Sync + 'static,
+        Req: Send + Sync + 'static,
+        S: Service<Request<Req, Ctx>> + Send + 'static,
+        P: Backend<Request<Req, Ctx>, Res> + 'static,
+        Res: 'static,
+        Ctx: Send + Sync + 'static,
     {
         let workers = worker.with_monitor_instances(count, &self);
         self.workers.extend(workers);
@@ -328,7 +327,7 @@ mod tests {
                 handle.enqueue(i).await.unwrap();
             }
         });
-        let service = tower::service_fn(|request: Request<u32>| async {
+        let service = tower::service_fn(|request: Request<u32, ()>| async {
             tokio::time::sleep(Duration::from_secs(1)).await;
             Ok::<_, io::Error>(request)
         });
@@ -354,7 +353,7 @@ mod tests {
                 handle.enqueue(i).await.unwrap();
             }
         });
-        let service = tower::service_fn(|request: Request<u32>| async {
+        let service = tower::service_fn(|request: Request<u32, ()>| async {
             tokio::time::sleep(Duration::from_secs(1)).await;
             Ok::<_, io::Error>(request)
         });
diff --git a/packages/apalis-core/src/request.rs b/packages/apalis-core/src/request.rs
index 11bea89f..428b4a89 100644
--- a/packages/apalis-core/src/request.rs
+++ b/packages/apalis-core/src/request.rs
@@ -8,56 +8,93 @@
 use crate::{
     data::Extensions,
     error::Error,
     poller::Poller,
-    task::{attempt::Attempt, task_id::TaskId},
+    task::{attempt::Attempt, namespace::Namespace, task_id::TaskId},
     worker::WorkerId,
     Backend,
 };

 /// Represents a job which can be serialized and executed
-#[derive(Serialize, Debug, Deserialize, Clone)]
-pub struct Request<T> {
-    pub(crate) args: T,
+#[derive(Serialize, Debug, Deserialize, Clone, Default)]
+pub struct Request<Args, Ctx> {
+    /// The inner request part
+    pub args: Args,
+    /// Parts of the request eg id, attempts and context
+    pub parts: Parts<Ctx>,
+}
+
+/// Component parts of a `Request`
+#[non_exhaustive]
+#[derive(Serialize, Debug, Deserialize, Clone, Default)]
+pub struct Parts<Ctx> {
+    /// The request's id
+    pub task_id: TaskId,
+
+    /// The request's extensions
     #[serde(skip)]
-    pub(crate) data: Extensions,
+    pub data: Extensions,
+
+    /// The request's attempts
+    pub attempt: Attempt,
+
+    /// The Context stored by the storage
+    pub context: Ctx,
+
+    /// Represents the namespace
+    #[serde(skip)]
+    pub namespace: Option<Namespace>,
 }

-impl<T> Request<T> {
+impl<T, Ctx: Default> Request<T, Ctx> {
     /// Creates a new [Request]
-    pub fn new(req: T) -> Self {
-        let id = TaskId::new();
-        let mut data = Extensions::new();
-        data.insert(id);
-        data.insert(Attempt::default());
-        Self::new_with_data(req, data)
+    pub fn new(args: T) -> Self {
+        Self::new_with_data(args, Extensions::default(), Ctx::default())
+    }
+
+    /// Creates a request with all parts provided
+    pub fn new_with_parts(args: T, parts: Parts<Ctx>) -> Self {
+        Self { args, parts }
     }

     /// Creates a request with context provided
-    pub fn new_with_data(req: T, data: Extensions) -> Self {
-        Self { args: req, data }
+    pub fn new_with_ctx(req: T, ctx: Ctx) -> Self {
+        Self {
+            args: req,
+            parts: Parts {
+                context: ctx,
+                ..Default::default()
+            },
+        }
     }

-    /// Get the underlying reference of the request
-    pub fn inner(&self) -> &T {
-        &self.args
+    /// Creates a request with data and context provided
+    pub fn new_with_data(req: T, data: Extensions, ctx: Ctx) -> Self {
+        Self {
+            args: req,
+            parts: Parts {
+                context: ctx,
+                data,
+                ..Default::default()
+            },
+        }
     }

-    /// Take the underlying reference of the request
-    pub fn take(self) -> T {
-        self.args
+    /// Take the parts
+    pub fn take_parts(self)
-> (T, Parts) { + (self.args, self.parts) } } -impl std::ops::Deref for Request { +impl std::ops::Deref for Request { type Target = Extensions; fn deref(&self) -> &Self::Target { - &self.data + &self.parts.data } } -impl std::ops::DerefMut for Request { +impl std::ops::DerefMut for Request { fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.data + &mut self.parts.data } } @@ -69,7 +106,7 @@ pub type RequestFuture = BoxFuture<'static, T>; /// Represents a stream for T. pub type RequestStream = BoxStream<'static, Result, Error>>; -impl Backend, Res> for RequestStream> { +impl Backend, Res> for RequestStream> { type Stream = Self; type Layer = Identity; diff --git a/packages/apalis-core/src/response.rs b/packages/apalis-core/src/response.rs index efda8920..eb917be8 100644 --- a/packages/apalis-core/src/response.rs +++ b/packages/apalis-core/src/response.rs @@ -1,6 +1,116 @@ -use std::{any::Any, sync::Arc}; +use std::{any::Any, fmt::Debug, sync::Arc}; -use crate::error::Error; +use crate::{ + error::Error, + task::{attempt::Attempt, task_id::TaskId}, +}; + +/// A generic `Response` struct that wraps the result of a task, containing the outcome (`Ok` or `Err`), +/// task metadata such as `task_id`, `attempt`, and an internal marker field for future extensions. +/// +/// # Type Parameters +/// - `Res`: The successful result type of the response. +/// +/// # Fields +/// - `inner`: A `Result` that holds either the success value of type `Res` or an `Error` on failure. +/// - `task_id`: A `TaskId` representing the unique identifier for the task. +/// - `attempt`: An `Attempt` representing how many attempts were made to complete the task. +/// - `_priv`: A private marker field to prevent external construction of the `Response`. +#[derive(Debug, Clone)] +pub struct Response { + /// The result from a task + pub inner: Result, + /// The task id + pub task_id: TaskId, + /// The current attempt + pub attempt: Attempt, + pub(crate) _priv: (), +} + +impl Response { + /// Creates a new `Response` instance. + /// + /// # Arguments + /// - `inner`: A `Result` holding either a successful response of type `Res` or an `Error`. + /// - `task_id`: A `TaskId` representing the unique identifier for the task. + /// - `attempt`: The attempt count when creating this response. + /// + /// # Returns + /// A new `Response` instance. + pub fn new(inner: Result, task_id: TaskId, attempt: Attempt) -> Self { + Response { + inner, + task_id, + attempt, + _priv: (), + } + } + + /// Constructs a successful `Response`. + /// + /// # Arguments + /// - `res`: The success value of type `Res`. + /// - `task_id`: A `TaskId` representing the unique identifier for the task. + /// - `attempt`: The attempt count when creating this response. + /// + /// # Returns + /// A `Response` instance containing the success value. + pub fn success(res: Res, task_id: TaskId, attempt: Attempt) -> Self { + Self::new(Ok(res), task_id, attempt) + } + + /// Constructs a failed `Response`. + /// + /// # Arguments + /// - `error`: The `Error` that occurred. + /// - `task_id`: A `TaskId` representing the unique identifier for the task. + /// - `attempt`: The attempt count when creating this response. + /// + /// # Returns + /// A `Response` instance containing the error. + pub fn failure(error: Error, task_id: TaskId, attempt: Attempt) -> Self { + Self::new(Err(error), task_id, attempt) + } + + /// Checks if the `Response` contains a success (`Ok`). + /// + /// # Returns + /// `true` if the `Response` is successful, `false` otherwise. 
+ pub fn is_success(&self) -> bool { + self.inner.is_ok() + } + + /// Checks if the `Response` contains a failure (`Err`). + /// + /// # Returns + /// `true` if the `Response` is a failure, `false` otherwise. + pub fn is_failure(&self) -> bool { + self.inner.is_err() + } + + /// Maps the success value (`Res`) of the `Response` to another type using the provided function. + /// + /// # Arguments + /// - `f`: A function that takes a reference to the success value and returns a new value of type `T`. + /// + /// # Returns + /// A new `Response` with the transformed success value or the same error. + /// + /// # Type Parameters + /// - `F`: A function or closure that takes a reference to a value of type `Res` and returns a value of type `T`. + /// - `T`: The new type of the success value after mapping. + pub fn map(&self, f: F) -> Response + where + F: FnOnce(&Res) -> T, + { + Response { + inner: self.inner.as_ref().map(f).map_err(|e| e.clone()), + task_id: self.task_id.clone(), + attempt: self.attempt.clone(), + _priv: (), + } + } +} /// Helper for Job Responses pub trait IntoResponse { diff --git a/packages/apalis-core/src/service_fn.rs b/packages/apalis-core/src/service_fn.rs index d89e4dfe..85ef4e31 100644 --- a/packages/apalis-core/src/service_fn.rs +++ b/packages/apalis-core/src/service_fn.rs @@ -1,3 +1,4 @@ +use crate::error::Error; use crate::layers::extensions::Data; use crate::request::Request; use crate::response::IntoResponse; @@ -10,20 +11,25 @@ use std::task::{Context, Poll}; use tower::Service; /// A helper method to build functions -pub fn service_fn(f: T) -> ServiceFn { - ServiceFn { f, k: PhantomData } +pub fn service_fn(f: T) -> ServiceFn { + ServiceFn { + f, + req: PhantomData, + fn_args: PhantomData, + } } /// An executable service implemented by a closure. /// /// See [`service_fn`] for more details. #[derive(Copy, Clone)] -pub struct ServiceFn { +pub struct ServiceFn { f: T, - k: PhantomData, + req: PhantomData>, + fn_args: PhantomData, } -impl fmt::Debug for ServiceFn { +impl fmt::Debug for ServiceFn { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("ServiceFn") .field("f", &format_args!("{}", std::any::type_name::())) @@ -34,48 +40,62 @@ impl fmt::Debug for ServiceFn { /// The Future returned from [`ServiceFn`] service. pub type FnFuture = Map std::result::Result>; -/// Allows getting some type from the [Request] data -pub trait FromData: Sized + Clone + Send + Sync + 'static { - /// Gets the value - fn get(data: &crate::data::Extensions) -> Self { - data.get::().unwrap().clone() - } +/// Handles extraction +pub trait FromRequest: Sized { + /// Perform the extraction. + fn from_request(req: &Req) -> Result; } -impl FromData for Data { - fn get(ctx: &crate::data::Extensions) -> Self { - Data::new(ctx.get::().unwrap().clone()) +impl FromRequest> for Data { + fn from_request(req: &Request) -> Result { + req.parts.data.get_checked().cloned().map(Data::new) } } macro_rules! 
impl_service_fn { ($($K:ident),+) => { #[allow(unused_parens)] - impl Service> for ServiceFn + impl Service> for ServiceFn where T: FnMut(Req, $($K),+) -> F, F: Future, F::Output: IntoResponse>, - $($K: FromData),+, + $($K: FromRequest>),+, + E: From { type Response = R; type Error = E; - type Future = FnFuture; + type Future = futures::future::Either>, FnFuture>; fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll> { Poll::Ready(Ok(())) } - fn call(&mut self, task: Request) -> Self::Future { - let fut = (self.f)(task.args, $($K::get(&task.data)),+); + fn call(&mut self, task: Request) -> Self::Future { + + #[allow(non_snake_case)] + let fut = { + let results: Result<($($K),+), E> = (|| { + Ok(($($K::from_request(&task)?),+)) + })(); + + match results { + Ok(($($K),+)) => { + let req = task.args; + (self.f)(req, $($K),+) + } + Err(e) => return futures::future::Either::Left(futures::future::err(e).into()), + } + }; + - fut.map(F::Output::into_response) + futures::future::Either::Right(fut.map(F::Output::into_response)) } } }; } -impl Service> for ServiceFn +impl Service> for ServiceFn where T: FnMut(Req) -> F, F: Future, @@ -89,7 +109,7 @@ where Poll::Ready(Ok(())) } - fn call(&mut self, task: Request) -> Self::Future { + fn call(&mut self, task: Request) -> Self::Future { let fut = (self.f)(task.args); fut.map(F::Output::into_response) diff --git a/packages/apalis-core/src/storage/mod.rs b/packages/apalis-core/src/storage/mod.rs index f3b26fe3..c761c745 100644 --- a/packages/apalis-core/src/storage/mod.rs +++ b/packages/apalis-core/src/storage/mod.rs @@ -1,11 +1,11 @@ use std::time::Duration; -use futures::{stream::BoxStream, Future}; +use futures::Future; -use crate::request::Request; - -/// The result of sa stream produced by a [Storage] -pub type StorageStream = BoxStream<'static, Result>, E>>; +use crate::{ + request::{Parts, Request}, + task::task_id::TaskId, +}; /// Represents a [Storage] that can persist a request. pub trait Storage { @@ -15,21 +15,38 @@ pub trait Storage { /// The error produced by the storage type Error; - /// Jobs must have Ids. 
- type Identifier; + /// This is the type that storages store as the metadata related to a job + type Context: Default; /// Pushes a job to a storage fn push( &mut self, job: Self::Job, - ) -> impl Future> + Send; + ) -> impl Future, Self::Error>> + Send { + self.push_request(Request::new(job)) + } + + /// Pushes a constructed request to a storage + fn push_request( + &mut self, + req: Request, + ) -> impl Future, Self::Error>> + Send; - /// Push a job into the scheduled set + /// Push a job with defaults into the scheduled set fn schedule( &mut self, job: Self::Job, on: i64, - ) -> impl Future> + Send; + ) -> impl Future, Self::Error>> + Send { + self.schedule_request(Request::new(job), on) + } + + /// Push a request into the scheduled set + fn schedule_request( + &mut self, + request: Request, + on: i64, + ) -> impl Future, Self::Error>> + Send; /// Return the number of pending jobs from the queue fn len(&mut self) -> impl Future> + Send; @@ -37,19 +54,19 @@ pub trait Storage { /// Fetch a job given an id fn fetch_by_id( &mut self, - job_id: &Self::Identifier, - ) -> impl Future>, Self::Error>> + Send; + job_id: &TaskId, + ) -> impl Future>, Self::Error>> + Send; /// Update a job details fn update( &mut self, - job: Request, + job: Request, ) -> impl Future> + Send; /// Reschedule a job fn reschedule( &mut self, - job: Request, + job: Request, wait: Duration, ) -> impl Future> + Send; diff --git a/packages/apalis-core/src/task/attempt.rs b/packages/apalis-core/src/task/attempt.rs index 3f4825a1..9c1d84ee 100644 --- a/packages/apalis-core/src/task/attempt.rs +++ b/packages/apalis-core/src/task/attempt.rs @@ -5,6 +5,8 @@ use std::sync::{ use serde::{Deserialize, Deserializer, Serialize, Serializer}; +use crate::{request::Request, service_fn::FromRequest}; + /// A wrapper to keep count of the attempts tried by a task #[derive(Debug, Clone)] pub struct Attempt(Arc); @@ -72,3 +74,9 @@ impl Attempt { self.0.fetch_add(1, std::sync::atomic::Ordering::Relaxed) } } + +impl FromRequest> for Attempt { + fn from_request(req: &Request) -> Result { + Ok(req.parts.attempt.clone()) + } +} diff --git a/packages/apalis-core/src/task/namespace.rs b/packages/apalis-core/src/task/namespace.rs index c38f60bb..16a5c9d0 100644 --- a/packages/apalis-core/src/task/namespace.rs +++ b/packages/apalis-core/src/task/namespace.rs @@ -2,8 +2,10 @@ use std::convert::From; use std::fmt::{self, Display, Formatter}; use std::ops::Deref; +use serde::{Deserialize, Serialize}; + /// A wrapper type that defines a task's namespace. 
-#[derive(Debug, Clone)]
+#[derive(Debug, Clone, Serialize, Deserialize)]
 pub struct Namespace(pub String);
 
 impl Deref for Namespace {
diff --git a/packages/apalis-core/src/worker/mod.rs b/packages/apalis-core/src/worker/mod.rs
index a4c1f95a..e5cf5c69 100644
--- a/packages/apalis-core/src/worker/mod.rs
+++ b/packages/apalis-core/src/worker/mod.rs
@@ -6,7 +6,7 @@ use crate::monitor::{Monitor, MonitorContext};
 use crate::notify::Notify;
 use crate::poller::FetchNext;
 use crate::request::Request;
-use crate::service_fn::FromData;
+use crate::service_fn::FromRequest;
 use crate::Backend;
 use futures::future::Shared;
 use futures::{Future, FutureExt};
@@ -77,8 +77,6 @@ impl FromStr for WorkerId {
     }
 }
 
-impl FromData for WorkerId {}
-
 impl Display for WorkerId {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         f.write_str(self.name())?;
@@ -218,222 +216,76 @@ impl<S, P> Worker<Ready<S, P>> {
 }
 
 impl<S, P> Worker<Ready<S, P>> {
-    /// Start a worker with a custom executor
-    pub fn with_executor<E, J, Res>(self, executor: E) -> Worker<Context<E>>
-    where
-        S: Service<Request<J>> + Send + 'static,
-        P: Backend<Request<J>, Res> + 'static,
-        J: Send + 'static + Sync,
-        S::Future: Send,
-        S::Response: 'static + Send + Sync + Serialize,
-        S::Error: Send + Sync + 'static + Into<BoxDynError>,
-        S::Error: Send + Sync + 'static + Into<BoxDynError>,
-        <P as Backend<Request<J>, Res>>::Stream: Unpin + Send + 'static,
-        E: Executor + Clone + Send + 'static + Sync,
-        P::Layer: Layer<S>,
-        <<P as Backend<Request<J>, Res>>::Layer as Layer<S>>::Service: Service<Request<J>, Response = Res> + 'static,
-        <<P as Backend<Request<J>, Res>>::Layer as Layer<S>>::Service: Send,
-        <<<P as Backend<Request<J>, Res>>::Layer as Layer<S>>::Service as Service<Request<J>>>::Future:
-            Send,
-        <<<P as Backend<Request<J>, Res>>::Layer as Layer<S>>::Service as Service<Request<J>>>::Error:
-            Send + std::error::Error + Sync,
-    {
-        let notifier = Notify::new();
-        let service = self.state.service;
-        let backend = self.state.backend;
-        let poller = backend
-            .poll::<<<P as Backend<Request<J>, Res>>::Layer as Layer<S>>::Service>(self.id.clone());
-        let polling = poller.heartbeat.shared();
-        let default_layer = poller.layer;
-        let service = default_layer.layer(service);
-        let worker_stream = WorkerStream::new(poller.stream, notifier.clone())
-            .into_future()
-            .shared();
-        Self::build_worker_instance(
-            WorkerId::new(self.id.name()),
-            service,
-            executor.clone(),
-            notifier.clone(),
-            polling.clone(),
-            worker_stream.clone(),
-            None,
-        )
-    }
-
-    /// Run as a monitored worker
-    pub fn with_monitor<E, J, Res>(self, monitor: &Monitor<E>) -> Worker<Context<E>>
-    where
-        S: Service<Request<J>> + Send + 'static,
-        P: Backend<Request<J>, Res> + 'static,
-        J: Send + 'static + Sync,
-        S::Future: Send,
-        S::Response: 'static + Send + Sync + Serialize,
-        S::Error: Send + Sync + 'static + Into<BoxDynError>,
-        <P as Backend<Request<J>, Res>>::Stream: Unpin + Send + 'static,
-        E: Executor + Clone + Send + 'static + Sync,
-        P::Layer: Layer<S>,
-        <<P as Backend<Request<J>, Res>>::Layer as Layer<S>>::Service: Service<Request<J>, Response = Res>,
-        <<P as Backend<Request<J>, Res>>::Layer as Layer<S>>::Service: Send,
-        <<<P as Backend<Request<J>, Res>>::Layer as Layer<S>>::Service as Service<
-            Request<J>,
-        >>::Future: Send,
-        <<<P as Backend<Request<J>, Res>>::Layer as Layer<S>>::Service as Service<
-            Request<J>,
-        >>::Error: Send + Into<BoxDynError> + Sync,
-    {
-        let notifier = Notify::new();
-        let service = self.state.service;
-        let backend = self.state.backend;
-        let executor = monitor.executor().clone();
-        let context = monitor.context().clone();
-        let poller = backend
-            .poll::<<<P as Backend<Request<J>, Res>>::Layer as Layer<S>>::Service>(self.id.clone());
-        let default_layer = poller.layer;
-        let service = default_layer.layer(service);
-        let polling = poller.heartbeat.shared();
-        let worker_stream = WorkerStream::new(poller.stream, notifier.clone())
-            .into_future()
-            .shared();
-        Self::build_worker_instance(
-            WorkerId::new(self.id.name()),
-            service,
-            executor.clone(),
-            notifier.clone(),
-            polling.clone(),
-            worker_stream.clone(),
-            Some(context.clone()),
-        )
-    }
-
-    /// Run a specified amounts of instances
-    pub fn with_monitor_instances<E, J, Res>(
+    fn common_worker_setup<E, Req, Res, Ctx>(
         self,
+        executor: E,
+        context: Option<MonitorContext>,
         instances: usize,
-        monitor: &Monitor<E>,
     ) -> Vec<Worker<Context<E>>>
     where
-        S: Service<Request<J>> + Send + 'static,
-        P: Backend<Request<J>, Res> + 'static,
-        J: Send + 'static + Sync,
+        S: Service<Request<Req, Ctx>, Response = Res> + Send + 'static,
+        P: Backend<Request<Req, Ctx>, Res> + 'static,
+        Req: Send + 'static + Sync,
         S::Future: Send,
         S::Response: 'static + Send + Sync + Serialize,
         S::Error: Send + Sync + 'static + Into<BoxDynError>,
-        <P as Backend<Request<J>, Res>>::Stream: Unpin + Send + 'static,
+        P::Stream: Unpin + Send + 'static,
         E: Executor + Clone + Send + 'static + Sync,
         P::Layer: Layer<S>,
-        <<P as Backend<Request<J>, Res>>::Layer as Layer<S>>::Service: Service<Request<J>, Response = Res>,
-        <<P as Backend<Request<J>, Res>>::Layer as Layer<S>>::Service: Send,
-        <<<P as Backend<Request<J>, Res>>::Layer as Layer<S>>::Service as Service<
-            Request<J>,
-        >>::Future: Send,
-        <<<P as Backend<Request<J>, Res>>::Layer as Layer<S>>::Service as Service<
-            Request<J>,
-        >>::Error: Send + Into<BoxDynError> + Sync,
+        <P::Layer as Layer<S>>::Service: Service<Request<Req, Ctx>, Response = Res> + Send,
+        <<P::Layer as Layer<S>>::Service as Service<Request<Req, Ctx>>>::Future: Send,
+        <<P::Layer as Layer<S>>::Service as Service<Request<Req, Ctx>>>::Error:
+            Send + Into<BoxDynError> + Sync,
+        Ctx: Send + 'static + Sync,
     {
         let notifier = Notify::new();
         let service = self.state.service;
-        let backend = self.state.backend;
-        let executor = monitor.executor().clone();
-        let context = monitor.context().clone();
-        let poller = backend
-            .poll::<<<P as Backend<Request<J>, Res>>::Layer as Layer<S>>::Service>(self.id.clone());
-        let default_layer = poller.layer;
-        let service = default_layer.layer(service);
-        let (service, poll_worker) = Buffer::pair(service, instances);
-        let polling = poller.heartbeat.shared();
-        let worker_stream = WorkerStream::new(poller.stream, notifier.clone())
-            .into_future()
-            .shared();
-        let mut workers = Vec::new();
-
-        executor.spawn(poll_worker);
-
-        for instance in 0..instances {
-            workers.push(Self::build_worker_instance(
-                WorkerId::new_with_instance(self.id.name(), instance),
-                service.clone(),
-                executor.clone(),
-                notifier.clone(),
-                polling.clone(),
-                worker_stream.clone(),
-                Some(context.clone()),
-            ));
-        }
-
-        workers
-    }
-    /// Run specified worker instances via a specific executor
-    pub fn with_executor_instances<E, J, Res>(
-        self,
-        instances: usize,
-        executor: E,
-    ) -> Vec<Worker<Context<E>>>
-    where
-        S: Service<Request<J>, Response = Res> + Send + 'static,
-        P: Backend<Request<J>, Res> + 'static,
-        J: Send + 'static + Sync,
-        S::Future: Send,
-        S::Response: 'static + Send + Sync + Serialize,
-        S::Error: Send + Sync + 'static + Into<BoxDynError>,
-        S::Error: Send + Sync + 'static + Into<BoxDynError>,
-        <P as Backend<Request<J>, Res>>::Stream: Unpin + Send + 'static,
-        E: Executor + Clone + Send + 'static + Sync,
-        P::Layer: Layer<S>,
-        <<P as Backend<Request<J>, Res>>::Layer as Layer<S>>::Service: Service<Request<J>, Response = Res>,
-        <<P as Backend<Request<J>, Res>>::Layer as Layer<S>>::Service: Send,
-        <<<P as Backend<Request<J>, Res>>::Layer as Layer<S>>::Service as Service<
-            Request<J>,
-        >>::Future: Send,
-        <<<P as Backend<Request<J>, Res>>::Layer as Layer<S>>::Service as Service<
-            Request<J>,
-        >>::Error: Send + Into<BoxDynError> + Sync,
-    {
-        let worker_id = self.id.clone();
-        let notifier = Notify::new();
-        let service = self.state.service;
         let (service, poll_worker) = Buffer::pair(service, instances);
         let backend = self.state.backend;
-        let poller = backend.poll::<S>(worker_id.clone());
+        let poller = backend.poll::<S>(self.id.clone());
         let polling = poller.heartbeat.shared();
         let worker_stream = WorkerStream::new(poller.stream, notifier.clone())
             .into_future()
            .shared();
+        executor.spawn(poll_worker);
 
-        let mut workers = Vec::new();
-        for instance in 0..instances {
-            workers.push(Self::build_worker_instance(
-                WorkerId::new_with_instance(self.id.name(), instance),
-                service.clone(),
-                executor.clone(),
-                notifier.clone(),
-                polling.clone(),
-                worker_stream.clone(),
-                None,
-            ));
-        }
-        workers
-    }
-    pub(crate) fn build_worker_instance<E, LS, J, Res>(
+        (0..instances)
+            .map(|instance| {
+                Self::build_worker_instance(
+                    WorkerId::new_with_instance(self.id.name(), instance),
+                    service.clone(),
+                    executor.clone(),
+                    notifier.clone(),
+                    polling.clone(),
+                    worker_stream.clone(),
+                    context.clone(),
+                )
+            })
+            .collect()
+    }
+
+    fn build_worker_instance<E, LS, Req, Res, Ctx>(
         id: WorkerId,
         service: LS,
         executor: E,
-        notifier: WorkerNotify<Result<Option<Request<J>>, Error>>,
+        notifier: WorkerNotify<Result<Option<Request<Req, Ctx>>, Error>>,
         polling: Shared<impl Future<Output = ()> + Send + 'static>,
         worker_stream: Shared<impl Future<Output = ()> + Send + 'static>,
         context: Option<MonitorContext>,
     ) -> Worker<Context<E>>
     where
-        LS: Service<Request<J>, Response = Res> + Send + 'static,
+        LS: Service<Request<Req, Ctx>, Response = Res> + Send + 'static,
         LS::Future: Send + 'static,
-        LS::Response: 'static,
+        LS::Response: 'static + Send + Sync + Serialize,
         LS::Error: Send + Sync + Into<BoxDynError> + 'static,
-        P: Backend<Request<J>, Res>,
+        P: Backend<Request<Req, Ctx>, Res>,
         E: Executor + Send + Clone + 'static + Sync,
-        J: Sync + Send + 'static,
+        Req: Sync + Send + 'static,
         S: 'static,
         P: 'static,
+        Ctx: Send + 'static + Sync,
     {
         let instance = id.instance.unwrap_or_default();
         let ctx = Context {
@@ -454,17 +306,119 @@ impl<S, P> Worker<Ready<S, P>> {
         worker
     }
 
-    pub(crate) async fn build_instance<LS, Res>(
+    /// Setup a worker with an executor
+    pub fn with_executor<E, Req, Res, Ctx>(self, executor: E) -> Worker<Context<E>>
+    where
+        S: Service<Request<Req, Ctx>, Response = Res> + Send + 'static,
+        P: Backend<Request<Req, Ctx>, Res> + 'static,
+        Req: Send + 'static + Sync,
+        S::Future: Send,
+        S::Response: 'static + Send + Sync + Serialize,
+        S::Error: Send + Sync + 'static + Into<BoxDynError>,
+        P::Stream: Unpin + Send + 'static,
+        E: Executor + Clone + Send + 'static + Sync,
+        P::Layer: Layer<S>,
+        <P::Layer as Layer<S>>::Service: Service<Request<Req, Ctx>, Response = Res> + Send,
+        <<P::Layer as Layer<S>>::Service as Service<Request<Req, Ctx>>>::Future: Send,
+        <<P::Layer as Layer<S>>::Service as Service<Request<Req, Ctx>>>::Error:
+            Send + Into<BoxDynError> + Sync,
+        Ctx: Send + Sync + 'static,
+    {
+        self.common_worker_setup(executor, None, 1).pop().unwrap()
+    }
+
+    /// Setup a worker with the monitor
+    pub fn with_monitor<E, Req, Res, Ctx>(self, monitor: &Monitor<E>) -> Worker<Context<E>>
+    where
+        S: Service<Request<Req, Ctx>, Response = Res> + Send + 'static,
+        P: Backend<Request<Req, Ctx>, Res> + 'static,
+        Req: Send + 'static + Sync,
+        S::Future: Send,
+        S::Response: 'static + Send + Sync + Serialize,
+        S::Error: Send + Sync + 'static + Into<BoxDynError>,
+        P::Stream: Unpin + Send + 'static,
+        E: Executor + Clone + Send + 'static + Sync,
+        P::Layer: Layer<S>,
+        <P::Layer as Layer<S>>::Service: Service<Request<Req, Ctx>, Response = Res> + Send,
+        <<P::Layer as Layer<S>>::Service as Service<Request<Req, Ctx>>>::Future: Send,
+        <<P::Layer as Layer<S>>::Service as Service<Request<Req, Ctx>>>::Error:
+            Send + Into<BoxDynError> + Sync,
+        Ctx: Send + Sync + 'static,
+    {
+        self.common_worker_setup(
+            monitor.executor().clone(),
+            Some(monitor.context().clone()),
+            1,
+        )
+        .pop()
+        .unwrap()
+    }
+
+    /// Setup instances of the worker with the Monitor
+    pub fn with_monitor_instances<E, Req, Res, Ctx>(
+        self,
+        instances: usize,
+        monitor:
&Monitor, + ) -> Vec>> + where + S: Service, Response = Res> + Send + 'static, + P: Backend, Res> + 'static, + Req: Send + 'static + Sync, + S::Future: Send, + S::Response: 'static + Send + Sync + Serialize, + S::Error: Send + Sync + 'static + Into, + P::Stream: Unpin + Send + 'static, + E: Executor + Clone + Send + 'static + Sync, + P::Layer: Layer, + >::Service: Service, Response = Res> + Send, + <>::Service as Service>>::Future: Send, + <>::Service as Service>>::Error: + Send + Into + Sync, + Ctx: Send + Sync + 'static, + { + self.common_worker_setup( + monitor.executor().clone(), + Some(monitor.context().clone()), + instances, + ) + } + + /// Setup worker instances providing an executor + pub fn with_executor_instances( + self, + instances: usize, + executor: E, + ) -> Vec>> + where + S: Service, Response = Res> + Send + 'static, + P: Backend, Res> + 'static, + Req: Send + 'static + Sync, + S::Future: Send, + S::Response: 'static + Send + Sync + Serialize, + S::Error: Send + Sync + 'static + Into, + P::Stream: Unpin + Send + 'static, + E: Executor + Clone + Send + 'static + Sync, + P::Layer: Layer, + >::Service: Service, Response = Res> + Send, + <>::Service as Service>>::Future: Send, + <>::Service as Service>>::Error: + Send + Into + Sync, + Ctx: Send + Sync + 'static, + { + self.common_worker_setup(executor, None, instances) + } + + pub(crate) async fn build_instance( instance: usize, service: LS, worker: Worker>, - notifier: WorkerNotify>, Error>>, + notifier: WorkerNotify>, Error>>, ) where - LS: Service, Response = Res> + Send + 'static, + LS: Service, Response = Res> + Send + 'static, LS::Future: Send + 'static, LS::Response: 'static, LS::Error: Send + Sync + Into + 'static, - P: Backend, Res>, + P: Backend, Res>, E: Executor + Send + Clone + 'static + Sync, { if let Some(ctx) = worker.state.context.as_ref() { @@ -502,12 +456,20 @@ impl Worker> { Ok(Ok(Some(req))) => { let fut = service.call(req); let worker_id = worker_id.clone(); + let w = worker.clone(); let state = worker.state.clone(); worker.spawn(fut.map(move |res| { if let Err(e) = res { + let error = e.into(); + if let Some(Error::MissingData(e)) = + error.downcast_ref::() + { + w.force_stop(); + unreachable!("Worker missing required context: {}", e); + } if let Some(ctx) = state.context.as_ref() { ctx.notify(Worker { - state: Event::Error(e.into()), + state: Event::Error(error), id: WorkerId::new_with_instance( worker_id.name(), instance, @@ -551,6 +513,7 @@ impl Worker> { } } } + /// Stores the Workers context #[derive(Clone)] pub struct Context { @@ -571,6 +534,12 @@ impl fmt::Debug for Context { } } +impl FromRequest> for Context { + fn from_request(req: &Request) -> Result { + req.get_checked::().cloned() + } +} + pin_project! 
{ struct Tracked { worker: Context, @@ -648,7 +617,7 @@ impl Context { pub fn is_shutting_down(&self) -> bool { self.context .as_ref() - .map(|s| s.shutdown().is_shutting_down()) + .map(|s| !self.is_running() || s.shutdown().is_shutting_down()) .unwrap_or(!self.is_running()) } @@ -661,7 +630,7 @@ impl Context { } } -impl FromData for Context {} +// impl FromRequest for Context {} impl Future for Context { type Output = (); @@ -686,7 +655,7 @@ impl Future for Context { #[cfg(test)] mod tests { - use std::{io, ops::Deref, sync::atomic::AtomicUsize, time::Duration}; + use std::{ops::Deref, sync::atomic::AtomicUsize, time::Duration}; #[derive(Debug, Clone)] struct TokioTestExecutor; @@ -754,15 +723,13 @@ mod tests { } } - async fn task(job: u32, count: Data) -> Result<(), io::Error> { + async fn task(job: u32, count: Data) { count.fetch_add(1, Ordering::Relaxed); if job == ITEMS - 1 { tokio::time::sleep(Duration::from_secs(1)).await; } - Ok(()) } let worker = WorkerBuilder::new("rango-tango") - // .chain(|svc| svc.timeout(Duration::from_millis(500))) .data(Count::default()) .backend(in_memory); let worker = worker.build_fn(task); diff --git a/packages/apalis-cron/src/lib.rs b/packages/apalis-cron/src/lib.rs index afe5402b..9680ec76 100644 --- a/packages/apalis-cron/src/lib.rs +++ b/packages/apalis-cron/src/lib.rs @@ -57,14 +57,13 @@ //! } //! ``` -use apalis_core::data::Extensions; use apalis_core::layers::Identity; use apalis_core::poller::Poller; use apalis_core::request::RequestStream; -use apalis_core::task::task_id::TaskId; +use apalis_core::task::namespace::Namespace; use apalis_core::worker::WorkerId; use apalis_core::Backend; -use apalis_core::{error::Error, request::Request, task::attempt::Attempt}; +use apalis_core::{error::Error, request::Request}; use chrono::{DateTime, TimeZone, Utc}; pub use cron::Schedule; use std::marker::PhantomData; @@ -102,14 +101,14 @@ where } } } -impl CronStream +impl CronStream where - J: From> + Send + Sync + 'static, + Req: From> + Send + Sync + 'static, Tz: TimeZone + Send + Sync + 'static, Tz::Offset: Send + Sync, { /// Convert to consumable - fn into_stream(self) -> RequestStream> { + fn into_stream(self) -> RequestStream> { let timezone = self.timezone.clone(); let stream = async_stream::stream! 
{ let mut schedule = self.schedule.upcoming_owned(timezone.clone()); @@ -120,10 +119,11 @@ where let to_sleep = next - timezone.from_utc_datetime(&Utc::now().naive_utc()); let to_sleep = to_sleep.to_std().map_err(|e| Error::SourceError(Arc::new(e.into())))?; apalis_core::sleep(to_sleep).await; - let mut data = Extensions::new(); - data.insert(TaskId::new()); - data.insert(Attempt::default()); - yield Ok(Some(Request::new_with_data(J::from(timezone.from_utc_datetime(&Utc::now().naive_utc())), data))); + let timestamp = timezone.from_utc_datetime(&Utc::now().naive_utc()); + let namespace = Namespace(format!("{}:{timestamp:?}", self.schedule)); + let mut req = Request::new(Req::from(timestamp)); + req.parts.namespace = Some(namespace); + yield Ok(Some(req)); }, None => { yield Ok(None); @@ -135,13 +135,13 @@ where } } -impl Backend, Res> for CronStream +impl Backend, Res> for CronStream where - J: From> + Send + Sync + 'static, + Req: From> + Send + Sync + 'static, Tz: TimeZone + Send + Sync + 'static, Tz::Offset: Send + Sync, { - type Stream = RequestStream>; + type Stream = RequestStream>; type Layer = Identity; diff --git a/packages/apalis-redis/src/lib.rs b/packages/apalis-redis/src/lib.rs index 8257be3b..01c8c04d 100644 --- a/packages/apalis-redis/src/lib.rs +++ b/packages/apalis-redis/src/lib.rs @@ -30,6 +30,6 @@ mod storage; pub use storage::connect; pub use storage::Config; -pub use storage::RedisJob; +pub use storage::RedisContext; pub use storage::RedisQueueInfo; pub use storage::RedisStorage; diff --git a/packages/apalis-redis/src/storage.rs b/packages/apalis-redis/src/storage.rs index e5e2cf0a..ec595f09 100644 --- a/packages/apalis-redis/src/storage.rs +++ b/packages/apalis-redis/src/storage.rs @@ -1,13 +1,13 @@ use apalis_core::codec::json::JsonCodec; -use apalis_core::data::Extensions; use apalis_core::error::Error; use apalis_core::layers::{Ack, AckLayer, Service}; use apalis_core::poller::controller::Controller; use apalis_core::poller::stream::BackendStream; use apalis_core::poller::Poller; -use apalis_core::request::{Request, RequestStream}; +use apalis_core::request::{Parts, Request, RequestStream}; +use apalis_core::response::Response; +use apalis_core::service_fn::FromRequest; use apalis_core::storage::Storage; -use apalis_core::task::attempt::Attempt; use apalis_core::task::namespace::Namespace; use apalis_core::task::task_id::TaskId; use apalis_core::worker::WorkerId; @@ -92,90 +92,20 @@ struct RedisScript { vacuum: Script, } -/// The actual structure of a Redis job -#[derive(Clone, Debug, Serialize, Deserialize)] -pub struct RedisJob { - /// The job context - ctx: Context, - /// The inner job - job: J, -} - -impl RedisJob { - /// Creates a new RedisJob. - pub fn new(job: J, ctx: Context) -> Self { - RedisJob { ctx, job } - } - - /// Gets a reference to the context. - pub fn ctx(&self) -> &Context { - &self.ctx - } - - /// Gets a mutable reference to the context. - pub fn ctx_mut(&mut self) -> &mut Context { - &mut self.ctx - } - - /// Sets the context. - pub fn set_ctx(&mut self, ctx: Context) { - self.ctx = ctx; - } - - /// Gets a reference to the job. - pub fn job(&self) -> &J { - &self.job - } - - /// Gets a mutable reference to the job. - pub fn job_mut(&mut self) -> &mut J { - &mut self.job - } - - /// Sets the job. - pub fn set_job(&mut self, job: J) { - self.job = job; - } - - /// Combines context and job into a tuple. 
- pub fn into_tuple(self) -> (Context, J) { - (self.ctx, self.job) - } -} - -impl From> for Request { - fn from(val: RedisJob) -> Self { - let mut data = Extensions::new(); - data.insert(val.ctx.id.clone()); - data.insert(val.ctx.attempts.clone()); - data.insert(val.ctx); - Request::new_with_data(val.job, data) - } -} - -impl TryFrom> for RedisJob { - type Error = RedisError; - fn try_from(val: Request) -> Result { - let ctx = val - .get::() - .cloned() - .ok_or((ErrorKind::IoError, "Missing Context"))?; - Ok(RedisJob { - job: val.take(), - ctx, - }) - } -} - +/// The context for a redis storage job #[derive(Clone, Debug, Serialize, Deserialize, Default)] -pub struct Context { - id: TaskId, - attempts: Attempt, +pub struct RedisContext { max_attempts: usize, lock_by: Option, run_at: Option, } +impl FromRequest> for RedisContext { + fn from_request(req: &Request) -> Result { + Ok(req.parts.context.clone()) + } +} + /// Config for a [RedisStorage] #[derive(Clone, Debug)] pub struct Config { @@ -448,18 +378,18 @@ impl RedisStorage { } } -impl Backend, Res> for RedisStorage +impl Backend, Res> for RedisStorage where T: Serialize + DeserializeOwned + Sync + Send + Unpin + 'static, Conn: ConnectionLike + Send + Sync + 'static, Res: Send + Serialize + Sync + 'static, C: Codec> + Send + 'static, { - type Stream = BackendStream>>; + type Stream = BackendStream>>; - type Layer = AckLayer)>, T, Res>; + type Layer = AckLayer)>, T, RedisContext, Res>; - fn poll>>( + fn poll>>( mut self, worker: WorkerId, ) -> Poller { @@ -468,7 +398,7 @@ where let layer = AckLayer::new(ack); let controller = self.controller.clone(); let config = self.config.clone(); - let stream: RequestStream> = Box::pin(rx); + let stream: RequestStream> = Box::pin(rx); let heartbeat = async move { let mut keep_alive_stm = apalis_core::interval::interval(config.keep_alive).fuse(); @@ -536,13 +466,9 @@ where Conn: ConnectionLike + Send + Sync + 'static, C: Codec> + Send, { - type Context = Context; + type Context = RedisContext; type AckError = RedisError; - async fn ack( - &mut self, - ctx: &Self::Context, - res: &Result, - ) -> Result<(), RedisError> { + async fn ack(&mut self, ctx: &Self::Context, res: &Response) -> Result<(), RedisError> { let inflight_set = format!( "{}:{}", self.config.inflight_jobs_set(), @@ -550,8 +476,8 @@ where ); let now: i64 = Utc::now().timestamp(); - - match res { + let task_id = res.task_id.to_string(); + match &res.inner { Ok(success_res) => { let done_job = self.scripts.done_job.clone(); let done_jobs_set = &self.config.done_jobs_set(); @@ -559,7 +485,7 @@ where .key(inflight_set) .key(done_jobs_set) .key(self.config.job_data_hash()) - .arg(ctx.id.to_string()) + .arg(task_id) .arg(now) .arg(C::encode(success_res).map_err(Into::into).unwrap()) .invoke_async(&mut self.conn) @@ -567,27 +493,26 @@ where } Err(e) => match e { Error::Abort(e) => { - let retry_job = self.scripts.retry_job.clone(); - let retry_jobs_set = &self.config.scheduled_jobs_set(); - retry_job + let kill_job = self.scripts.kill_job.clone(); + let kill_jobs_set = &self.config.dead_jobs_set(); + kill_job .key(inflight_set) - .key(retry_jobs_set) + .key(kill_jobs_set) .key(self.config.job_data_hash()) - .arg(ctx.id.to_string()) + .arg(task_id) .arg(now) .arg(e.to_string()) .invoke_async(&mut self.conn) .await } - _ => { - let kill_job = self.scripts.kill_job.clone(); - let kill_jobs_set = &self.config.dead_jobs_set(); - kill_job + let retry_job = self.scripts.retry_job.clone(); + let retry_jobs_set = &self.config.scheduled_jobs_set(); + 
retry_job .key(inflight_set) - .key(kill_jobs_set) + .key(retry_jobs_set) .key(self.config.job_data_hash()) - .arg(ctx.id.to_string()) + .arg(task_id) .arg(now) .arg(e.to_string()) .invoke_async(&mut self.conn) @@ -604,14 +529,17 @@ where Conn: ConnectionLike + Send + Sync + 'static, C: Codec>, { - async fn fetch_next(&mut self, worker_id: &WorkerId) -> Result>, RedisError> { + async fn fetch_next( + &mut self, + worker_id: &WorkerId, + ) -> Result>, RedisError> { let fetch_jobs = self.scripts.get_jobs.clone(); let consumers_set = self.config.consumers_set(); let active_jobs_list = self.config.active_jobs_list(); let job_data_hash = self.config.job_data_hash(); let inflight_set = format!("{}:{}", self.config.inflight_jobs_set(), worker_id); let signal_list = self.config.signal_list(); - let namespace = self.config.namespace.clone(); + let namespace = &self.config.namespace; let result = fetch_jobs .key(&consumers_set) @@ -629,11 +557,10 @@ where let mut processed = vec![]; for job in jobs { let bytes = deserialize_job(&job)?; - let mut request: RedisJob = C::decode(bytes.to_vec()) + let mut request: Request = C::decode(bytes.to_vec()) .map_err(|e| build_error(&e.into().to_string()))?; - request.ctx_mut().lock_by = Some(worker_id.clone()); - let mut request: Request = request.into(); - request.insert(Namespace(namespace.clone())); + request.parts.context.lock_by = Some(worker_id.clone()); + request.parts.namespace = Some(Namespace(namespace.clone())); processed.push(request) } Ok(processed) @@ -692,53 +619,50 @@ where { type Job = T; type Error = RedisError; - type Identifier = TaskId; + type Context = RedisContext; - async fn push(&mut self, job: Self::Job) -> Result { + async fn push_request( + &mut self, + req: Request, + ) -> Result, RedisError> { let conn = &mut self.conn; let push_job = self.scripts.push_job.clone(); let job_data_hash = self.config.job_data_hash(); let active_jobs_list = self.config.active_jobs_list(); let signal_list = self.config.signal_list(); - let job_id = TaskId::new(); - let ctx = Context { - id: job_id.clone(), - ..Default::default() - }; - let job = C::encode(&RedisJob { ctx, job }) + + let job = C::encode(&req) .map_err(|e| (ErrorKind::IoError, "Encode error", e.into().to_string()))?; push_job .key(job_data_hash) .key(active_jobs_list) .key(signal_list) - .arg(job_id.to_string()) + .arg(req.parts.task_id.to_string()) .arg(job) .invoke_async(conn) .await?; - Ok(job_id.clone()) + Ok(req.parts) } - async fn schedule(&mut self, job: Self::Job, on: i64) -> Result { + async fn schedule_request( + &mut self, + req: Request, + on: i64, + ) -> Result, RedisError> { let schedule_job = self.scripts.schedule_job.clone(); let job_data_hash = self.config.job_data_hash(); let scheduled_jobs_set = self.config.scheduled_jobs_set(); - let job_id = TaskId::new(); - let ctx = Context { - id: job_id.clone(), - ..Default::default() - }; - let job = RedisJob { job, ctx }; - let job = C::encode(&job) + let job = C::encode(&req) .map_err(|e| (ErrorKind::IoError, "Encode error", e.into().to_string()))?; schedule_job .key(job_data_hash) .key(scheduled_jobs_set) - .arg(job_id.to_string()) + .arg(req.parts.task_id.to_string()) .arg(job) .arg(on) .invoke_async(&mut self.conn) .await?; - Ok(job_id.clone()) + Ok(req.parts) } async fn len(&mut self) -> Result { @@ -758,7 +682,7 @@ where async fn fetch_by_id( &mut self, job_id: &TaskId, - ) -> Result>, RedisError> { + ) -> Result>, RedisError> { let data: Value = redis::cmd("HMGET") .arg(&self.config.job_data_hash()) .arg(job_id.to_string()) 
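With the hunks above, acknowledging a job now carries a full `Response` (result plus task id and attempt), and `push`/`schedule` resolve to the request's `Parts` instead of a bare `TaskId`. A minimal sketch of the new calling convention against the Redis backend; the `Email` field names are assumed from the bundled email-service example:

    use apalis_core::storage::Storage;
    use apalis_redis::RedisStorage;
    use email_service::Email;

    // Sketch only: assumes a connected `RedisStorage<Email>`.
    async fn enqueue(
        storage: &mut RedisStorage<Email>,
    ) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
        let email = Email {
            to: "user@example.com".to_string(),
            subject: "Welcome".to_string(),
            text: "Hello!".to_string(),
        };
        // `push` now resolves to `Parts<RedisContext>`: the generated task id,
        // attempt counter and backend context all travel back to the caller.
        let parts = storage.push(email).await?;
        println!("queued task {}", parts.task_id);
        Ok(())
    }

`push_request` and `schedule_request` return the same `Parts` value when a pre-built `Request` is pushed.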
@@ -766,34 +690,32 @@ where .await?; let bytes = deserialize_job(&data)?; - let inner: RedisJob = C::decode(bytes.to_vec()) + let inner: Request = C::decode(bytes.to_vec()) .map_err(|e| (ErrorKind::IoError, "Decode error", e.into().to_string()))?; - Ok(Some(inner.into())) + Ok(Some(inner)) } - async fn update(&mut self, job: Request) -> Result<(), RedisError> { - let job: RedisJob = job.try_into()?; + async fn update(&mut self, job: Request) -> Result<(), RedisError> { + let task_id = job.parts.task_id.to_string(); let bytes = C::encode(&job) .map_err(|e| (ErrorKind::IoError, "Encode error", e.into().to_string()))?; let _: i64 = redis::cmd("HSET") .arg(&self.config.job_data_hash()) - .arg(job.ctx.id.to_string()) + .arg(task_id) .arg(bytes) .query_async(&mut self.conn) .await?; Ok(()) } - async fn reschedule(&mut self, job: Request, wait: Duration) -> Result<(), RedisError> { + async fn reschedule( + &mut self, + job: Request, + wait: Duration, + ) -> Result<(), RedisError> { let schedule_job = self.scripts.schedule_job.clone(); - let job_id = job - .get::() - .cloned() - .ok_or((ErrorKind::IoError, "Missing TaskId"))?; - let worker_id = job - .get::() - .cloned() - .ok_or((ErrorKind::IoError, "Missing WorkerId"))?; - let job = C::encode::>(job.try_into()?) + let job_id = &job.parts.task_id; + let worker_id = &job.parts.context.lock_by.clone().unwrap(); + let job = C::encode(&job) .map_err(|e| (ErrorKind::IoError, "Encode error", e.into().to_string()))?; let job_data_hash = self.config.job_data_hash(); let scheduled_jobs_set = self.config.scheduled_jobs_set(); @@ -859,7 +781,7 @@ where let conn = &mut self.conn; match res { Some(job) => { - let attempt = job.get::().cloned().unwrap_or_default(); + let attempt = &job.parts.attempt; if attempt.current() >= self.config.max_retries { redis::cmd("ZADD") .arg(failed_jobs_set) @@ -870,7 +792,7 @@ where self.kill(worker_id, task_id).await?; return Ok(1); } - let job = C::encode::>(job.try_into()?) 
+ let job = C::encode(job) .map_err(|e| (ErrorKind::IoError, "Encode error", e.into().to_string()))?; let res: Result = retry_job @@ -1024,7 +946,7 @@ mod tests { async fn consume_one( storage: &mut RedisStorage, worker_id: &WorkerId, - ) -> Request { + ) -> Request { let stream = storage.fetch_next(worker_id); stream .await @@ -1052,7 +974,10 @@ mod tests { storage.push(email).await.expect("failed to push a job"); } - async fn get_job(storage: &mut RedisStorage, job_id: &TaskId) -> Request { + async fn get_job( + storage: &mut RedisStorage, + job_id: &TaskId, + ) -> Request { storage .fetch_by_id(job_id) .await @@ -1078,14 +1003,17 @@ mod tests { let worker_id = register_worker(&mut storage).await; let job = consume_one(&mut storage, &worker_id).await; - let ctx = job.get::().unwrap(); - + let ctx = &job.parts.context; + let res = 42usize; storage - .ack(ctx, &Ok(())) + .ack( + ctx, + &Response::success(res, job.parts.task_id.clone(), job.parts.attempt.clone()), + ) .await .expect("failed to acknowledge the job"); - let _job = get_job(&mut storage, &ctx.id).await; + let _job = get_job(&mut storage, &job.parts.task_id).await; } #[tokio::test] @@ -1097,7 +1025,7 @@ mod tests { let worker_id = register_worker(&mut storage).await; let job = consume_one(&mut storage, &worker_id).await; - let job_id = &job.get::().unwrap().id; + let job_id = &job.parts.task_id; storage .kill(&worker_id, &job_id) diff --git a/packages/apalis-sql/src/context.rs b/packages/apalis-sql/src/context.rs index fbbf77ee..44cb0632 100644 --- a/packages/apalis-sql/src/context.rs +++ b/packages/apalis-sql/src/context.rs @@ -1,5 +1,6 @@ use apalis_core::error::Error; -use apalis_core::task::{attempt::Attempt, task_id::TaskId}; +use apalis_core::request::Request; +use apalis_core::service_fn::FromRequest; use apalis_core::worker::WorkerId; use chrono::{DateTime, Utc}; use serde::{Deserialize, Serialize}; @@ -7,12 +8,10 @@ use std::{fmt, str::FromStr}; /// The context for a job is represented here /// Used to provide a context for a job with an sql backend -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, Default)] pub struct SqlContext { - id: TaskId, status: State, run_at: DateTime, - attempts: Attempt, max_attempts: i32, last_error: Option, lock_at: Option, @@ -21,15 +20,13 @@ pub struct SqlContext { } impl SqlContext { - /// Build a new context with defaults given an ID. - pub fn new(id: TaskId) -> Self { + /// Build a new context with defaults + pub fn new() -> Self { SqlContext { - id, status: State::Pending, run_at: Utc::now(), lock_at: None, done_at: None, - attempts: Default::default(), max_attempts: 25, last_error: None, lock_by: None, @@ -46,21 +43,6 @@ impl SqlContext { self.max_attempts } - /// Get the id for a job - pub fn id(&self) -> &TaskId { - &self.id - } - - /// Gets the current attempts for a job. 
Default 0 - pub fn attempts(&self) -> &Attempt { - &self.attempts - } - - /// Set the number of attempts - pub fn set_attempts(&mut self, attempts: i32) { - self.attempts = Attempt::new_with_value(attempts.try_into().unwrap()); - } - /// Get the time a job was done pub fn done_at(&self) -> &Option { &self.done_at @@ -120,10 +102,11 @@ impl SqlContext { pub fn set_last_error(&mut self, error: Option) { self.last_error = error; } +} - /// Record an attempt to execute the request - pub fn record_attempt(&mut self) { - self.attempts.increment(); +impl FromRequest> for SqlContext { + fn from_request(req: &Request) -> Result { + Ok(req.parts.context.clone()) } } @@ -159,7 +142,7 @@ impl FromStr for State { "Done" => Ok(State::Done), "Failed" => Ok(State::Failed), "Killed" => Ok(State::Killed), - _ => Err(Error::MissingContext("Invalid Job state".to_string())), + _ => Err(Error::MissingData("Invalid Job state".to_string())), } } } diff --git a/packages/apalis-sql/src/from_row.rs b/packages/apalis-sql/src/from_row.rs index bcc2a655..d242e4ae 100644 --- a/packages/apalis-sql/src/from_row.rs +++ b/packages/apalis-sql/src/from_row.rs @@ -1,5 +1,7 @@ +use apalis_core::request::Parts; +use apalis_core::task::attempt::Attempt; use apalis_core::task::task_id::TaskId; -use apalis_core::{data::Extensions, request::Request, worker::WorkerId}; +use apalis_core::{request::Request, worker::WorkerId}; use serde::{Deserialize, Serialize}; use sqlx::{Decode, Type}; @@ -8,60 +10,43 @@ use crate::context::SqlContext; /// Wrapper for [Request] #[derive(Debug, Clone, Serialize, Deserialize)] pub struct SqlRequest { - req: T, - context: SqlContext, + pub(crate) req: Request, } impl SqlRequest { /// Creates a new SqlRequest. - pub fn new(req: T, context: SqlContext) -> Self { - SqlRequest { req, context } + pub fn new(req: Request) -> Self { + SqlRequest { req } } /// Gets a reference to the request. pub fn req(&self) -> &T { - &self.req + &self.req.args } /// Gets a mutable reference to the request. pub fn req_mut(&mut self) -> &mut T { - &mut self.req + &mut self.req.args } /// Sets the request. pub fn set_req(&mut self, req: T) { - self.req = req; + self.req.args = req; } /// Gets a reference to the context. pub fn context(&self) -> &SqlContext { - &self.context + &self.req.parts.context } /// Gets a mutable reference to the context. pub fn context_mut(&mut self) -> &mut SqlContext { - &mut self.context + &mut self.req.parts.context } /// Sets the context. pub fn set_context(&mut self, context: SqlContext) { - self.context = context; - } - - /// Combines request and context into a tuple. 
- pub fn into_tuple(self) -> (T, SqlContext) { - (self.req, self.context) - } -} - -impl From> for Request { - fn from(val: SqlRequest) -> Self { - let mut data = Extensions::new(); - data.insert(val.context.id().clone()); - data.insert(val.context.attempts().clone()); - data.insert(val.context); - - Request::new_with_data(val.req, data) + self.req.parts.context = context; } } @@ -76,19 +61,22 @@ impl<'r, T: Decode<'r, sqlx::Sqlite> + Type> use std::str::FromStr; let job: T = row.try_get("job")?; - let id: TaskId = + let task_id: TaskId = TaskId::from_str(row.try_get("id")?).map_err(|e| sqlx::Error::ColumnDecode { index: "id".to_string(), source: Box::new(e), })?; - let mut context = crate::context::SqlContext::new(id); + let mut parts = Parts::::default(); + parts.task_id = task_id; + + let attempt: i32 = row.try_get("attempts").unwrap_or(0); + parts.attempt = Attempt::new_with_value(attempt as usize); + + let mut context = crate::context::SqlContext::new(); let run_at: i64 = row.try_get("run_at")?; context.set_run_at(DateTime::from_timestamp(run_at, 0).unwrap_or_default()); - let attempts = row.try_get("attempts").unwrap_or(0); - context.set_attempts(attempts); - let max_attempts = row.try_get("max_attempts").unwrap_or(25); context.set_max_attempts(max_attempts); @@ -118,8 +106,10 @@ impl<'r, T: Decode<'r, sqlx::Sqlite> + Type> source: "Could not parse lock_by as a WorkerId".into(), })?, ); - - Ok(SqlRequest { context, req: job }) + parts.context = context; + Ok(SqlRequest { + req: Request::new_with_parts(job, parts), + }) } } @@ -134,19 +124,21 @@ impl<'r, T: Decode<'r, sqlx::Postgres> + Type> use std::str::FromStr; let job: T = row.try_get("job")?; - let id: TaskId = + let task_id: TaskId = TaskId::from_str(row.try_get("id")?).map_err(|e| sqlx::Error::ColumnDecode { index: "id".to_string(), source: Box::new(e), })?; - let mut context = SqlContext::new(id); + let mut parts = Parts::::default(); + parts.task_id = task_id; + + let attempt: i32 = row.try_get("attempts").unwrap_or(0); + parts.attempt = Attempt::new_with_value(attempt as usize); + let mut context = SqlContext::new(); let run_at = row.try_get("run_at")?; context.set_run_at(run_at); - let attempts = row.try_get("attempts").unwrap_or(0); - context.set_attempts(attempts); - let max_attempts = row.try_get("max_attempts").unwrap_or(25); context.set_max_attempts(max_attempts); @@ -176,7 +168,10 @@ impl<'r, T: Decode<'r, sqlx::Postgres> + Type> source: "Could not parse lock_by as a WorkerId".into(), })?, ); - Ok(SqlRequest { context, req: job }) + parts.context = context; + Ok(SqlRequest { + req: Request::new_with_parts(job, parts), + }) } } @@ -189,19 +184,22 @@ impl<'r, T: Decode<'r, sqlx::MySql> + Type> sqlx::FromRow<'r, sqlx: use sqlx::Row; use std::str::FromStr; let job: T = row.try_get("job")?; - let id: TaskId = + let task_id: TaskId = TaskId::from_str(row.try_get("id")?).map_err(|e| sqlx::Error::ColumnDecode { index: "id".to_string(), source: Box::new(e), })?; - let mut context = SqlContext::new(id); + let mut parts = Parts::::default(); + parts.task_id = task_id; + + let attempt: i32 = row.try_get("attempts").unwrap_or(0); + parts.attempt = Attempt::new_with_value(attempt as usize); + + let mut context = SqlContext::new(); let run_at = row.try_get("run_at")?; context.set_run_at(run_at); - let attempts = row.try_get("attempts").unwrap_or(0); - context.set_attempts(attempts); - let max_attempts = row.try_get("max_attempts").unwrap_or(25); context.set_max_attempts(max_attempts); @@ -231,7 +229,9 @@ impl<'r, T: Decode<'r, 
sqlx::MySql> + Type> sqlx::FromRow<'r, sqlx: source: "Could not parse lock_by as a WorkerId".into(), })?, ); - - Ok(SqlRequest { context, req: job }) + parts.context = context; + Ok(SqlRequest { + req: Request::new_with_parts(job, parts), + }) } } diff --git a/packages/apalis-sql/src/lib.rs b/packages/apalis-sql/src/lib.rs index b671bb1b..012dfcd1 100644 --- a/packages/apalis-sql/src/lib.rs +++ b/packages/apalis-sql/src/lib.rs @@ -12,6 +12,7 @@ use std::time::Duration; +use apalis_core::error::Error; use context::State; /// The context of the sql job @@ -130,11 +131,11 @@ impl Config { } /// Calculates the status from a result -pub fn calculate_status(res: &Result) -> State { +pub fn calculate_status(res: &Result) -> State { match res { Ok(_) => State::Done, Err(e) => match &e { - _ if e.to_string().starts_with("AbortError") => State::Killed, + Error::Abort(_) => State::Killed, _ => State::Failed, }, } @@ -144,7 +145,8 @@ pub fn calculate_status(res: &Result) -> St #[macro_export] macro_rules! sql_storage_tests { ($setup:path, $storage_type:ty, $job_type:ty) => { - async fn setup_test_wrapper() -> TestWrapper<$storage_type, $job_type, ()> { + async fn setup_test_wrapper( + ) -> TestWrapper<$storage_type, Request<$job_type, SqlContext>, ()> { let (mut t, poller) = TestWrapper::new_with_service( $setup().await, apalis_core::service_fn::service_fn(email_service::send_email), @@ -166,10 +168,14 @@ macro_rules! sql_storage_tests { let (job_id, res) = storage.execute_next().await; assert_eq!(res, Err("AbortError: Invalid character.".to_owned())); apalis_core::sleep(Duration::from_secs(1)).await; - let job = storage.fetch_by_id(&job_id).await.unwrap().unwrap(); - let ctx = job.get::().unwrap(); + let job = storage + .fetch_by_id(&job_id) + .await + .unwrap() + .expect("No job found"); + let ctx = job.parts.context; assert_eq!(*ctx.status(), State::Killed); - assert!(ctx.done_at().is_some()); + // assert!(ctx.done_at().is_some()); assert_eq!( ctx.last_error().clone().unwrap(), "{\"Err\":\"AbortError: Invalid character.\"}" @@ -188,7 +194,7 @@ macro_rules! sql_storage_tests { assert_eq!(res, Ok("()".to_owned())); apalis_core::sleep(Duration::from_secs(1)).await; let job = storage.fetch_by_id(&job_id).await.unwrap().unwrap(); - let ctx = job.get::().unwrap(); + let ctx = job.parts.context; assert_eq!(*ctx.status(), State::Done); assert!(ctx.done_at().is_some()); } @@ -209,9 +215,9 @@ macro_rules! 
sql_storage_tests { ); apalis_core::sleep(Duration::from_secs(1)).await; let job = storage.fetch_by_id(&job_id).await.unwrap().unwrap(); - let ctx = job.get::().unwrap(); + let ctx = job.parts.context; assert_eq!(*ctx.status(), State::Failed); - assert!(ctx.attempts().current() >= 1); + assert!(job.parts.attempt.current() >= 1); assert_eq!( ctx.last_error().clone().unwrap(), "{\"Err\":\"FailedError: Missing separator character '@'.\"}" diff --git a/packages/apalis-sql/src/mysql.rs b/packages/apalis-sql/src/mysql.rs index 1a88cf50..f42dca31 100644 --- a/packages/apalis-sql/src/mysql.rs +++ b/packages/apalis-sql/src/mysql.rs @@ -5,7 +5,8 @@ use apalis_core::notify::Notify; use apalis_core::poller::controller::Controller; use apalis_core::poller::stream::BackendStream; use apalis_core::poller::Poller; -use apalis_core::request::{Request, RequestStream}; +use apalis_core::request::{Parts, Request, RequestStream}; +use apalis_core::response::Response; use apalis_core::storage::Storage; use apalis_core::task::namespace::Namespace; use apalis_core::task::task_id::TaskId; @@ -43,7 +44,7 @@ where controller: Controller, config: Config, codec: PhantomData, - ack_notify: Notify<(SqlContext, Result)>, + ack_notify: Notify<(SqlContext, Response)>, } impl fmt::Debug for MysqlStorage @@ -137,13 +138,10 @@ where worker_id: &WorkerId, interval: Duration, buffer_size: usize, - config: &Config, - ) -> impl Stream>, sqlx::Error>> { + ) -> impl Stream>, sqlx::Error>> { let pool = self.pool.clone(); let worker_id = worker_id.to_string(); - let config = config.clone(); try_stream! { - let pool = pool.clone(); let buffer_size = u32::try_from(buffer_size) .map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::InvalidInput, e)))?; loop { @@ -180,13 +178,12 @@ where for job in jobs { yield { - let (req, ctx) = job.into_tuple(); + let (req, ctx) = job.req.take_parts(); let req = C::decode(req) .map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::InvalidData, e))) .unwrap(); - let req = SqlRequest::new(req, ctx); - let mut req: Request = req.into(); - req.insert(Namespace(config.namespace.clone())); + let mut req: Request = Request::new_with_parts(req, ctx); + req.parts.namespace = Some(Namespace(self.config.namespace.clone())); Some(req) } } @@ -228,50 +225,56 @@ where type Error = sqlx::Error; - type Identifier = TaskId; + type Context = SqlContext; - async fn push(&mut self, job: Self::Job) -> Result { - let id = TaskId::new(); + async fn push_request( + &mut self, + job: Request, + ) -> Result, sqlx::Error> { + let (args, parts) = job.take_parts(); let query = "INSERT INTO jobs VALUES (?, ?, ?, 'Pending', 0, 25, now(), NULL, NULL, NULL, NULL)"; let pool = self.pool.clone(); - let job = C::encode(job) + let job = C::encode(args) .map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::InvalidData, e)))?; let job_type = self.config.namespace.clone(); sqlx::query(query) .bind(job) - .bind(id.to_string()) + .bind(parts.task_id.to_string()) .bind(job_type.to_string()) .execute(&pool) .await?; - Ok(id) + Ok(parts) } - async fn schedule(&mut self, job: Self::Job, on: i64) -> Result { + async fn schedule_request( + &mut self, + req: Request, + on: i64, + ) -> Result, sqlx::Error> { let query = "INSERT INTO jobs VALUES (?, ?, ?, 'Pending', 0, 25, ?, NULL, NULL, NULL, NULL)"; let pool = self.pool.clone(); - let id = TaskId::new(); - let job = C::encode(job) + let args = C::encode(&req.args) .map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::InvalidData, e)))?; let job_type = 
self.config.namespace.clone(); sqlx::query(query) - .bind(job) - .bind(id.to_string()) + .bind(args) + .bind(req.parts.task_id.to_string()) .bind(job_type) .bind(on) .execute(&pool) .await?; - Ok(id) + Ok(req.parts) } async fn fetch_by_id( &mut self, job_id: &TaskId, - ) -> Result>, sqlx::Error> { + ) -> Result>, sqlx::Error> { let pool = self.pool.clone(); let fetch_query = "SELECT * FROM jobs WHERE id = ?"; @@ -282,12 +285,11 @@ where match res { None => Ok(None), Some(job) => Ok(Some({ - let (req, ctx) = job.into_tuple(); + let (req, parts) = job.req.take_parts(); let req = C::decode(req) .map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::InvalidData, e)))?; - let req = SqlRequest::new(req, ctx); - let mut req: Request = req.into(); - req.insert(Namespace(self.config.namespace.clone())); + let mut req = Request::new_with_parts(req, parts); + req.parts.namespace = Some(Namespace(self.config.namespace.clone())); req })), } @@ -301,12 +303,13 @@ where record.try_get("count") } - async fn reschedule(&mut self, job: Request, wait: Duration) -> Result<(), sqlx::Error> { + async fn reschedule( + &mut self, + job: Request, + wait: Duration, + ) -> Result<(), sqlx::Error> { let pool = self.pool.clone(); - let job_id = job.get::().ok_or(sqlx::Error::Io(io::Error::new( - io::ErrorKind::InvalidData, - "Missing TaskId", - )))?; + let job_id = job.parts.task_id.clone(); let wait: i64 = wait .as_secs() @@ -324,21 +327,16 @@ where Ok(()) } - async fn update(&mut self, job: Request) -> Result<(), sqlx::Error> { + async fn update(&mut self, job: Request) -> Result<(), sqlx::Error> { let pool = self.pool.clone(); - let ctx = job - .get::() - .ok_or(sqlx::Error::Io(io::Error::new( - io::ErrorKind::InvalidData, - "Missing TaskId", - )))?; + let ctx = job.parts.context; let status = ctx.status().to_string(); - let attempts = ctx.attempts(); + let attempts = job.parts.attempt; let done_at = *ctx.done_at(); let lock_by = ctx.lock_by().clone(); let lock_at = *ctx.lock_at(); let last_error = ctx.last_error().clone(); - let job_id = ctx.id(); + let job_id = job.parts.task_id; let mut tx = pool.acquire().await?; let query = "UPDATE jobs SET status = ?, attempts = ?, done_at = ?, lock_by = ?, lock_at = ?, last_error = ? WHERE id = ?"; @@ -372,14 +370,14 @@ where } } -impl Backend, Res> for MysqlStorage +impl Backend, Res> for MysqlStorage where - T: Serialize + DeserializeOwned + Sync + Send + Unpin + 'static, + Req: Serialize + DeserializeOwned + Sync + Send + Unpin + 'static, C: Debug + Codec + Clone + Send + 'static, { - type Stream = BackendStream>>; + type Stream = BackendStream>>; - type Layer = AckLayer, T, Res>; + type Layer = AckLayer, Req, SqlContext, Res>; fn poll(self, worker: WorkerId) -> Poller { let layer = AckLayer::new(self.clone()); @@ -389,7 +387,7 @@ where let ack_notify = self.ack_notify.clone(); let mut hb_storage = self.clone(); let stream = self - .stream_jobs(&worker, config.poll_interval, config.buffer_size, &config) + .stream_jobs(&worker, config.poll_interval, config.buffer_size) .map_err(|e| Error::SourceError(Arc::new(Box::new(e)))); let stream = BackendStream::new(stream.boxed(), controller); @@ -404,13 +402,13 @@ where let query = "UPDATE jobs SET status = ?, done_at = now(), last_error = ?, attempts = ? WHERE id = ? 
AND lock_by = ?"; let query = sqlx::query(query); let query = query - .bind(calculate_status(&res).to_string()) + .bind(calculate_status(&res.inner).to_string()) .bind( - serde_json::to_string(&res.as_ref().map_err(|e| e.to_string())) + serde_json::to_string(&res.inner.as_ref().map_err(|e| e.to_string())) .unwrap(), ) - .bind(ctx.attempts().current() as u64 + 1) - .bind(ctx.id().to_string()) + .bind(res.attempt.current() as u64 + 1) + .bind(res.task_id.to_string()) .bind(ctx.lock_by().as_ref().unwrap().to_string()); if let Err(e) = query.execute(&pool).await { error!("Ack failed: {e}"); @@ -445,21 +443,13 @@ where T: Sync + Send, Res: Serialize + Send + 'static + Sync, C: Codec + Send, + C::Error: Debug, { type Context = SqlContext; type AckError = sqlx::Error; - async fn ack( - &mut self, - ctx: &Self::Context, - res: &Result, - ) -> Result<(), sqlx::Error> { + async fn ack(&mut self, ctx: &Self::Context, res: &Response) -> Result<(), sqlx::Error> { self.ack_notify - .notify(( - ctx.clone(), - res.as_ref() - .map_err(|c| c.clone()) - .and_then(|r| C::encode(r).map_err(|e| Error::SourceError(Arc::new(e.into())))), - )) + .notify((ctx.clone(), res.map(|res| C::encode(res).unwrap()))) .map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::BrokenPipe, e)))?; Ok(()) @@ -536,7 +526,6 @@ mod tests { use crate::sql_storage_tests; use super::*; - use apalis_core::task::attempt::Attempt; use apalis_core::test_utils::DummyService; use email_service::Email; @@ -587,13 +576,11 @@ mod tests { async fn consume_one( storage: &mut MysqlStorage, worker_id: &WorkerId, - ) -> Request { - let mut stream = storage.clone().stream_jobs( - worker_id, - std::time::Duration::from_secs(10), - 1, - &Config::default(), - ); + ) -> Request { + let mut stream = + storage + .clone() + .stream_jobs(worker_id, std::time::Duration::from_secs(10), 1); stream .next() .await @@ -633,7 +620,10 @@ mod tests { storage.push(email).await.expect("failed to push a job"); } - async fn get_job(storage: &mut MysqlStorage, job_id: &TaskId) -> Request { + async fn get_job( + storage: &mut MysqlStorage, + job_id: &TaskId, + ) -> Request { // add a slight delay to allow background actions like ack to complete apalis_core::sleep(Duration::from_secs(1)).await; storage @@ -651,7 +641,7 @@ mod tests { let worker_id = register_worker(&mut storage).await; let job = consume_one(&mut storage, &worker_id).await; - let ctx = job.get::().unwrap(); + let ctx = job.parts.context; // TODO: Fix assertions assert_eq!(*ctx.status(), State::Running); assert_eq!(*ctx.lock_by(), Some(worker_id.clone())); @@ -668,8 +658,7 @@ mod tests { let job = consume_one(&mut storage, &worker_id).await; - let ctx = job.get::().unwrap(); - let job_id = ctx.id(); + let job_id = &job.parts.task_id; storage .kill(&worker_id, job_id) @@ -677,7 +666,7 @@ mod tests { .expect("failed to kill job"); let job = get_job(&mut storage, job_id).await; - let ctx = job.get::().unwrap(); + let ctx = job.parts.context; // TODO: Fix assertions assert_eq!(*ctx.status(), State::Killed); assert!(ctx.done_at().is_some()); @@ -705,15 +694,19 @@ mod tests { // fetch job let job = consume_one(&mut storage, &worker_id).await; - let ctx = job.get::().unwrap(); + let ctx = job.parts.context; assert_eq!(*ctx.status(), State::Running); storage.reenqueue_orphaned(300).await.unwrap(); // then, the job status has changed to Pending - let job = storage.fetch_by_id(ctx.id()).await.unwrap().unwrap(); - let context = job.get::().unwrap(); + let job = storage + .fetch_by_id(&job.parts.task_id) + .await + 
.unwrap() + .unwrap(); + let context = job.parts.context; assert_eq!(*context.status(), State::Pending); assert!(context.lock_by().is_none()); assert!(context.lock_at().is_none()); @@ -742,7 +735,7 @@ mod tests { // fetch job let job = consume_one(&mut storage, &worker_id).await; - let ctx = job.get::().unwrap(); + let ctx = &job.parts.context; assert_eq!(*ctx.status(), State::Running); @@ -750,8 +743,12 @@ mod tests { storage.reenqueue_orphaned(300).await.unwrap(); // then, the job status is not changed - let job = storage.fetch_by_id(ctx.id()).await.unwrap().unwrap(); - let context = job.get::().unwrap(); + let job = storage + .fetch_by_id(&job.parts.task_id) + .await + .unwrap() + .unwrap(); + let context = job.parts.context; // TODO: Fix assertions assert_eq!(*context.status(), State::Running); assert_eq!(*context.lock_by(), Some(worker_id.clone())); diff --git a/packages/apalis-sql/src/postgres.rs b/packages/apalis-sql/src/postgres.rs index 53b2bcb1..72cb1abb 100644 --- a/packages/apalis-sql/src/postgres.rs +++ b/packages/apalis-sql/src/postgres.rs @@ -47,7 +47,8 @@ use apalis_core::notify::Notify; use apalis_core::poller::controller::Controller; use apalis_core::poller::stream::BackendStream; use apalis_core::poller::Poller; -use apalis_core::request::{Request, RequestStream}; +use apalis_core::request::{Parts, Request, RequestStream}; +use apalis_core::response::Response; use apalis_core::storage::Storage; use apalis_core::task::namespace::Namespace; use apalis_core::task::task_id::TaskId; @@ -86,7 +87,7 @@ where codec: PhantomData, config: Config, controller: Controller, - ack_notify: Notify<(SqlContext, Result)>, + ack_notify: Notify<(SqlContext, Response)>, subscription: Option, } @@ -117,14 +118,14 @@ impl fmt::Debug for PostgresStorage { } } -impl Backend, Res> for PostgresStorage +impl Backend, Res> for PostgresStorage where T: Serialize + DeserializeOwned + Sync + Send + Unpin + 'static, C: Codec + Send + 'static, { - type Stream = BackendStream>>; + type Stream = BackendStream>>; - type Layer = AckLayer, T, Res>; + type Layer = AckLayer, T, SqlContext, Res>; fn poll(mut self, worker: WorkerId) -> Poller { let layer = AckLayer::new(self.clone()); @@ -150,7 +151,7 @@ where >( storage: &mut PostgresStorage, worker: &WorkerId, - tx: &mut mpsc::Sender>, Error>>, + tx: &mut mpsc::Sender>, Error>>, ) -> Result<(), Error> { let res = storage .fetch_next(worker) @@ -181,7 +182,7 @@ where ids = ack_stream.next() => { if let Some(ids) = ids { let ack_ids: Vec<(String, String, String, String, u64)> = ids.iter().map(|(ctx, res)| { - (ctx.id().to_string(), ctx.lock_by().clone().unwrap().to_string(), serde_json::to_string(&res.as_ref().map_err(|e| e.to_string())).unwrap(), calculate_status(res).to_string(), (ctx.attempts().current() + 1) as u64 ) + (res.task_id.to_string(), ctx.lock_by().clone().unwrap().to_string(), serde_json::to_string(&res.inner.as_ref().map_err(|e| e.to_string())).unwrap(), calculate_status(&res.inner).to_string(), (res.attempt.current() + 1) as u64 ) }).collect(); let query = "UPDATE apalis.jobs @@ -369,7 +370,10 @@ where T: DeserializeOwned + Send + Unpin + 'static, C: Codec, { - async fn fetch_next(&mut self, worker_id: &WorkerId) -> Result>, sqlx::Error> { + async fn fetch_next( + &mut self, + worker_id: &WorkerId, + ) -> Result>, sqlx::Error> { let config = &self.config; let job_type = &config.namespace; let fetch_query = "Select * from apalis.get_jobs($1, $2, $3);"; @@ -386,13 +390,12 @@ where let jobs: Vec<_> = jobs .into_iter() .map(|job| { - let (req, ctx) = 
job.into_tuple(); + let (req, parts) = job.req.take_parts(); let req = C::decode(req) .map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::InvalidData, e))) .unwrap(); - let req = SqlRequest::new(req, ctx); - let mut req: Request = req.into(); - req.insert(Namespace(self.config.namespace.clone())); + let mut req = Request::new_with_parts(req, parts); + req.parts.namespace = Some(Namespace(self.config.namespace.clone())); req }) .collect(); @@ -400,16 +403,16 @@ where } } -impl Storage for PostgresStorage +impl Storage for PostgresStorage where - T: Serialize + DeserializeOwned + Send + 'static + Unpin + Sync, + Req: Serialize + DeserializeOwned + Send + 'static + Unpin + Sync, C: Codec + Send + 'static, { - type Job = T; + type Job = Req; type Error = sqlx::Error; - type Identifier = TaskId; + type Context = SqlContext; /// Push a job to Postgres [Storage] /// @@ -418,46 +421,52 @@ where /// ```sql /// Select apalis.push_job(job_type::text, job::json); /// ``` - async fn push(&mut self, job: Self::Job) -> Result { - let id = TaskId::new(); + async fn push_request( + &mut self, + req: Request, + ) -> Result, sqlx::Error> { let query = "INSERT INTO apalis.jobs VALUES ($1, $2, $3, 'Pending', 0, 25, NOW() , NULL, NULL, NULL, NULL)"; - let job = C::encode(&job) + let args = C::encode(&req.args) .map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::InvalidData, e)))?; let job_type = self.config.namespace.clone(); sqlx::query(query) - .bind(job) - .bind(id.to_string()) + .bind(args) + .bind(&req.parts.task_id.to_string()) .bind(&job_type) .execute(&self.pool) .await?; - Ok(id) + Ok(req.parts) } - async fn schedule(&mut self, job: Self::Job, on: Timestamp) -> Result { + async fn schedule_request( + &mut self, + req: Request, + on: Timestamp, + ) -> Result, sqlx::Error> { let query = "INSERT INTO apalis.jobs VALUES ($1, $2, $3, 'Pending', 0, 25, $4, NULL, NULL, NULL, NULL)"; - - let id = TaskId::new(); + let task_id = req.parts.task_id.to_string(); + let parts = req.parts; let on = DateTime::from_timestamp(on, 0); - let job = C::encode(&job) + let job = C::encode(&req.args) .map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::InvalidInput, e)))?; let job_type = self.config.namespace.clone(); sqlx::query(query) .bind(job) - .bind(id.to_string()) + .bind(task_id) .bind(job_type) .bind(on) .execute(&self.pool) .await?; - Ok(id) + Ok(parts) } async fn fetch_by_id( &mut self, job_id: &TaskId, - ) -> Result>, sqlx::Error> { - let fetch_query = "SELECT * FROM apalis.jobs WHERE id = $1"; + ) -> Result>, sqlx::Error> { + let fetch_query = "SELECT * FROM apalis.jobs WHERE id = $1 LIMIT 1"; let res: Option> = sqlx::query_as(fetch_query) .bind(job_id.to_string()) .fetch_optional(&self.pool) @@ -466,12 +475,12 @@ where match res { None => Ok(None), Some(job) => Ok(Some({ - let (req, ctx) = job.into_tuple(); - let req = C::decode(req) + let (req, parts) = job.req.take_parts(); + let args = C::decode(req) .map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::InvalidData, e)))?; - let req = SqlRequest::new(req, ctx); - let mut req: Request = req.into(); - req.insert(Namespace(self.config.namespace.clone())); + + let mut req: Request = Request::new_with_parts(args, parts); + req.parts.namespace = Some(Namespace(self.config.namespace.clone())); req })), } @@ -483,14 +492,12 @@ where record.try_get("count") } - async fn reschedule(&mut self, job: Request, wait: Duration) -> Result<(), sqlx::Error> { - let ctx = job - .get::() - .ok_or(sqlx::Error::Io(io::Error::new( - io::ErrorKind::InvalidData, 
- "Missing SqlContext", - )))?; - let job_id = ctx.id(); + async fn reschedule( + &mut self, + job: Request, + wait: Duration, + ) -> Result<(), sqlx::Error> { + let job_id = job.parts.task_id; let on = Utc::now() + wait; let mut tx = self.pool.acquire().await?; let query = @@ -504,17 +511,13 @@ where Ok(()) } - async fn update(&mut self, job: Request) -> Result<(), sqlx::Error> { - let ctx = job - .get::() - .ok_or(sqlx::Error::Io(io::Error::new( - io::ErrorKind::InvalidData, - "Missing SqlContext", - )))?; - let job_id = ctx.id(); + async fn update(&mut self, job: Request) -> Result<(), sqlx::Error> { + let ctx = job.parts.context; + let job_id = job.parts.task_id; let status = ctx.status().to_string(); - let attempts: i32 = ctx - .attempts() + let attempts: i32 = job + .parts + .attempt .current() .try_into() .map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::InvalidData, e)))?; @@ -553,29 +556,19 @@ where impl Ack for PostgresStorage where T: Sync + Send, - Res: Serialize + Sync, + Res: Serialize + Sync + Clone, C: Codec + Send, { type Context = SqlContext; type AckError = sqlx::Error; - async fn ack( - &mut self, - ctx: &Self::Context, - res: &Result, - ) -> Result<(), sqlx::Error> { + async fn ack(&mut self, ctx: &Self::Context, res: &Response) -> Result<(), sqlx::Error> { + let res = res.clone().map(|r| { + C::encode(r) + .map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::Interrupted, e))) + .unwrap() + }); self.ack_notify - .notify(( - ctx.clone(), - res.as_ref() - .map(|r| { - C::encode(r) - .map_err(|e| { - sqlx::Error::Io(io::Error::new(io::ErrorKind::Interrupted, e)) - }) - .unwrap() - }) - .map_err(|e| e.clone()), - )) + .notify((ctx.clone(), res)) .map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::Interrupted, e)))?; Ok(()) @@ -662,7 +655,8 @@ mod tests { // (different runtimes are created for each test), // we don't share the storage and tests must be run sequentially. 
PostgresStorage::setup(&pool).await.unwrap(); - let mut storage = PostgresStorage::new(pool); + let config = Config::new("apalis-ci-tests").set_buffer_size(1); + let mut storage = PostgresStorage::new_with_config(pool, config); cleanup(&mut storage, &WorkerId::new("test-worker")).await; storage } @@ -703,7 +697,7 @@ mod tests { async fn consume_one( storage: &mut PostgresStorage, worker_id: &WorkerId, - ) -> Request { + ) -> Request { let req = storage.fetch_next(worker_id).await; req.unwrap()[0].clone() } @@ -729,7 +723,10 @@ mod tests { storage.push(email).await.expect("failed to push a job"); } - async fn get_job(storage: &mut PostgresStorage, job_id: &TaskId) -> Request { + async fn get_job( + storage: &mut PostgresStorage, + job_id: &TaskId, + ) -> Request { // add a slight delay to allow background actions like ack to complete apalis_core::sleep(Duration::from_secs(2)).await; storage @@ -747,11 +744,11 @@ mod tests { let worker_id = register_worker(&mut storage).await; let job = consume_one(&mut storage, &worker_id).await; - let ctx = job.get::().unwrap(); - let job_id = ctx.id(); + let job_id = &job.parts.task_id; + // Refresh our job let job = get_job(&mut storage, job_id).await; - let ctx = job.get::().unwrap(); + let ctx = job.parts.context; assert_eq!(*ctx.status(), State::Running); assert_eq!(*ctx.lock_by(), Some(worker_id.clone())); assert!(ctx.lock_at().is_some()); @@ -766,8 +763,7 @@ mod tests { let worker_id = register_worker(&mut storage).await; let job = consume_one(&mut storage, &worker_id).await; - let ctx = job.get::().unwrap(); - let job_id = ctx.id(); + let job_id = &job.parts.task_id; storage .kill(&worker_id, job_id) @@ -775,7 +771,7 @@ mod tests { .expect("failed to kill job"); let job = get_job(&mut storage, job_id).await; - let ctx = job.get::().unwrap(); + let ctx = job.parts.context; assert_eq!(*ctx.status(), State::Killed); assert!(ctx.done_at().is_some()); } @@ -793,10 +789,9 @@ mod tests { .reenqueue_orphaned(5) .await .expect("failed to heartbeat"); - let ctx = job.get::().unwrap(); - let job_id = ctx.id(); + let job_id = &job.parts.task_id; let job = get_job(&mut storage, job_id).await; - let ctx = job.get::().unwrap(); + let ctx = job.parts.context; assert_eq!(*ctx.status(), State::Pending); assert!(ctx.done_at().is_none()); @@ -816,7 +811,7 @@ mod tests { let worker_id = register_worker_at(&mut storage, four_minutes_ago.timestamp()).await; let job = consume_one(&mut storage, &worker_id).await; - let ctx = job.get::().unwrap(); + let ctx = &job.parts.context; assert_eq!(*ctx.status(), State::Running); storage @@ -824,9 +819,9 @@ mod tests { .await .expect("failed to heartbeat"); - let job_id = ctx.id(); + let job_id = &job.parts.task_id; let job = get_job(&mut storage, job_id).await; - let ctx = job.get::().unwrap(); + let ctx = job.parts.context; assert_eq!(*ctx.status(), State::Running); assert_eq!(*ctx.lock_by(), Some(worker_id.clone())); diff --git a/packages/apalis-sql/src/sqlite.rs b/packages/apalis-sql/src/sqlite.rs index 27dcb2af..c4cdb7cc 100644 --- a/packages/apalis-sql/src/sqlite.rs +++ b/packages/apalis-sql/src/sqlite.rs @@ -6,7 +6,8 @@ use apalis_core::layers::{Ack, AckLayer}; use apalis_core::poller::controller::Controller; use apalis_core::poller::stream::BackendStream; use apalis_core::poller::Poller; -use apalis_core::request::{Request, RequestStream}; +use apalis_core::request::{Parts, Request, RequestStream}; +use apalis_core::response::Response; use apalis_core::storage::Storage; use apalis_core::task::namespace::Namespace; use 
apalis_core::task::task_id::TaskId; @@ -178,10 +179,11 @@ where worker_id: &WorkerId, interval: Duration, buffer_size: usize, - ) -> impl Stream>, sqlx::Error>> { + ) -> impl Stream>, sqlx::Error>> { let pool = self.pool.clone(); let worker_id = worker_id.clone(); let config = self.config.clone(); + let namespace = Namespace(self.config.namespace.clone()); try_stream! { loop { let tx = pool.clone(); @@ -199,14 +201,13 @@ where for id in ids { let res = fetch_next(&pool, &worker_id, id.0, &config).await?; yield match res { - None => None::>, + None => None::>, Some(job) => { - let (req, ctx) = job.into_tuple(); - let req = C::decode(req) + let (req, parts) = job.req.take_parts(); + let args = C::decode(req) .map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::InvalidData, e)))?; - let req = SqlRequest::new(req, ctx); - let mut req: Request = req.into(); - req.insert(Namespace(config.namespace.clone())); + let mut req = Request::new_with_parts(args, parts); + req.parts.namespace = Some(namespace.clone()); Some(req) } } @@ -226,30 +227,35 @@ where type Error = sqlx::Error; - type Identifier = TaskId; + type Context = SqlContext; - async fn push(&mut self, job: Self::Job) -> Result { - let id = TaskId::new(); + async fn push_request( + &mut self, + job: Request, + ) -> Result, Self::Error> { let query = "INSERT INTO Jobs VALUES (?1, ?2, ?3, 'Pending', 0, 25, strftime('%s','now'), NULL, NULL, NULL, NULL)"; - - let job = C::encode(&job) + let (task, parts) = job.take_parts(); + let raw = C::encode(&task) .map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::InvalidData, e)))?; let job_type = self.config.namespace.clone(); sqlx::query(query) - .bind(job) - .bind(id.to_string()) + .bind(raw) + .bind(&parts.task_id.to_string()) .bind(job_type.to_string()) .execute(&self.pool) .await?; - Ok(id) + Ok(parts) } - async fn schedule(&mut self, job: Self::Job, on: i64) -> Result { + async fn schedule_request( + &mut self, + req: Request, + on: i64, + ) -> Result, Self::Error> { let query = "INSERT INTO Jobs VALUES (?1, ?2, ?3, 'Pending', 0, 25, ?4, NULL, NULL, NULL, NULL)"; - - let id = TaskId::new(); - let job = C::encode(&job) + let id = &req.parts.task_id; + let job = C::encode(&req.args) .map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::InvalidData, e)))?; let job_type = self.config.namespace.clone(); sqlx::query(query) @@ -259,13 +265,13 @@ where .bind(on) .execute(&self.pool) .await?; - Ok(id) + Ok(req.parts) } async fn fetch_by_id( &mut self, job_id: &TaskId, - ) -> Result>, Self::Error> { + ) -> Result>, Self::Error> { let fetch_query = "SELECT * FROM Jobs WHERE id = ?1"; let res: Option> = sqlx::query_as(fetch_query) .bind(job_id.to_string()) @@ -274,12 +280,12 @@ where match res { None => Ok(None), Some(job) => Ok(Some({ - let (req, ctx) = job.into_tuple(); - let req = C::decode(req) + let (req, parts) = job.req.take_parts(); + let args = C::decode(req) .map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::InvalidData, e)))?; - let req = SqlRequest::new(req, ctx); - let mut req: Request = req.into(); - req.insert(Namespace(self.config.namespace.clone())); + + let mut req: Request = Request::new_with_parts(args, parts); + req.parts.namespace = Some(Namespace(self.config.namespace.clone())); req })), } @@ -291,11 +297,12 @@ where record.try_get("count") } - async fn reschedule(&mut self, job: Request, wait: Duration) -> Result<(), Self::Error> { - let task_id = job.get::().ok_or(sqlx::Error::Io(io::Error::new( - io::ErrorKind::InvalidData, - "Missing TaskId", - )))?; + 
async fn reschedule( + &mut self, + job: Request, + wait: Duration, + ) -> Result<(), Self::Error> { + let task_id = job.parts.task_id; let wait: i64 = wait .as_secs() @@ -316,20 +323,15 @@ where Ok(()) } - async fn update(&mut self, job: Request) -> Result<(), Self::Error> { - let ctx = job - .get::() - .ok_or(sqlx::Error::Io(io::Error::new( - io::ErrorKind::InvalidData, - "Missing SqlContext", - )))?; + async fn update(&mut self, job: Request) -> Result<(), Self::Error> { + let ctx = job.parts.context; let status = ctx.status().to_string(); - let attempts = ctx.attempts(); + let attempts = job.parts.attempt; let done_at = *ctx.done_at(); let lock_by = ctx.lock_by().clone(); let lock_at = *ctx.lock_at(); let last_error = ctx.last_error().clone(); - let job_id = ctx.id(); + let job_id = job.parts.task_id; let mut tx = self.pool.acquire().await?; let query = "UPDATE Jobs SET status = ?1, attempts = ?2, done_at = ?3, lock_by = ?4, lock_at = ?5, last_error = ?6 WHERE id = ?7"; @@ -439,11 +441,11 @@ impl SqliteStorage { } } -impl Backend, Res> - for SqliteStorage +impl + Backend, Res> for SqliteStorage { - type Stream = BackendStream>>; - type Layer = AckLayer, T, Res>; + type Stream = BackendStream>>; + type Layer = AckLayer, T, SqlContext, Res>; fn poll(mut self, worker: WorkerId) -> Poller { let layer = AckLayer::new(self.clone()); @@ -470,22 +472,18 @@ impl Backe impl Ack for SqliteStorage { type Context = SqlContext; type AckError = sqlx::Error; - async fn ack( - &mut self, - ctx: &Self::Context, - res: &Result, - ) -> Result<(), sqlx::Error> { + async fn ack(&mut self, ctx: &Self::Context, res: &Response) -> Result<(), sqlx::Error> { let pool = self.pool.clone(); let query = "UPDATE Jobs SET status = ?4, done_at = strftime('%s','now'), last_error = ?3, attempts =?5 WHERE id = ?1 AND lock_by = ?2"; - let result = serde_json::to_string(&res.as_ref().map_err(|r| r.to_string())) + let result = serde_json::to_string(&res.inner.as_ref().map_err(|r| r.to_string())) .map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::InvalidData, e)))?; sqlx::query(query) - .bind(ctx.id().to_string()) + .bind(res.task_id.to_string()) .bind(ctx.lock_by().as_ref().unwrap().to_string()) .bind(result) - .bind(calculate_status(res).to_string()) - .bind(ctx.attempts().current() as i64 + 1) + .bind(calculate_status(&res.inner).to_string()) + .bind(res.attempt.current() as i64 + 1) .execute(&pool) .await?; Ok(()) @@ -544,7 +542,7 @@ mod tests { async fn consume_one( storage: &mut SqliteStorage, worker_id: &WorkerId, - ) -> Request { + ) -> Request { let mut stream = storage .stream_jobs(worker_id, std::time::Duration::from_secs(10), 1) .boxed(); @@ -574,7 +572,10 @@ mod tests { storage.push(email).await.expect("failed to push a job"); } - async fn get_job(storage: &mut SqliteStorage, job_id: &TaskId) -> Request { + async fn get_job( + storage: &mut SqliteStorage, + job_id: &TaskId, + ) -> Request { storage .fetch_by_id(job_id) .await @@ -592,7 +593,7 @@ mod tests { assert_eq!(len, 1); let job = consume_one(&mut storage, &worker_id).await; - let ctx = job.get::().unwrap(); + let ctx = job.parts.context; assert_eq!(*ctx.status(), State::Running); assert_eq!(*ctx.lock_by(), Some(worker_id.clone())); assert!(ctx.lock_at().is_some()); @@ -605,17 +606,19 @@ mod tests { push_email(&mut storage, example_good_email()).await; let job = consume_one(&mut storage, &worker_id).await; - let ctx = job.get::(); - assert!(ctx.is_some()); - let job_id = ctx.unwrap().id(); - + let job_id = &job.parts.task_id; + let ctx = 
&job.parts.context; + let res = 1usize; storage - .ack(ctx.as_ref().unwrap(), &Ok(())) + .ack( + ctx, + &Response::success(res, job_id.clone(), job.parts.attempt.clone()), + ) .await .expect("failed to acknowledge the job"); let job = get_job(&mut storage, job_id).await; - let ctx = job.get::().unwrap(); + let ctx = job.parts.context; assert_eq!(*ctx.status(), State::Done); assert!(ctx.done_at().is_some()); } @@ -629,8 +632,7 @@ mod tests { let worker_id = register_worker(&mut storage).await; let job = consume_one(&mut storage, &worker_id).await; - let ctx = job.get::().unwrap(); - let job_id = ctx.id(); + let job_id = &job.parts.task_id; storage .kill(&worker_id, job_id) @@ -638,7 +640,7 @@ mod tests { .expect("failed to kill job"); let job = get_job(&mut storage, job_id).await; - let ctx = job.get::().unwrap(); + let ctx = job.parts.context; assert_eq!(*ctx.status(), State::Killed); assert!(ctx.done_at().is_some()); } @@ -654,15 +656,13 @@ mod tests { let worker_id = register_worker_at(&mut storage, six_minutes_ago.timestamp()).await; let job = consume_one(&mut storage, &worker_id).await; - let ctx = job.get::().unwrap(); + let job_id = &job.parts.task_id; storage .reenqueue_orphaned(six_minutes_ago.timestamp()) .await .expect("failed to heartbeat"); - - let job_id = ctx.id(); let job = get_job(&mut storage, job_id).await; - let ctx = job.get::().unwrap(); + let ctx = &job.parts.context; assert_eq!(*ctx.status(), State::Running); assert!(ctx.done_at().is_none()); assert!(ctx.lock_by().is_some()); @@ -680,15 +680,14 @@ mod tests { let worker_id = register_worker_at(&mut storage, four_minutes_ago.timestamp()).await; let job = consume_one(&mut storage, &worker_id).await; - let ctx = job.get::().unwrap(); + let job_id = &job.parts.task_id; storage .reenqueue_orphaned(four_minutes_ago.timestamp()) .await .expect("failed to heartbeat"); - let job_id = ctx.id(); let job = get_job(&mut storage, job_id).await; - let ctx = job.get::().unwrap(); + let ctx = &job.parts.context; assert_eq!(*ctx.status(), State::Running); assert_eq!(*ctx.lock_by(), Some(worker_id)); } diff --git a/src/layers/catch_panic/mod.rs b/src/layers/catch_panic/mod.rs index f4b39d75..2f5bdacd 100644 --- a/src/layers/catch_panic/mod.rs +++ b/src/layers/catch_panic/mod.rs @@ -1,3 +1,4 @@ +use std::any::Any; use std::fmt; use std::future::Future; use std::panic::{catch_unwind, AssertUnwindSafe}; @@ -12,59 +13,77 @@ use tower::Service; /// Apalis Layer that catches panics in the service. #[derive(Clone, Debug)] -pub struct CatchPanicLayer; +pub struct CatchPanicLayer { + on_panic: F, +} -impl CatchPanicLayer { - /// Creates a new `CatchPanicLayer`. +impl CatchPanicLayer) -> Error> { + /// Creates a new `CatchPanicLayer` with a default panic handler. pub fn new() -> Self { - CatchPanicLayer + CatchPanicLayer { + on_panic: default_handler, + } } } -impl Default for CatchPanicLayer { - fn default() -> Self { - Self::new() +impl CatchPanicLayer +where + F: FnMut(Box) -> Error + Clone, +{ + /// Creates a new `CatchPanicLayer` with a custom panic handler. + pub fn with_panic_handler(on_panic: F) -> Self { + CatchPanicLayer { on_panic } } } -impl Layer for CatchPanicLayer { - type Service = CatchPanicService; +impl Layer for CatchPanicLayer +where + F: FnMut(Box) -> Error + Clone, +{ + type Service = CatchPanicService; fn layer(&self, service: S) -> Self::Service { - CatchPanicService { service } + CatchPanicService { + service, + on_panic: self.on_panic.clone(), + } } } /// Apalis Service that catches panics. 
#[derive(Clone, Debug)] -pub struct CatchPanicService { +pub struct CatchPanicService { service: S, + on_panic: F, } -impl Service> for CatchPanicService +impl Service> for CatchPanicService where - S: Service, Response = Res, Error = Error>, + S: Service, Response = Res, Error = Error>, + F: FnMut(Box) -> Error + Clone, { type Response = S::Response; type Error = S::Error; - type Future = CatchPanicFuture; + type Future = CatchPanicFuture; fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { self.service.poll_ready(cx) } - fn call(&mut self, request: Request) -> Self::Future { + fn call(&mut self, request: Request) -> Self::Future { CatchPanicFuture { future: self.service.call(request), + on_panic: self.on_panic.clone(), } } } pin_project_lite::pin_project! { /// A wrapper that catches panics during execution - pub struct CatchPanicFuture { + pub struct CatchPanicFuture { #[pin] - future: F, + future: Fut, + on_panic: F, } } @@ -80,9 +99,10 @@ impl fmt::Display for PanicError { } } -impl Future for CatchPanicFuture +impl Future for CatchPanicFuture where - F: Future>, + Fut: Future>, + F: FnMut(Box) -> Error, { type Output = Result; @@ -91,24 +111,24 @@ where match catch_unwind(AssertUnwindSafe(|| this.future.poll(cx))) { Ok(res) => res, - Err(e) => { - let panic_info = if let Some(s) = e.downcast_ref::<&str>() { - s.to_string() - } else if let Some(s) = e.downcast_ref::() { - s.clone() - } else { - "Unknown panic".to_string() - }; - // apalis assumes service functions are pure - // therefore a panic should ideally abort - Poll::Ready(Err(Error::Abort(Arc::new(Box::new(PanicError( - panic_info, - )))))) - } + Err(e) => Poll::Ready(Err((this.on_panic)(e))), } } } +fn default_handler(e: Box) -> Error { + let panic_info = if let Some(s) = e.downcast_ref::<&str>() { + s.to_string() + } else if let Some(s) = e.downcast_ref::() { + s.clone() + } else { + "Unknown panic".to_string() + }; + // apalis assumes service functions are pure + // therefore a panic should ideally abort + Error::Abort(Arc::new(Box::new(PanicError(panic_info)))) +} + #[cfg(test)] mod tests { use super::*; @@ -122,7 +142,7 @@ mod tests { #[derive(Clone)] struct TestService; - impl Service> for TestService { + impl Service> for TestService { type Response = usize; type Error = Error; type Future = Pin> + Send>>; @@ -131,7 +151,7 @@ mod tests { Poll::Ready(Ok(())) } - fn call(&mut self, _req: Request) -> Self::Future { + fn call(&mut self, _req: Request) -> Self::Future { Box::pin(async { Ok(42) }) } } @@ -151,7 +171,7 @@ mod tests { async fn test_catch_panic_layer_panics() { struct PanicService; - impl Service> for PanicService { + impl Service> for PanicService { type Response = usize; type Error = Error; type Future = Pin> + Send>>; @@ -160,7 +180,7 @@ mod tests { Poll::Ready(Ok(())) } - fn call(&mut self, _req: Request) -> Self::Future { + fn call(&mut self, _req: Request) -> Self::Future { Box::pin(async { None.unwrap() }) } } @@ -174,8 +194,8 @@ mod tests { assert!(response.is_err()); assert_eq!( - response.unwrap_err().to_string()[0..87], - *"FailedError: PanicError: called `Option::unwrap()` on a `None` value, Backtrace: 0: " + response.unwrap_err().to_string(), + *"AbortError: PanicError: called `Option::unwrap()` on a `None` value" ); } } diff --git a/src/layers/mod.rs b/src/layers/mod.rs index 0e28e943..93294444 100644 --- a/src/layers/mod.rs +++ b/src/layers/mod.rs @@ -32,3 +32,5 @@ pub use tower::timeout::TimeoutLayer; #[cfg(feature = "catch-panic")] #[cfg_attr(docsrs, doc(cfg(feature = "catch-panic")))] pub 
mod catch_panic; + +pub use apalis_core::error::ErrorHandlingLayer; diff --git a/src/layers/prometheus/mod.rs b/src/layers/prometheus/mod.rs index 66923d1f..99507b59 100644 --- a/src/layers/prometheus/mod.rs +++ b/src/layers/prometheus/mod.rs @@ -4,7 +4,7 @@ use std::{ time::Instant, }; -use apalis_core::{error::Error, request::Request, task::namespace::Namespace}; +use apalis_core::{error::Error, request::Request}; use futures::Future; use pin_project_lite::pin_project; use tower::{Layer, Service}; @@ -27,25 +27,30 @@ pub struct PrometheusService { service: S, } -impl Service> for PrometheusService +impl Service> for PrometheusService where - S: Service, Response = Res, Error = Error, Future = F>, - F: Future> + 'static, + Svc: Service, Response = Res, Error = Error, Future = Fut>, + Fut: Future> + 'static, { - type Response = S::Response; - type Error = S::Error; - type Future = ResponseFuture; + type Response = Svc::Response; + type Error = Svc::Error; + type Future = ResponseFuture; fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { self.service.poll_ready(cx) } - fn call(&mut self, request: Request) -> Self::Future { + fn call(&mut self, request: Request) -> Self::Future { let start = Instant::now(); - let namespace = request.get::().unwrap().to_string(); + let namespace = request + .parts + .namespace + .as_ref() + .map(|ns| ns.0.to_string()) + .unwrap_or(std::any::type_name::().to_string()); let req = self.service.call(request); - let job_type = std::any::type_name::().to_string(); + let job_type = std::any::type_name::().to_string(); ResponseFuture { inner: req, diff --git a/src/layers/retry/mod.rs b/src/layers/retry/mod.rs index 7d455c07..3dbbafda 100644 --- a/src/layers/retry/mod.rs +++ b/src/layers/retry/mod.rs @@ -1,15 +1,13 @@ use futures::future; use tower::retry::Policy; +use apalis_core::{error::Error, request::Request}; /// Re-export from [`RetryLayer`] /// /// [`RetryLayer`]: tower::retry::RetryLayer pub use tower::retry::RetryLayer; -use apalis_core::task::attempt::Attempt; -use apalis_core::{error::Error, request::Request}; - -type Req = Request; +type Req = Request; type Err = Error; /// Retries a task instantly for `retries` @@ -31,14 +29,15 @@ impl RetryPolicy { } } -impl Policy, Res, Err> for RetryPolicy +impl Policy, Res, Err> for RetryPolicy where T: Clone, + Ctx: Clone, { type Future = future::Ready; - fn retry(&self, req: &Req, result: Result<&Res, &Err>) -> Option { - let ctx = req.get::().cloned().unwrap_or_default(); + fn retry(&self, req: &Req, result: Result<&Res, &Err>) -> Option { + let attempt = &req.parts.attempt; match result { Ok(_) => { // Treat all `Response`s as success, @@ -46,22 +45,14 @@ where None } Err(_) if self.retries == 0 => None, - Err(_) if (self.retries - ctx.current() > 0) => Some(future::ready(self.clone())), + Err(_) if (self.retries - attempt.current() > 0) => Some(future::ready(self.clone())), Err(_) => None, } } - fn clone_request(&self, req: &Req) -> Option> { - let mut req = req.clone(); - let value = req - .get::() - .cloned() - .map(|attempt| { - attempt.increment(); - attempt - }) - .unwrap_or_default(); - req.insert(value); + fn clone_request(&self, req: &Req) -> Option> { + let req = req.clone(); + req.parts.attempt.increment(); Some(req) } } diff --git a/src/layers/sentry/mod.rs b/src/layers/sentry/mod.rs index de6f9ae2..7e6d50c6 100644 --- a/src/layers/sentry/mod.rs +++ b/src/layers/sentry/mod.rs @@ -1,16 +1,13 @@ +use sentry_core::protocol; use std::fmt::Debug; use std::future::Future; use std::pin::Pin; use 
std::task::{Context, Poll}; - -use apalis_core::task::namespace::Namespace; -use sentry_core::protocol; use tower::Layer; use tower::Service; use apalis_core::error::Error; use apalis_core::request::Request; -use apalis_core::task::attempt::Attempt; use apalis_core::task::task_id::TaskId; /// Tower Layer that logs Job Details. @@ -126,34 +123,39 @@ where } } -impl Service> for SentryJobService +impl Service> for SentryJobService where - S: Service, Response = Res, Error = Error, Future = F>, - F: Future> + 'static, + Svc: Service, Response = Res, Error = Error, Future = Fut>, + Fut: Future> + 'static, { - type Response = S::Response; - type Error = S::Error; - type Future = SentryHttpFuture; + type Response = Svc::Response; + type Error = Svc::Error; + type Future = SentryHttpFuture; fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { self.service.poll_ready(cx) } - fn call(&mut self, request: Request) -> Self::Future { - let job_type = std::any::type_name::().to_string(); - let ctx = request.get::().cloned().unwrap_or_default(); - let task_id = request.get::().unwrap(); - let namespace = request.get::().unwrap(); - let trx_ctx = sentry_core::TransactionContext::new(namespace, "apalis.job"); - - let job_details = Task { + fn call(&mut self, request: Request) -> Self::Future { + let task_type = std::any::type_name::().to_string(); + let attempt = &request.parts.attempt; + let task_id = &request.parts.task_id; + let namespace = request + .parts + .namespace + .as_ref() + .map(|s| s.0.as_str()) + .unwrap_or(std::any::type_name::()); + let trx_ctx = sentry_core::TransactionContext::new(namespace, "apalis.task"); + + let task_details = Task { id: task_id.clone(), - current_attempt: ctx.current().try_into().unwrap(), - namespace: job_type, + current_attempt: attempt.current().try_into().unwrap(), + namespace: task_type, }; SentryHttpFuture { - on_first_poll: Some((job_details, trx_ctx)), + on_first_poll: Some((task_details, trx_ctx)), transaction: None, future: self.service.call(request), } diff --git a/src/layers/tracing/make_span.rs b/src/layers/tracing/make_span.rs index 4de8f651..2ef9eb3a 100644 --- a/src/layers/tracing/make_span.rs +++ b/src/layers/tracing/make_span.rs @@ -8,22 +8,22 @@ use super::DEFAULT_MESSAGE_LEVEL; /// /// [`Span`]: tracing::Span /// [`Trace`]: super::Trace -pub trait MakeSpan { +pub trait MakeSpan { /// Make a span from a request. - fn make_span(&mut self, request: &Request) -> Span; + fn make_span(&mut self, request: &Request) -> Span; } -impl MakeSpan for Span { - fn make_span(&mut self, _request: &Request) -> Span { +impl MakeSpan for Span { + fn make_span(&mut self, _request: &Request) -> Span { self.clone() } } -impl MakeSpan for F +impl MakeSpan for F where - F: FnMut(&Request) -> Span, + F: FnMut(&Request) -> Span, { - fn make_span(&mut self, request: &Request) -> Span { + fn make_span(&mut self, request: &Request) -> Span { self(request) } } @@ -62,8 +62,8 @@ impl Default for DefaultMakeSpan { } } -impl MakeSpan for DefaultMakeSpan { - fn make_span(&mut self, _req: &Request) -> Span { +impl MakeSpan for DefaultMakeSpan { + fn make_span(&mut self, _req: &Request) -> Span { // This ugly macro is needed, unfortunately, because `tracing::span!` // required the level argument to be static. Meaning we can't just pass // `self.level`. 
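
Because the reworked `MakeSpan` trait above is generic over both the request body and its context, a span factory now sees the whole `Request` and can read task metadata straight from `req.parts` instead of doing a type-map lookup. A minimal sketch of a closure-based span factory, assuming a hypothetical `Email` job type, `SqlContext` as a stand-in context type, and the `TraceLayer::new().make_span_with(...)` entry point referenced by the `TraceLayer::make_span_with` doc links in this diff:

    use apalis::layers::tracing::TraceLayer;
    use apalis_core::request::Request;
    use apalis_sql::context::SqlContext;

    // Hypothetical job type, used only for this sketch.
    #[derive(Debug, Clone)]
    struct Email {
        to: String,
    }

    fn main() {
        // One span per task, built from `parts`, which now travels with the
        // request instead of living in an extensions map.
        let _trace = TraceLayer::new().make_span_with(|req: &Request<Email, SqlContext>| {
            tracing::span!(
                tracing::Level::INFO,
                "task",
                task_id = %req.parts.task_id,
                attempt = req.parts.attempt.current(),
            )
        });
    }

The closure form works because, as the diff above shows, `MakeSpan` has a blanket impl for any `FnMut(&Request<B, Ctx>) -> Span`.
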
diff --git a/src/layers/tracing/mod.rs b/src/layers/tracing/mod.rs index 2ceb3e96..0f675090 100644 --- a/src/layers/tracing/mod.rs +++ b/src/layers/tracing/mod.rs @@ -3,7 +3,7 @@ mod on_failure; mod on_request; mod on_response; -use apalis_core::{error::Error, request::Request}; +use apalis_core::request::Request; use std::{ fmt::{self, Debug}, pin::Pin, @@ -289,26 +289,26 @@ impl } } -impl Service> +impl Service> for Trace where - S: Service, Response = Res, Error = Error, Future = F> + Unpin + Send + 'static, + S: Service, Response = Res, Future = F> + Unpin + Send + 'static, S::Error: fmt::Display + 'static, - MakeSpanT: MakeSpan, - OnRequestT: OnRequest, + MakeSpanT: MakeSpan, + OnRequestT: OnRequest, OnResponseT: OnResponse + Clone + 'static, - F: Future> + 'static, - OnFailureT: OnFailure + Clone + 'static, + F: Future> + 'static, + OnFailureT: OnFailure + Clone + 'static, { type Response = Res; - type Error = Error; + type Error = S::Error; type Future = ResponseFuture; fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { self.inner.poll_ready(cx) } - fn call(&mut self, req: Request) -> Self::Future { + fn call(&mut self, req: Request) -> Self::Future { let span = self.make_span.make_span(&req); let start = Instant::now(); let job = { @@ -339,14 +339,14 @@ pin_project! { } } -impl Future for ResponseFuture +impl Future for ResponseFuture where - Fut: Future>, + Fut: Future>, OnResponseT: OnResponse, - OnFailureT: OnFailure, + OnFailureT: OnFailure, { - type Output = Result; + type Output = Result; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { let this = self.project(); diff --git a/src/layers/tracing/on_failure.rs b/src/layers/tracing/on_failure.rs index 20bc0719..43a53923 100644 --- a/src/layers/tracing/on_failure.rs +++ b/src/layers/tracing/on_failure.rs @@ -1,8 +1,6 @@ -use apalis_core::error::Error; - use super::{LatencyUnit, DEFAULT_ERROR_LEVEL}; -use std::time::Duration; +use std::{fmt::Display, time::Duration}; use tracing::{Level, Span}; /// Trait used to tell [`Trace`] what to do when a request fails. @@ -11,7 +9,7 @@ use tracing::{Level, Span}; /// `on_failure` callback is called. /// /// [`Trace`]: super::Trace -pub trait OnFailure { +pub trait OnFailure { /// Do the thing. /// /// `latency` is the duration since the request was received. @@ -23,19 +21,19 @@ pub trait OnFailure { /// [`Span`]: https://docs.rs/tracing/latest/tracing/span/index.html /// [record]: https://docs.rs/tracing/latest/tracing/span/struct.Span.html#method.record /// [`TraceLayer::make_span_with`]: crate::layers::tracing::TraceLayer::make_span_with - fn on_failure(&mut self, error: &Error, latency: Duration, span: &Span); + fn on_failure(&mut self, error: &E, latency: Duration, span: &Span); } -impl OnFailure for () { +impl OnFailure for () { #[inline] - fn on_failure(&mut self, _: &Error, _: Duration, _: &Span) {} + fn on_failure(&mut self, _: &E, _: Duration, _: &Span) {} } -impl OnFailure for F +impl OnFailure for F where - F: FnMut(&Error, Duration, &Span), + F: FnMut(&E, Duration, &Span), { - fn on_failure(&mut self, error: &Error, latency: Duration, span: &Span) { + fn on_failure(&mut self, error: &E, latency: Duration, span: &Span) { self(error, latency, span) } } @@ -135,8 +133,8 @@ macro_rules! 
log_pattern_match { }; } -impl OnFailure for DefaultOnFailure { - fn on_failure(&mut self, error: &Error, latency: Duration, span: &Span) { +impl OnFailure for DefaultOnFailure { + fn on_failure(&mut self, error: &E, latency: Duration, span: &Span) { log_pattern_match!( self, span, diff --git a/src/layers/tracing/on_request.rs b/src/layers/tracing/on_request.rs index f0be6b3b..c983d721 100644 --- a/src/layers/tracing/on_request.rs +++ b/src/layers/tracing/on_request.rs @@ -10,7 +10,7 @@ use tracing::Span; /// `on_request` callback is called. /// /// [`Trace`]: super::Trace -pub trait OnRequest { +pub trait OnRequest { /// Do the thing. /// /// `span` is the `tracing` [`Span`], corresponding to this request, produced by the closure @@ -20,19 +20,19 @@ pub trait OnRequest { /// [`Span`]: https://docs.rs/tracing/latest/tracing/span/index.html /// [record]: https://docs.rs/tracing/latest/tracing/span/struct.Span.html#method.record /// [`TraceLayer::make_span_with`]: crate::layers::tracing::TraceLayer::make_span_with - fn on_request(&mut self, request: &Request, span: &Span); + fn on_request(&mut self, request: &Request, span: &Span); } -impl OnRequest for () { +impl OnRequest for () { #[inline] - fn on_request(&mut self, _: &Request, _: &Span) {} + fn on_request(&mut self, _: &Request, _: &Span) {} } -impl OnRequest for F +impl OnRequest for F where - F: FnMut(&Request, &Span), + F: FnMut(&Request, &Span), { - fn on_request(&mut self, request: &Request, span: &Span) { + fn on_request(&mut self, request: &Request, span: &Span) { self(request, span) } } @@ -76,8 +76,8 @@ impl DefaultOnRequest { } } -impl OnRequest for DefaultOnRequest { - fn on_request(&mut self, _: &Request, _: &Span) { +impl OnRequest for DefaultOnRequest { + fn on_request(&mut self, _: &Request, _: &Span) { match self.level { Level::ERROR => { tracing::event!(Level::ERROR, "job.start",); diff --git a/src/lib.rs b/src/lib.rs index 54892a40..493cd3e9 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -110,8 +110,8 @@ pub mod prelude { poller::{controller::Controller, FetchNext, Poller}, request::{Request, RequestStream}, response::IntoResponse, - service_fn::{service_fn, FromData, ServiceFn}, - storage::{Storage, StorageStream}, + service_fn::{service_fn, FromRequest, ServiceFn}, + storage::Storage, task::attempt::Attempt, task::task_id::TaskId, worker::{Context, Event, Ready, Worker, WorkerError, WorkerId}, From d62281fdeb3e50fdfcb6306e141366812e2318a9 Mon Sep 17 00:00:00 2001 From: Geoffrey Mureithi <95377562+geofmureithi@users.noreply.github.com> Date: Tue, 17 Sep 2024 09:29:50 +0300 Subject: [PATCH 48/59] bump: to 0.6.0-rc.7 (#418) --- Cargo.toml | 4 ++-- packages/apalis-core/Cargo.toml | 2 +- packages/apalis-cron/Cargo.toml | 4 ++-- packages/apalis-redis/Cargo.toml | 4 ++-- packages/apalis-sql/Cargo.toml | 4 ++-- 5 files changed, 9 insertions(+), 9 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 1c0f3723..eef5f103 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -4,7 +4,7 @@ repository = "https://github.com/geofmureithi/apalis" [package] name = "apalis" -version = "0.6.0-rc.6" +version = "0.6.0-rc.7" authors = ["Geoffrey Mureithi "] description = "Simple, extensible multithreaded background job processing for Rust" edition.workspace = true @@ -58,7 +58,7 @@ layers = [ docsrs = ["document-features"] [dependencies.apalis-core] -version = "0.6.0-rc.6" +version = "0.6.0-rc.7" default-features = false path = "./packages/apalis-core" diff --git a/packages/apalis-core/Cargo.toml b/packages/apalis-core/Cargo.toml index 
ffb31f91..767e09b7 100644 --- a/packages/apalis-core/Cargo.toml +++ b/packages/apalis-core/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apalis-core" -version = "0.6.0-rc.6" +version = "0.6.0-rc.7" authors = ["Njuguna Mureithi "] edition.workspace = true repository.workspace = true diff --git a/packages/apalis-cron/Cargo.toml b/packages/apalis-cron/Cargo.toml index 02fa8260..1242da08 100644 --- a/packages/apalis-cron/Cargo.toml +++ b/packages/apalis-cron/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apalis-cron" -version = "0.6.0-rc.6" +version = "0.6.0-rc.7" edition.workspace = true repository.workspace = true authors = ["Njuguna Mureithi "] @@ -10,7 +10,7 @@ description = "A simple yet extensible library for cron-like job scheduling for # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -apalis-core = { path = "../../packages/apalis-core", version = "0.6.0-rc.6", default-features = false, features = [ +apalis-core = { path = "../../packages/apalis-core", version = "0.6.0-rc.7", default-features = false, features = [ "sleep", ] } cron = "0.12.1" diff --git a/packages/apalis-redis/Cargo.toml b/packages/apalis-redis/Cargo.toml index d9205e20..bd62b272 100644 --- a/packages/apalis-redis/Cargo.toml +++ b/packages/apalis-redis/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apalis-redis" -version = "0.6.0-rc.6" +version = "0.6.0-rc.7" authors = ["Njuguna Mureithi "] edition.workspace = true repository.workspace = true @@ -12,7 +12,7 @@ description = "Redis Storage for apalis: use Redis for background jobs and messa # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -apalis-core = { path = "../../packages/apalis-core", version = "0.6.0-rc.6", default-features = false, features = [ +apalis-core = { path = "../../packages/apalis-core", version = "0.6.0-rc.7", default-features = false, features = [ "sleep", "json", ] } diff --git a/packages/apalis-sql/Cargo.toml b/packages/apalis-sql/Cargo.toml index 80851d2f..7ecb7db8 100644 --- a/packages/apalis-sql/Cargo.toml +++ b/packages/apalis-sql/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apalis-sql" -version = "0.6.0-rc.6" +version = "0.6.0-rc.7" authors = ["Njuguna Mureithi "] edition.workspace = true repository.workspace = true @@ -26,7 +26,7 @@ features = ["chrono"] [dependencies] serde = { version = "1", features = ["derive"] } serde_json = "1" -apalis-core = { path = "../../packages/apalis-core", version = "0.6.0-rc.6", default-features = false, features = [ +apalis-core = { path = "../../packages/apalis-core", version = "0.6.0-rc.7", default-features = false, features = [ "sleep", "json", ] } From 3166d7c97bffbb06276621b5e1da37b99b03b739 Mon Sep 17 00:00:00 2001 From: Geoffrey Mureithi <95377562+geofmureithi@users.noreply.github.com> Date: Thu, 3 Oct 2024 22:37:46 +0300 Subject: [PATCH 49/59] fix: apply `FromRequest` for items in `Parts` (#425) Problem: We are missing crucial `FromRequest` impls for: - TaskId - Attempt - Namespace Also removed `Context` Solution: Implement `FromRequest` for these Types. 
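
In practice these impls mean a task function can take the metadata types directly as arguments alongside the job payload, with each extra argument resolved through `FromRequest` against the request's `parts`. A minimal sketch, assuming a hypothetical `Email` payload (the extractor behavior follows the impls in the diffs below; `Namespace` is the only fallible one, returning `Error::MissingData` when the backend never set a namespace):

    use apalis_core::task::attempt::Attempt;
    use apalis_core::task::namespace::Namespace;
    use apalis_core::task::task_id::TaskId;
    use serde::{Deserialize, Serialize};

    // Hypothetical payload; any serializable job type works the same way.
    #[derive(Debug, Serialize, Deserialize)]
    struct Email {
        to: String,
    }

    // Arguments after the payload are extracted via `FromRequest`.
    async fn send_email(job: Email, id: TaskId, attempt: Attempt, ns: Namespace) {
        println!(
            "task {id:?} (attempt {}) in {ns:?}: emailing {}",
            attempt.current(),
            job.to
        );
    }

Wiring is unchanged from the `fn-args` example updated below, e.g. `WorkerBuilder::new("emailer").backend(storage).build_fn(send_email)`.
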
--- examples/fn-args/src/main.rs | 16 +++++++++------- packages/apalis-core/src/task/namespace.rs | 11 +++++++++++ packages/apalis-core/src/task/task_id.rs | 8 ++++++++ packages/apalis-core/src/worker/mod.rs | 7 ------- 4 files changed, 28 insertions(+), 14 deletions(-) diff --git a/examples/fn-args/src/main.rs b/examples/fn-args/src/main.rs index 4a28d285..1e5655f6 100644 --- a/examples/fn-args/src/main.rs +++ b/examples/fn-args/src/main.rs @@ -19,17 +19,18 @@ struct SimpleJob {} // A task can have up to 16 arguments async fn simple_job( - _: SimpleJob, // Required, must be of the type of the job/message - worker_id: Data, // The worker running the job, added by worker - _worker_ctx: Context, // The worker context, added by worker + _: SimpleJob, // Required, must be of the type of the job/message + worker_id: Data, // The worker running the job, added by worker + _worker_ctx: Data>, // The worker context, added by worker _sqlite: Data>, // The source, added by storage - task_id: Data, // The task id, added by storage - ctx: SqlContext, // The task context - count: Data, // Our custom data added via layer + task_id: TaskId, // The task id, added by storage + attempt: Attempt, // The current attempt + ctx: SqlContext, // The task context provided by the backend + count: Data, // Our custom data added via layer ) { // increment the counter let current = count.fetch_add(1, Ordering::Relaxed); - info!("worker: {worker_id:?}; task_id: {task_id:?}, ctx: {ctx:?}, count: {current:?}"); + info!("worker: {worker_id:?}; task_id: {task_id:?}, ctx: {ctx:?}, attempt:{attempt:?} count: {current:?}"); } async fn produce_jobs(storage: &mut SqliteStorage) { @@ -62,6 +63,7 @@ async fn main() -> Result<(), std::io::Error> { .register_with_count(2, { WorkerBuilder::new("tasty-banana") .data(Count::default()) + .data(sqlite.clone()) .backend(sqlite) .build_fn(simple_job) }) diff --git a/packages/apalis-core/src/task/namespace.rs b/packages/apalis-core/src/task/namespace.rs index 16a5c9d0..dfed96be 100644 --- a/packages/apalis-core/src/task/namespace.rs +++ b/packages/apalis-core/src/task/namespace.rs @@ -4,6 +4,10 @@ use std::ops::Deref; use serde::{Deserialize, Serialize}; +use crate::error::Error; +use crate::request::Request; +use crate::service_fn::FromRequest; + /// A wrapper type that defines a task's namespace. #[derive(Debug, Clone, Serialize, Deserialize)] pub struct Namespace(pub String); @@ -39,3 +43,10 @@ impl AsRef for Namespace { &self.0 } } + +impl FromRequest> for Namespace { + fn from_request(req: &Request) -> Result { + let msg = "Missing `Namespace`. This is a bug, please file a report with the backend you are using".to_owned(); + req.parts.namespace.clone().ok_or(Error::MissingData(msg)) + } +} diff --git a/packages/apalis-core/src/task/task_id.rs b/packages/apalis-core/src/task/task_id.rs index 455e531f..22967055 100644 --- a/packages/apalis-core/src/task/task_id.rs +++ b/packages/apalis-core/src/task/task_id.rs @@ -6,6 +6,8 @@ use std::{ use serde::{de::Visitor, Deserialize, Deserializer, Serialize, Serializer}; use ulid::Ulid; +use crate::{error::Error, request::Request, service_fn::FromRequest}; + /// A wrapper type that defines a task id. 
#[derive(Debug, Clone, Eq, Hash, PartialEq)] pub struct TaskId(Ulid); @@ -58,6 +60,12 @@ impl<'de> Deserialize<'de> for TaskId { } } +impl FromRequest> for TaskId { + fn from_request(req: &Request) -> Result { + Ok(req.parts.task_id.clone()) + } +} + struct TaskIdVisitor; impl<'de> Visitor<'de> for TaskIdVisitor { diff --git a/packages/apalis-core/src/worker/mod.rs b/packages/apalis-core/src/worker/mod.rs index e5cf5c69..99a1bb39 100644 --- a/packages/apalis-core/src/worker/mod.rs +++ b/packages/apalis-core/src/worker/mod.rs @@ -6,7 +6,6 @@ use crate::monitor::{Monitor, MonitorContext}; use crate::notify::Notify; use crate::poller::FetchNext; use crate::request::Request; -use crate::service_fn::FromRequest; use crate::Backend; use futures::future::Shared; use futures::{Future, FutureExt}; @@ -534,12 +533,6 @@ impl fmt::Debug for Context { } } -impl FromRequest> for Context { - fn from_request(req: &Request) -> Result { - req.get_checked::().cloned() - } -} - pin_project! { struct Tracked { worker: Context, From e1d7e6bbc3a56e05db2a4c95958c717b92900a72 Mon Sep 17 00:00:00 2001 From: Geoffrey Mureithi <95377562+geofmureithi@users.noreply.github.com> Date: Fri, 4 Oct 2024 08:09:32 +0300 Subject: [PATCH 50/59] fix:[bug] include backend provided layer in service layers. (#426) * fix:[bug] include backend provided layer in service layers. Problem: The current worker logic is missing an implementation where the backend provided layer should be added to the service's layer. This is a critical issue that affects all v0.6.0-rc-7 users and they should update as soon as a new release is done. Solution: - Add backend layers to service's layer. - Add worker_consume tests on the storages to prevent regression on this. * chore: comment an enforcement rule not yet followed by redis --- packages/apalis-core/src/lib.rs | 3 ++ packages/apalis-core/src/worker/mod.rs | 7 ++-- packages/apalis-redis/src/storage.rs | 4 +- packages/apalis-sql/src/lib.rs | 54 +++++++++++++++++++++++++- packages/apalis-sql/src/postgres.rs | 3 +- src/layers/catch_panic/mod.rs | 6 +++ src/layers/prometheus/mod.rs | 3 +- 7 files changed, 72 insertions(+), 8 deletions(-) diff --git a/packages/apalis-core/src/lib.rs b/packages/apalis-core/src/lib.rs index 6f01618f..42dbe88d 100644 --- a/packages/apalis-core/src/lib.rs +++ b/packages/apalis-core/src/lib.rs @@ -398,6 +398,9 @@ pub mod test_utils { assert_eq!(res, 1); // A job exists let res = t.execute_next().await; assert_eq!(res.1, Ok("1".to_owned())); + // TODO: all storages need to satisfy this rule, redis does not + // let res = t.len().await.unwrap(); + // assert_eq!(res, 0); t.vacuum().await.unwrap(); } }; diff --git a/packages/apalis-core/src/worker/mod.rs b/packages/apalis-core/src/worker/mod.rs index 99a1bb39..d0908c29 100644 --- a/packages/apalis-core/src/worker/mod.rs +++ b/packages/apalis-core/src/worker/mod.rs @@ -238,11 +238,12 @@ impl Worker> { Ctx: Send + 'static + Sync, { let notifier = Notify::new(); - let service = self.state.service; - - let (service, poll_worker) = Buffer::pair(service, instances); let backend = self.state.backend; + let service = self.state.service; let poller = backend.poll::(self.id.clone()); + let layer = poller.layer; + let service = ServiceBuilder::new().layer(layer).service(service); + let (service, poll_worker) = Buffer::pair(service, instances); let polling = poller.heartbeat.shared(); let worker_stream = WorkerStream::new(poller.stream, notifier.clone()) .into_future() diff --git a/packages/apalis-redis/src/storage.rs 
b/packages/apalis-redis/src/storage.rs index ec595f09..729d62ec 100644 --- a/packages/apalis-redis/src/storage.rs +++ b/packages/apalis-redis/src/storage.rs @@ -193,7 +193,7 @@ impl Config { /// set the namespace for the Storage pub fn set_namespace(mut self, namespace: &str) -> Self { - self.namespace = namespace.to_owned(); + self.namespace = namespace.to_string(); self } @@ -308,7 +308,7 @@ impl Clone for RedisStorage { scripts: self.scripts.clone(), controller: self.controller.clone(), config: self.config.clone(), - codec: self.codec.clone(), + codec: self.codec, } } } diff --git a/packages/apalis-sql/src/lib.rs b/packages/apalis-sql/src/lib.rs index 012dfcd1..9f6b4752 100644 --- a/packages/apalis-sql/src/lib.rs +++ b/packages/apalis-sql/src/lib.rs @@ -90,7 +90,7 @@ impl Config { /// /// Defaults to "apalis::sql" pub fn set_namespace(mut self, namespace: &str) -> Self { - self.namespace = namespace.to_owned(); + self.namespace = namespace.to_string(); self } @@ -223,5 +223,57 @@ macro_rules! sql_storage_tests { "{\"Err\":\"FailedError: Missing separator character '@'.\"}" ); } + + #[tokio::test] + async fn worker_consume() { + use apalis_core::builder::WorkerBuilder; + use apalis_core::builder::WorkerFactoryFn; + use apalis_core::executor::Executor; + use std::future::Future; + + #[derive(Debug, Clone)] + struct TokioTestExecutor; + + impl Executor for TokioTestExecutor { + fn spawn(&self, future: impl Future + Send + 'static) { + tokio::spawn(future); + } + } + + let storage = $setup().await; + let mut handle = storage.clone(); + + let parts = handle + .push(email_service::example_good_email()) + .await + .unwrap(); + + async fn task(_job: Email) -> &'static str { + tokio::time::sleep(Duration::from_millis(100)).await; + "Job well done" + } + let worker = WorkerBuilder::new("rango-tango").backend(storage); + let worker = worker.build_fn(task); + let worker = worker.with_executor(TokioTestExecutor); + let w = worker.clone(); + + let runner = async move { + apalis_core::sleep(Duration::from_secs(3)).await; + let job_id = &parts.task_id; + let job = get_job(&mut handle, job_id).await; + let ctx = job.parts.context; + + assert_eq!(*ctx.status(), State::Done); + assert!(ctx.done_at().is_some()); + assert!(ctx.lock_by().is_some()); + assert!(ctx.lock_at().is_some()); + assert!(ctx.last_error().is_some()); // TODO: rename last_error to last_result + + w.stop(); + }; + + let wkr = worker.run(); + tokio::join!(runner, wkr); + } }; } diff --git a/packages/apalis-sql/src/postgres.rs b/packages/apalis-sql/src/postgres.rs index 72cb1abb..067b3e3f 100644 --- a/packages/apalis-sql/src/postgres.rs +++ b/packages/apalis-sql/src/postgres.rs @@ -182,7 +182,7 @@ where ids = ack_stream.next() => { if let Some(ids) = ids { let ack_ids: Vec<(String, String, String, String, u64)> = ids.iter().map(|(ctx, res)| { - (res.task_id.to_string(), ctx.lock_by().clone().unwrap().to_string(), serde_json::to_string(&res.inner.as_ref().map_err(|e| e.to_string())).unwrap(), calculate_status(&res.inner).to_string(), (res.attempt.current() + 1) as u64 ) + (res.task_id.to_string(), ctx.lock_by().clone().unwrap().to_string(), serde_json::to_string(&res.inner.as_ref().map_err(|e| e.to_string())).expect("Could not convert response to json"), calculate_status(&res.inner).to_string(), (res.attempt.current() + 1) as u64 ) }).collect(); let query = "UPDATE apalis.jobs @@ -567,6 +567,7 @@ where .map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::Interrupted, e))) .unwrap() }); + self.ack_notify .notify((ctx.clone(), res)) 
.map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::Interrupted, e)))?; diff --git a/src/layers/catch_panic/mod.rs b/src/layers/catch_panic/mod.rs index 2f5bdacd..ba869d19 100644 --- a/src/layers/catch_panic/mod.rs +++ b/src/layers/catch_panic/mod.rs @@ -26,6 +26,12 @@ impl CatchPanicLayer) -> Error> { } } +impl Default for CatchPanicLayer) -> Error> { + fn default() -> Self { + Self::new() + } +} + impl CatchPanicLayer where F: FnMut(Box) -> Error + Clone, diff --git a/src/layers/prometheus/mod.rs b/src/layers/prometheus/mod.rs index 99507b59..a0d50085 100644 --- a/src/layers/prometheus/mod.rs +++ b/src/layers/prometheus/mod.rs @@ -11,7 +11,8 @@ use tower::{Layer, Service}; /// A layer to support prometheus metrics #[derive(Debug, Default)] -pub struct PrometheusLayer; +#[non_exhaustive] +pub struct PrometheusLayer {} impl Layer for PrometheusLayer { type Service = PrometheusService; From f6442fbec34f6a0390363a6cd55118a161e4b7ab Mon Sep 17 00:00:00 2001 From: Geoffrey Mureithi <95377562+geofmureithi@users.noreply.github.com> Date: Fri, 11 Oct 2024 07:07:52 +0300 Subject: [PATCH 51/59] chore: bump to 0.6.0-rc.8 (#430) --- Cargo.toml | 4 ++-- packages/apalis-core/Cargo.toml | 2 +- packages/apalis-cron/Cargo.toml | 4 ++-- packages/apalis-redis/Cargo.toml | 4 ++-- packages/apalis-sql/Cargo.toml | 4 ++-- 5 files changed, 9 insertions(+), 9 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index eef5f103..913e652f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -4,7 +4,7 @@ repository = "https://github.com/geofmureithi/apalis" [package] name = "apalis" -version = "0.6.0-rc.7" +version = "0.6.0-rc.8" authors = ["Geoffrey Mureithi "] description = "Simple, extensible multithreaded background job processing for Rust" edition.workspace = true @@ -58,7 +58,7 @@ layers = [ docsrs = ["document-features"] [dependencies.apalis-core] -version = "0.6.0-rc.7" +version = "0.6.0-rc.8" default-features = false path = "./packages/apalis-core" diff --git a/packages/apalis-core/Cargo.toml b/packages/apalis-core/Cargo.toml index 767e09b7..7f2424f8 100644 --- a/packages/apalis-core/Cargo.toml +++ b/packages/apalis-core/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apalis-core" -version = "0.6.0-rc.7" +version = "0.6.0-rc.8" authors = ["Njuguna Mureithi "] edition.workspace = true repository.workspace = true diff --git a/packages/apalis-cron/Cargo.toml b/packages/apalis-cron/Cargo.toml index 1242da08..bc9ba028 100644 --- a/packages/apalis-cron/Cargo.toml +++ b/packages/apalis-cron/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apalis-cron" -version = "0.6.0-rc.7" +version = "0.6.0-rc.8" edition.workspace = true repository.workspace = true authors = ["Njuguna Mureithi "] @@ -10,7 +10,7 @@ description = "A simple yet extensible library for cron-like job scheduling for # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -apalis-core = { path = "../../packages/apalis-core", version = "0.6.0-rc.7", default-features = false, features = [ +apalis-core = { path = "../../packages/apalis-core", version = "0.6.0-rc.8", default-features = false, features = [ "sleep", ] } cron = "0.12.1" diff --git a/packages/apalis-redis/Cargo.toml b/packages/apalis-redis/Cargo.toml index bd62b272..f7f7f364 100644 --- a/packages/apalis-redis/Cargo.toml +++ b/packages/apalis-redis/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apalis-redis" -version = "0.6.0-rc.7" +version = "0.6.0-rc.8" authors = ["Njuguna Mureithi "] edition.workspace = true repository.workspace = true @@ -12,7 +12,7 
@@ description = "Redis Storage for apalis: use Redis for background jobs and messa # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -apalis-core = { path = "../../packages/apalis-core", version = "0.6.0-rc.7", default-features = false, features = [ +apalis-core = { path = "../../packages/apalis-core", version = "0.6.0-rc.8", default-features = false, features = [ "sleep", "json", ] } diff --git a/packages/apalis-sql/Cargo.toml b/packages/apalis-sql/Cargo.toml index 7ecb7db8..7f147065 100644 --- a/packages/apalis-sql/Cargo.toml +++ b/packages/apalis-sql/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apalis-sql" -version = "0.6.0-rc.7" +version = "0.6.0-rc.8" authors = ["Njuguna Mureithi "] edition.workspace = true repository.workspace = true @@ -26,7 +26,7 @@ features = ["chrono"] [dependencies] serde = { version = "1", features = ["derive"] } serde_json = "1" -apalis-core = { path = "../../packages/apalis-core", version = "0.6.0-rc.7", default-features = false, features = [ +apalis-core = { path = "../../packages/apalis-core", version = "0.6.0-rc.8", default-features = false, features = [ "sleep", "json", ] } From ae9f221348cbf54558e0db9bd9559924163b8ffa Mon Sep 17 00:00:00 2001 From: Mathias Lafeldt Date: Wed, 13 Nov 2024 11:44:46 +0100 Subject: [PATCH 52/59] fix: apply max_attempts set via SqlContext (#447) So that a custom number of attempts can be configured: let mut ctx = SqlContext::new(); ctx.set_max_attempts(2); let req = Request::new_with_ctx(job, ctx); storage.push_request(req).await.unwrap(); While the default is still to try up to 25 times: storage.push(job).await.unwrap(); --- packages/apalis-sql/src/context.rs | 8 +++++++- packages/apalis-sql/src/from_row.rs | 15 +++++++++------ packages/apalis-sql/src/mysql.rs | 7 ++++--- packages/apalis-sql/src/postgres.rs | 6 ++++-- packages/apalis-sql/src/sqlite.rs | 6 ++++-- 5 files changed, 28 insertions(+), 14 deletions(-) diff --git a/packages/apalis-sql/src/context.rs b/packages/apalis-sql/src/context.rs index 44cb0632..84900ce7 100644 --- a/packages/apalis-sql/src/context.rs +++ b/packages/apalis-sql/src/context.rs @@ -8,7 +8,7 @@ use std::{fmt, str::FromStr}; /// The context for a job is represented here /// Used to provide a context for a job with an sql backend -#[derive(Debug, Clone, Serialize, Deserialize, Default)] +#[derive(Debug, Clone, Serialize, Deserialize)] pub struct SqlContext { status: State, run_at: DateTime, @@ -19,6 +19,12 @@ pub struct SqlContext { done_at: Option, } +impl Default for SqlContext { + fn default() -> Self { + Self::new() + } +} + impl SqlContext { /// Build a new context with defaults pub fn new() -> Self { diff --git a/packages/apalis-sql/src/from_row.rs b/packages/apalis-sql/src/from_row.rs index d242e4ae..88623c3a 100644 --- a/packages/apalis-sql/src/from_row.rs +++ b/packages/apalis-sql/src/from_row.rs @@ -77,8 +77,9 @@ impl<'r, T: Decode<'r, sqlx::Sqlite> + Type> let run_at: i64 = row.try_get("run_at")?; context.set_run_at(DateTime::from_timestamp(run_at, 0).unwrap_or_default()); - let max_attempts = row.try_get("max_attempts").unwrap_or(25); - context.set_max_attempts(max_attempts); + if let Ok(max_attempts) = row.try_get("max_attempts") { + context.set_max_attempts(max_attempts) + } let done_at: Option = row.try_get("done_at").unwrap_or_default(); context.set_done_at(done_at); @@ -139,8 +140,9 @@ impl<'r, T: Decode<'r, sqlx::Postgres> + Type> let run_at = row.try_get("run_at")?; context.set_run_at(run_at); - let max_attempts = 
row.try_get("max_attempts").unwrap_or(25); - context.set_max_attempts(max_attempts); + if let Ok(max_attempts) = row.try_get("max_attempts") { + context.set_max_attempts(max_attempts) + } let done_at: Option> = row.try_get("done_at").unwrap_or_default(); context.set_done_at(done_at.map(|d| d.timestamp())); @@ -200,8 +202,9 @@ impl<'r, T: Decode<'r, sqlx::MySql> + Type> sqlx::FromRow<'r, sqlx: let run_at = row.try_get("run_at")?; context.set_run_at(run_at); - let max_attempts = row.try_get("max_attempts").unwrap_or(25); - context.set_max_attempts(max_attempts); + if let Ok(max_attempts) = row.try_get("max_attempts") { + context.set_max_attempts(max_attempts) + } let done_at: Option = row.try_get("done_at").unwrap_or_default(); context.set_done_at(done_at.map(|d| d.and_utc().timestamp())); diff --git a/packages/apalis-sql/src/mysql.rs b/packages/apalis-sql/src/mysql.rs index f42dca31..f9a17963 100644 --- a/packages/apalis-sql/src/mysql.rs +++ b/packages/apalis-sql/src/mysql.rs @@ -233,7 +233,7 @@ where ) -> Result, sqlx::Error> { let (args, parts) = job.take_parts(); let query = - "INSERT INTO jobs VALUES (?, ?, ?, 'Pending', 0, 25, now(), NULL, NULL, NULL, NULL)"; + "INSERT INTO jobs VALUES (?, ?, ?, 'Pending', 0, ?, now(), NULL, NULL, NULL, NULL)"; let pool = self.pool.clone(); let job = C::encode(args) @@ -243,6 +243,7 @@ where .bind(job) .bind(parts.task_id.to_string()) .bind(job_type.to_string()) + .bind(parts.context.max_attempts()) .execute(&pool) .await?; Ok(parts) @@ -253,8 +254,7 @@ where req: Request, on: i64, ) -> Result, sqlx::Error> { - let query = - "INSERT INTO jobs VALUES (?, ?, ?, 'Pending', 0, 25, ?, NULL, NULL, NULL, NULL)"; + let query = "INSERT INTO jobs VALUES (?, ?, ?, 'Pending', 0, ?, ?, NULL, NULL, NULL, NULL)"; let pool = self.pool.clone(); let args = C::encode(&req.args) @@ -265,6 +265,7 @@ where .bind(args) .bind(req.parts.task_id.to_string()) .bind(job_type) + .bind(req.parts.context.max_attempts()) .bind(on) .execute(&pool) .await?; diff --git a/packages/apalis-sql/src/postgres.rs b/packages/apalis-sql/src/postgres.rs index 067b3e3f..31d6b93a 100644 --- a/packages/apalis-sql/src/postgres.rs +++ b/packages/apalis-sql/src/postgres.rs @@ -425,7 +425,7 @@ where &mut self, req: Request, ) -> Result, sqlx::Error> { - let query = "INSERT INTO apalis.jobs VALUES ($1, $2, $3, 'Pending', 0, 25, NOW() , NULL, NULL, NULL, NULL)"; + let query = "INSERT INTO apalis.jobs VALUES ($1, $2, $3, 'Pending', 0, $4, NOW() , NULL, NULL, NULL, NULL)"; let args = C::encode(&req.args) .map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::InvalidData, e)))?; @@ -434,6 +434,7 @@ where .bind(args) .bind(&req.parts.task_id.to_string()) .bind(&job_type) + .bind(&req.parts.context.max_attempts()) .execute(&self.pool) .await?; Ok(req.parts) @@ -445,7 +446,7 @@ where on: Timestamp, ) -> Result, sqlx::Error> { let query = - "INSERT INTO apalis.jobs VALUES ($1, $2, $3, 'Pending', 0, 25, $4, NULL, NULL, NULL, NULL)"; + "INSERT INTO apalis.jobs VALUES ($1, $2, $3, 'Pending', 0, $4, $5, NULL, NULL, NULL, NULL)"; let task_id = req.parts.task_id.to_string(); let parts = req.parts; let on = DateTime::from_timestamp(on, 0); @@ -456,6 +457,7 @@ where .bind(job) .bind(task_id) .bind(job_type) + .bind(&parts.context.max_attempts()) .bind(on) .execute(&self.pool) .await?; diff --git a/packages/apalis-sql/src/sqlite.rs b/packages/apalis-sql/src/sqlite.rs index c4cdb7cc..93d62490 100644 --- a/packages/apalis-sql/src/sqlite.rs +++ b/packages/apalis-sql/src/sqlite.rs @@ -233,7 +233,7 @@ where &mut self, 
job: Request, ) -> Result, Self::Error> { - let query = "INSERT INTO Jobs VALUES (?1, ?2, ?3, 'Pending', 0, 25, strftime('%s','now'), NULL, NULL, NULL, NULL)"; + let query = "INSERT INTO Jobs VALUES (?1, ?2, ?3, 'Pending', 0, ?4, strftime('%s','now'), NULL, NULL, NULL, NULL)"; let (task, parts) = job.take_parts(); let raw = C::encode(&task) .map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::InvalidData, e)))?; @@ -242,6 +242,7 @@ where .bind(raw) .bind(&parts.task_id.to_string()) .bind(job_type.to_string()) + .bind(&parts.context.max_attempts()) .execute(&self.pool) .await?; Ok(parts) @@ -253,7 +254,7 @@ where on: i64, ) -> Result, Self::Error> { let query = - "INSERT INTO Jobs VALUES (?1, ?2, ?3, 'Pending', 0, 25, ?4, NULL, NULL, NULL, NULL)"; + "INSERT INTO Jobs VALUES (?1, ?2, ?3, 'Pending', 0, ?4, ?5, NULL, NULL, NULL, NULL)"; let id = &req.parts.task_id; let job = C::encode(&req.args) .map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::InvalidData, e)))?; @@ -262,6 +263,7 @@ where .bind(job) .bind(id.to_string()) .bind(job_type) + .bind(&req.parts.context.max_attempts()) .bind(on) .execute(&self.pool) .await?; From 638242a4dd6831c11b258ca5dddf8d66a2173d33 Mon Sep 17 00:00:00 2001 From: zakstucke <44890343+zakstucke@users.noreply.github.com> Date: Tue, 19 Nov 2024 14:38:11 +0200 Subject: [PATCH 53/59] Bump redis (#442) --- Cargo.toml | 2 +- examples/redis-deadpool/Cargo.toml | 2 +- packages/apalis-redis/Cargo.toml | 2 +- packages/apalis-redis/src/storage.rs | 8 ++++---- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 913e652f..c599b78e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -87,7 +87,7 @@ apalis-sql = { path = "./packages/apalis-sql", features = [ "mysql", "sqlite", ] } -redis = { version = "0.25.3", default-features = false, features = [ +redis = { version = "0.27", default-features = false, features = [ "tokio-comp", "script", "aio", diff --git a/examples/redis-deadpool/Cargo.toml b/examples/redis-deadpool/Cargo.toml index 6ac893a1..be1ac8f6 100644 --- a/examples/redis-deadpool/Cargo.toml +++ b/examples/redis-deadpool/Cargo.toml @@ -4,7 +4,7 @@ version = "0.1.0" edition = "2021" [dependencies] -deadpool-redis = { version = "0.15.1" } +deadpool-redis = { version = "0.18" } anyhow = "1" tokio = { version = "1", features = ["full"] } apalis = { path = "../../", features = ["timeout"] } diff --git a/packages/apalis-redis/Cargo.toml b/packages/apalis-redis/Cargo.toml index f7f7f364..9286d52e 100644 --- a/packages/apalis-redis/Cargo.toml +++ b/packages/apalis-redis/Cargo.toml @@ -16,7 +16,7 @@ apalis-core = { path = "../../packages/apalis-core", version = "0.6.0-rc.8", def "sleep", "json", ] } -redis = { version = "0.25.4", default-features = false, features = [ +redis = { version = "0.27", default-features = false, features = [ "script", "aio", "connection-manager", diff --git a/packages/apalis-redis/src/storage.rs b/packages/apalis-redis/src/storage.rs index 729d62ec..a8b7d442 100644 --- a/packages/apalis-redis/src/storage.rs +++ b/packages/apalis-redis/src/storage.rs @@ -549,7 +549,7 @@ where .key(&signal_list) .arg(self.config.buffer_size) // No of jobs to fetch .arg(&inflight_set) - .invoke_async::<_, Vec>(&mut self.conn) + .invoke_async::>(&mut self.conn) .await; match result { @@ -579,11 +579,11 @@ fn build_error(message: &str) -> RedisError { fn deserialize_job(job: &Value) -> Result<&Vec, RedisError> { match job { - Value::Data(bytes) => Ok(bytes), - Value::Bulk(val) => val + Value::BulkString(bytes) => Ok(bytes), + 
Value::Array(val) | Value::Set(val) => val .first() .and_then(|val| { - if let Value::Data(bytes) = val { + if let Value::BulkString(bytes) = val { Some(bytes) } else { None From 31b6858242f86a342dcf2ac06abde47acaf59749 Mon Sep 17 00:00:00 2001 From: Mathias Lafeldt Date: Thu, 21 Nov 2024 17:53:30 +0100 Subject: [PATCH 54/59] feat: re-export sqlx (#451) Making sqlx accessible to users of apalis without requiring them to explicitly add it as a dependency. --- packages/apalis-sql/src/lib.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/packages/apalis-sql/src/lib.rs b/packages/apalis-sql/src/lib.rs index 9f6b4752..edc37126 100644 --- a/packages/apalis-sql/src/lib.rs +++ b/packages/apalis-sql/src/lib.rs @@ -36,6 +36,9 @@ pub mod sqlite; #[cfg_attr(docsrs, doc(cfg(feature = "mysql")))] pub mod mysql; +// Re-exports +pub use sqlx; + /// Config for sql storages #[derive(Debug, Clone)] pub struct Config { From 68276eef0b4f392a1e1ad70533b1484b06601b48 Mon Sep 17 00:00:00 2001 From: Geoffrey Mureithi <95377562+geofmureithi@users.noreply.github.com> Date: Thu, 21 Nov 2024 23:45:10 +0300 Subject: [PATCH 55/59] feat: Improve Worker management and drop Executor (#428) * feat: introducing WorkerBuilderExt which makes the work of building a new worker way easier. * improve: worker api almost there * fix: radical improvements and updates. Removed executor and got graceful shutdown working * chore: deprecate register with count and force builder order * chore: more improvements on the worker * fix: allow DI for Worker * add: get the task count by a worker * lint: fmt and clippy * fix: allow worker stopping --- README.md | 3 +- examples/actix-web/src/main.rs | 10 +- examples/async-std-runtime/src/main.rs | 28 +- examples/axum/src/main.rs | 6 +- examples/basics/src/layer.rs | 1 + examples/basics/src/main.rs | 30 +- examples/catch-panic/src/main.rs | 14 +- examples/cron/src/main.rs | 15 +- examples/fn-args/src/main.rs | 22 +- examples/graceful-shutdown/src/main.rs | 22 +- examples/mysql/src/main.rs | 8 +- examples/postgres/src/main.rs | 13 +- examples/prometheus/src/main.rs | 2 +- examples/redis-deadpool/src/main.rs | 2 +- examples/redis-mq-example/src/main.rs | 29 +- examples/redis-with-msg-pack/src/main.rs | 2 +- examples/redis/src/main.rs | 14 +- examples/sentry/src/main.rs | 9 +- examples/sqlite/src/main.rs | 10 +- examples/tracing/Cargo.toml | 2 +- examples/tracing/src/main.rs | 12 +- examples/unmonitored-worker/src/main.rs | 34 +- packages/apalis-core/Cargo.toml | 1 - packages/apalis-core/src/builder.rs | 2 +- packages/apalis-core/src/executor.rs | 7 - packages/apalis-core/src/lib.rs | 13 - packages/apalis-core/src/memory.rs | 2 +- packages/apalis-core/src/monitor/mod.rs | 263 +++---- packages/apalis-core/src/monitor/shutdown.rs | 9 +- packages/apalis-core/src/poller/mod.rs | 30 +- .../apalis-core/src/worker/buffer/error.rs | 68 -- .../apalis-core/src/worker/buffer/future.rs | 79 -- .../apalis-core/src/worker/buffer/message.rs | 16 - packages/apalis-core/src/worker/buffer/mod.rs | 5 - .../apalis-core/src/worker/buffer/service.rs | 149 ---- .../apalis-core/src/worker/buffer/worker.rs | 184 ----- packages/apalis-core/src/worker/mod.rs | 710 ++++++++---------- packages/apalis-core/src/worker/stream.rs | 55 -- packages/apalis-cron/README.md | 4 +- packages/apalis-cron/src/lib.rs | 4 +- packages/apalis-redis/src/lib.rs | 2 +- packages/apalis-redis/src/storage.rs | 6 +- packages/apalis-sql/src/lib.rs | 18 +- packages/apalis-sql/src/postgres.rs | 6 +- packages/apalis-sql/src/sqlite.rs | 2 +- 
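The redis 0.27 bump above is mostly mechanical: `invoke_async` drops its connection type parameter, `Value::Data` is now `Value::BulkString`, and `Value::Bulk` is now `Value::Array`, with RESP3 replies adding variants such as `Value::Set`. A sketch of the resulting matching shape, assuming redis 0.27:

    use redis::Value;

    // Pull raw job bytes out of a reply that may be a bulk string or the
    // first element of an array/set reply, mirroring deserialize_job above.
    fn first_bytes(value: &Value) -> Option<&Vec<u8>> {
        match value {
            Value::BulkString(bytes) => Some(bytes),
            Value::Array(items) | Value::Set(items) => items.first().and_then(|v| {
                if let Value::BulkString(bytes) = v {
                    Some(bytes)
                } else {
                    None
                }
            }),
            _ => None,
        }
    }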
src/layers/mod.rs | 282 +++++++ src/layers/tracing/make_span.rs | 6 +- src/layers/tracing/on_request.rs | 10 +- src/layers/tracing/on_response.rs | 8 +- src/lib.rs | 40 +- 50 files changed, 856 insertions(+), 1413 deletions(-) delete mode 100644 packages/apalis-core/src/executor.rs delete mode 100644 packages/apalis-core/src/worker/buffer/error.rs delete mode 100644 packages/apalis-core/src/worker/buffer/future.rs delete mode 100644 packages/apalis-core/src/worker/buffer/message.rs delete mode 100644 packages/apalis-core/src/worker/buffer/mod.rs delete mode 100644 packages/apalis-core/src/worker/buffer/service.rs delete mode 100644 packages/apalis-core/src/worker/buffer/worker.rs delete mode 100644 packages/apalis-core/src/worker/stream.rs diff --git a/README.md b/README.md index 0b2234d7..63f36105 100644 --- a/README.md +++ b/README.md @@ -90,8 +90,9 @@ async fn main() -> { let conn = apalis_redis::connect(redis_url).await.expect("Could not connect"); let storage = RedisStorage::new(conn); Monitor::new() - .register_with_count(2, { + .register({ WorkerBuilder::new(format!("email-worker")) + .concurrency(2) .data(0usize) .backend(storage) .build_fn(send_email) diff --git a/examples/actix-web/src/main.rs b/examples/actix-web/src/main.rs index 472eec8e..39c786b5 100644 --- a/examples/actix-web/src/main.rs +++ b/examples/actix-web/src/main.rs @@ -1,9 +1,8 @@ use actix_web::rt::signal; use actix_web::{web, App, HttpResponse, HttpServer}; use anyhow::Result; -use apalis::layers::tracing::TraceLayer; use apalis::prelude::*; -use apalis::utils::TokioExecutor; + use apalis_redis::RedisStorage; use futures::future; @@ -41,10 +40,11 @@ async fn main() -> Result<()> { .await?; Ok(()) }; - let worker = Monitor::::new() - .register_with_count(2, { + let worker = Monitor::new() + .register({ WorkerBuilder::new("tasty-avocado") - .layer(TraceLayer::new()) + .enable_tracing() + // .concurrency(2) .backend(storage) .build_fn(send_email) }) diff --git a/examples/async-std-runtime/src/main.rs b/examples/async-std-runtime/src/main.rs index 0b9c7ad3..0d767f12 100644 --- a/examples/async-std-runtime/src/main.rs +++ b/examples/async-std-runtime/src/main.rs @@ -1,15 +1,15 @@ -use std::{future::Future, str::FromStr, time::Duration}; +use std::{str::FromStr, time::Duration}; use anyhow::Result; use apalis::{ - layers::{retry::RetryLayer, retry::RetryPolicy, tracing::MakeSpan, tracing::TraceLayer}, + layers::{retry::RetryPolicy, tracing::MakeSpan, tracing::TraceLayer}, prelude::*, }; use apalis_cron::{CronStream, Schedule}; use chrono::{DateTime, Utc}; use tracing::{debug, info, Instrument, Level, Span}; -type WorkerCtx = Data>; +type WorkerCtx = Worker; #[derive(Default, Debug, Clone)] struct Reminder(DateTime); @@ -26,7 +26,7 @@ async fn send_in_background(reminder: Reminder) { } async fn send_reminder(reminder: Reminder, worker: WorkerCtx) -> bool { // this will happen in the workers background and wont block the next tasks - worker.spawn(send_in_background(reminder).in_current_span()); + async_std::task::spawn(worker.track(send_in_background(reminder).in_current_span())); false } @@ -42,12 +42,12 @@ async fn main() -> Result<()> { let schedule = Schedule::from_str("1/1 * * * * *").unwrap(); let worker = WorkerBuilder::new("daily-cron-worker") - .layer(RetryLayer::new(RetryPolicy::retries(5))) + .retry(RetryPolicy::retries(5)) .layer(TraceLayer::new().make_span_with(ReminderSpan::new())) .backend(CronStream::new(schedule)) .build_fn(send_reminder); - Monitor::::new() + Monitor::new() .register(worker) 
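Every example in this patch migrates the same way, so one sketch covers the pattern (worker name, backend, and job type are placeholders; `concurrency` and `enable_tracing` are the builder methods this patch introduces):

    use apalis::prelude::*;
    use email_service::{send_email, Email};

    async fn start(storage: apalis_redis::RedisStorage<Email>) -> std::io::Result<()> {
        // 0.5: Monitor::<TokioExecutor>::new().register_with_count(2, worker)
        // 0.6: no executor type parameter; concurrency lives on the worker itself.
        Monitor::new()
            .register(
                WorkerBuilder::new("email-worker")
                    .concurrency(2)
                    .enable_tracing()
                    .backend(storage)
                    .build_fn(send_email),
            )
            .run()
            .await
    }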
.on_event(|e| debug!("Worker event: {e:?}")) .run_with_signal(async { @@ -59,22 +59,6 @@ async fn main() -> Result<()> { Ok(()) } -#[derive(Clone, Debug, Default)] -pub struct AsyncStdExecutor; - -impl AsyncStdExecutor { - /// A new async-std executor - pub fn new() -> Self { - Self - } -} - -impl Executor for AsyncStdExecutor { - fn spawn(&self, fut: impl Future + Send + 'static) { - async_std::task::spawn(fut); - } -} - #[derive(Debug, Clone)] pub struct ReminderSpan { level: Level, diff --git a/examples/axum/src/main.rs b/examples/axum/src/main.rs index 3e0e4da2..99b3146e 100644 --- a/examples/axum/src/main.rs +++ b/examples/axum/src/main.rs @@ -4,7 +4,7 @@ //! cd examples && cargo run -p axum-example //! ``` use anyhow::Result; -use apalis::layers::tracing::TraceLayer; + use apalis::prelude::*; use apalis_redis::RedisStorage; use axum::{ @@ -73,10 +73,10 @@ async fn main() -> Result<()> { .map_err(|e| Error::new(std::io::ErrorKind::Interrupted, e)) }; let monitor = async { - Monitor::::new() + Monitor::new() .register({ WorkerBuilder::new("tasty-pear") - .layer(TraceLayer::new()) + .enable_tracing() .backend(storage.clone()) .build_fn(send_email) }) diff --git a/examples/basics/src/layer.rs b/examples/basics/src/layer.rs index d8da32a1..6c817f61 100644 --- a/examples/basics/src/layer.rs +++ b/examples/basics/src/layer.rs @@ -8,6 +8,7 @@ use tower::{Layer, Service}; use tracing::info; /// A layer that logs a job info before it starts +#[derive(Debug, Clone)] pub struct LogLayer { target: &'static str, } diff --git a/examples/basics/src/main.rs b/examples/basics/src/main.rs index ff492443..82043b93 100644 --- a/examples/basics/src/main.rs +++ b/examples/basics/src/main.rs @@ -4,10 +4,7 @@ mod service; use std::{sync::Arc, time::Duration}; -use apalis::{ - layers::{catch_panic::CatchPanicLayer, tracing::TraceLayer}, - prelude::*, -}; +use apalis::{layers::catch_panic::CatchPanicLayer, prelude::*}; use apalis_sql::sqlite::{SqlitePool, SqliteStorage}; use email_service::Email; @@ -15,7 +12,7 @@ use layer::LogLayer; use tracing::{log::info, Instrument, Span}; -type WorkerCtx = Context; +type WorkerCtx = Context; use crate::{cache::ValidEmailCache, service::EmailService}; @@ -72,14 +69,16 @@ async fn send_email( // This can be important for starting long running jobs that don't block the queue // Its also possible to acquire context types and clone them into the futures context. // They will also be gracefully shutdown if [`Monitor`] has a shutdown signal - worker_ctx.spawn( - async move { - if cache::fetch_validity(email_to, &cache_clone).await { - svc.send(email).await; - info!("Email added to cache") + tokio::spawn( + worker_ctx.track( + async move { + if cache::fetch_validity(email_to, &cache_clone).await { + svc.send(email).await; + info!("Email added to cache") + } } - } - .instrument(Span::current()), // Its still gonna use the jobs current tracing span. Important eg using sentry. + .instrument(Span::current()), + ), // Its still gonna use the jobs current tracing span. Important eg using sentry. 
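With the `Executor` trait gone, handlers spawn background work on whatever runtime is ambient and wrap the future in `Worker::track`, as the two examples above do, so the worker's task count and graceful shutdown still see it. The core pattern, with a placeholder job type:

    use apalis::prelude::*;
    use serde::{Deserialize, Serialize};

    #[derive(Debug, Serialize, Deserialize)]
    struct Ping; // hypothetical job type

    async fn handle(_job: Ping, worker: Worker<Context>) {
        // `track` ties the spawned future to this worker, so shutdown waits
        // for it and has_pending_tasks() reports it; the spawn itself can be
        // tokio, async-std, or anything else.
        tokio::spawn(worker.track(async move {
            // long-running side work, off the queue's critical path
        }));
    }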
); } @@ -102,10 +101,12 @@ async fn main() -> Result<(), std::io::Error> { let sqlite: SqliteStorage = SqliteStorage::new(pool); produce_jobs(&sqlite).await; - Monitor::::new() + Monitor::new() .register({ WorkerBuilder::new("tasty-banana") // This handles any panics that may occur in any of the layers below + // .catch_panic() + // Or just to customize .layer(CatchPanicLayer::with_panic_handler(|e| { let panic_info = if let Some(s) = e.downcast_ref::<&str>() { s.to_string() @@ -114,9 +115,10 @@ async fn main() -> Result<(), std::io::Error> { } else { "Unknown panic".to_string() }; + // Abort tells the backend to kill job Error::Abort(Arc::new(Box::new(PanicError::Panic(panic_info)))) })) - .layer(TraceLayer::new()) + .enable_tracing() .layer(LogLayer::new("some-log-example")) // Add shared context to all jobs executed by this worker .data(EmailService::new()) diff --git a/examples/catch-panic/src/main.rs b/examples/catch-panic/src/main.rs index 4eca4e0a..de87e094 100644 --- a/examples/catch-panic/src/main.rs +++ b/examples/catch-panic/src/main.rs @@ -1,7 +1,6 @@ use anyhow::Result; -use apalis::layers::catch_panic::CatchPanicLayer; -use apalis::utils::TokioExecutor; -use apalis::{layers::tracing::TraceLayer, prelude::*}; +use apalis::prelude::*; + use apalis_sql::sqlite::SqliteStorage; use email_service::Email; @@ -39,11 +38,12 @@ async fn main() -> Result<()> { produce_emails(&mut email_storage).await?; - Monitor::::new() - .register_with_count(2, { + Monitor::new() + .register({ WorkerBuilder::new("tasty-banana") - .layer(CatchPanicLayer::new()) - .layer(TraceLayer::new()) + .catch_panic() + .enable_tracing() + .concurrency(2) .backend(email_storage) .build_fn(send_email) }) diff --git a/examples/cron/src/main.rs b/examples/cron/src/main.rs index 4a22dfe5..68264520 100644 --- a/examples/cron/src/main.rs +++ b/examples/cron/src/main.rs @@ -1,12 +1,11 @@ -use apalis::layers::tracing::TraceLayer; use apalis::prelude::*; -use apalis::utils::TokioExecutor; + use apalis_cron::CronStream; use apalis_cron::Schedule; use chrono::{DateTime, Utc}; use std::str::FromStr; use std::time::Duration; -use tower::limit::RateLimitLayer; +// use std::time::Duration; use tower::load_shed::LoadShedLayer; #[derive(Clone)] @@ -32,15 +31,11 @@ async fn send_reminder(job: Reminder, svc: Data) { async fn main() { let schedule = Schedule::from_str("1/1 * * * * *").unwrap(); let worker = WorkerBuilder::new("morning-cereal") - .layer(TraceLayer::new()) + .enable_tracing() .layer(LoadShedLayer::new()) // Important when you have layers that block the service - .layer(RateLimitLayer::new(1, Duration::from_secs(2))) + .rate_limit(1, Duration::from_secs(2)) .data(FakeService) .backend(CronStream::new(schedule)) .build_fn(send_reminder); - Monitor::::new() - .register_with_count(2, worker) - .run() - .await - .unwrap(); + Monitor::new().register(worker).run().await.unwrap(); } diff --git a/examples/fn-args/src/main.rs b/examples/fn-args/src/main.rs index 1e5655f6..99c3949c 100644 --- a/examples/fn-args/src/main.rs +++ b/examples/fn-args/src/main.rs @@ -6,7 +6,7 @@ use std::{ }, }; -use apalis::{prelude::*, utils::TokioExecutor}; +use apalis::prelude::*; use apalis_sql::{ context::SqlContext, sqlite::{SqlitePool, SqliteStorage}, @@ -19,18 +19,17 @@ struct SimpleJob {} // A task can have up to 16 arguments async fn simple_job( - _: SimpleJob, // Required, must be of the type of the job/message - worker_id: Data, // The worker running the job, added by worker - _worker_ctx: Data>, // The worker context, added by worker + _: 
SimpleJob, // Required, must be of the type of the job/message + worker: Worker, // The worker and its context, added by worker _sqlite: Data>, // The source, added by storage - task_id: TaskId, // The task id, added by storage - attempt: Attempt, // The current attempt - ctx: SqlContext, // The task context provided by the backend - count: Data, // Our custom data added via layer + task_id: TaskId, // The task id, added by storage + attempt: Attempt, // The current attempt + ctx: SqlContext, // The task context provided by the backend + count: Data, // Our custom data added via layer ) { // increment the counter let current = count.fetch_add(1, Ordering::Relaxed); - info!("worker: {worker_id:?}; task_id: {task_id:?}, ctx: {ctx:?}, attempt:{attempt:?} count: {current:?}"); + info!("worker: {worker:?}; task_id: {task_id:?}, ctx: {ctx:?}, attempt:{attempt:?} count: {current:?}"); } async fn produce_jobs(storage: &mut SqliteStorage) { @@ -59,11 +58,12 @@ async fn main() -> Result<(), std::io::Error> { .expect("unable to run migrations for sqlite"); let mut sqlite: SqliteStorage = SqliteStorage::new(pool); produce_jobs(&mut sqlite).await; - Monitor::::new() - .register_with_count(2, { + Monitor::new() + .register({ WorkerBuilder::new("tasty-banana") .data(Count::default()) .data(sqlite.clone()) + .concurrency(2) .backend(sqlite) .build_fn(simple_job) }) diff --git a/examples/graceful-shutdown/src/main.rs b/examples/graceful-shutdown/src/main.rs index 3a2006a5..c0566039 100644 --- a/examples/graceful-shutdown/src/main.rs +++ b/examples/graceful-shutdown/src/main.rs @@ -1,6 +1,6 @@ use std::time::Duration; -use apalis::{prelude::*, utils::TokioExecutor}; +use apalis::prelude::*; use apalis_sql::sqlite::{SqlitePool, SqliteStorage}; use serde::{Deserialize, Serialize}; use tracing::info; @@ -8,15 +8,16 @@ use tracing::info; #[derive(Debug, Serialize, Deserialize)] struct LongRunningJob {} -async fn long_running_task(_task: LongRunningJob, worker_ctx: Context) { +async fn long_running_task(_task: LongRunningJob, worker: Worker) { loop { - tokio::time::sleep(Duration::from_secs(1)).await; // Do some hard thing - info!("is_shutting_down: {}", worker_ctx.is_shutting_down(),); - if worker_ctx.is_shutting_down() { + info!("is_shutting_down: {}", worker.is_shutting_down()); + if worker.is_shutting_down() { info!("saving the job state"); break; } + tokio::time::sleep(Duration::from_secs(3)).await; // Do some hard thing } + info!("Shutdown complete!"); } async fn produce_jobs(storage: &mut SqliteStorage) { @@ -33,14 +34,17 @@ async fn main() -> Result<(), std::io::Error> { .expect("unable to run migrations for sqlite"); let mut sqlite: SqliteStorage = SqliteStorage::new(pool); produce_jobs(&mut sqlite).await; - Monitor::::new() - .register_with_count(2, { + Monitor::new() + .register({ WorkerBuilder::new("tasty-banana") + .concurrency(2) + .enable_tracing() .backend(sqlite) .build_fn(long_running_task) }) - // Wait 10 seconds after shutdown is triggered to allow any incomplete jobs to complete - .shutdown_timeout(Duration::from_secs(10)) + .on_event(|e| info!("{e}")) + // Wait 5 seconds after shutdown is triggered to allow any incomplete jobs to complete + .shutdown_timeout(Duration::from_secs(5)) // Use .run() if you don't want without signals .run_with_signal(tokio::signal::ctrl_c()) // This will wait for ctrl+c then gracefully shutdown .await?; diff --git a/examples/mysql/src/main.rs b/examples/mysql/src/main.rs index 139d1714..cf91e787 100644 --- a/examples/mysql/src/main.rs +++ 
b/examples/mysql/src/main.rs @@ -1,5 +1,5 @@ use anyhow::Result; -use apalis::layers::tracing::TraceLayer; + use apalis::prelude::*; use apalis_sql::mysql::MySqlPool; use apalis_sql::mysql::MysqlStorage; @@ -33,10 +33,10 @@ async fn main() -> Result<()> { let mysql: MysqlStorage = MysqlStorage::new(pool); produce_jobs(&mysql).await?; - Monitor::new_with_executor(TokioExecutor) - .register_with_count(1, { + Monitor::new() + .register({ WorkerBuilder::new("tasty-avocado") - .layer(TraceLayer::new()) + .enable_tracing() .backend(mysql) .build_fn(send_email) }) diff --git a/examples/postgres/src/main.rs b/examples/postgres/src/main.rs index cece2db3..1b4ba9bb 100644 --- a/examples/postgres/src/main.rs +++ b/examples/postgres/src/main.rs @@ -1,10 +1,9 @@ use anyhow::Result; use apalis::layers::retry::RetryPolicy; -use apalis::layers::tracing::TraceLayer; + use apalis::prelude::*; use apalis_sql::postgres::{PgListen, PgPool, PostgresStorage}; use email_service::{send_email, Email}; -use tower::retry::RetryLayer; use tracing::{debug, info}; async fn produce_jobs(storage: &mut PostgresStorage) -> Result<()> { @@ -44,15 +43,15 @@ async fn main() -> Result<()> { listener.listen().await.unwrap(); }); - Monitor::::new() - .register_with_count(4, { + Monitor::new() + .register({ WorkerBuilder::new("tasty-orange") - .layer(TraceLayer::new()) - .layer(RetryLayer::new(RetryPolicy::retries(5))) + .enable_tracing() + .retry(RetryPolicy::retries(5)) .backend(pg) .build_fn(send_email) }) - .on_event(|e| debug!("{e:?}")) + .on_event(|e| debug!("{e}")) .run_with_signal(async { tokio::signal::ctrl_c().await?; info!("Shutting down the system"); diff --git a/examples/prometheus/src/main.rs b/examples/prometheus/src/main.rs index eaa334a2..0f85eb07 100644 --- a/examples/prometheus/src/main.rs +++ b/examples/prometheus/src/main.rs @@ -48,7 +48,7 @@ async fn main() -> Result<()> { .map_err(|e| std::io::Error::new(std::io::ErrorKind::BrokenPipe, e)) }; let monitor = async { - Monitor::::new() + Monitor::new() .register({ WorkerBuilder::new("tasty-banana") .layer(PrometheusLayer::default()) diff --git a/examples/redis-deadpool/src/main.rs b/examples/redis-deadpool/src/main.rs index 9d625934..74f5829d 100644 --- a/examples/redis-deadpool/src/main.rs +++ b/examples/redis-deadpool/src/main.rs @@ -30,7 +30,7 @@ async fn main() -> Result<()> { .backend(storage) .build_fn(send_email); - Monitor::::new() + Monitor::new() .register(worker) .shutdown_timeout(Duration::from_millis(5000)) .run_with_signal(async { diff --git a/examples/redis-mq-example/src/main.rs b/examples/redis-mq-example/src/main.rs index 5f8f7408..9a73e616 100644 --- a/examples/redis-mq-example/src/main.rs +++ b/examples/redis-mq-example/src/main.rs @@ -1,6 +1,6 @@ use std::{fmt::Debug, marker::PhantomData, time::Duration}; -use apalis::{layers::tracing::TraceLayer, prelude::*}; +use apalis::prelude::*; use apalis_redis::{self, Config}; @@ -14,7 +14,7 @@ use futures::{channel::mpsc, SinkExt}; use rsmq_async::{Rsmq, RsmqConnection, RsmqError}; use serde::{de::DeserializeOwned, Deserialize, Serialize}; use tokio::time::sleep; -use tracing::{error, info}; +use tracing::info; struct RedisMq>> { conn: Rsmq, @@ -113,7 +113,7 @@ where type Error = RsmqError; async fn enqueue(&mut self, message: Message) -> Result<(), Self::Error> { - let bytes = C::encode(&Request::::new(message)) + let bytes = C::encode(Request::::new(message)) .map_err(Into::into) .unwrap(); self.conn @@ -173,28 +173,13 @@ async fn main() -> anyhow::Result<()> { produce_jobs(&mut mq).await?; let 
worker = WorkerBuilder::new("rango-tango") - .layer(TraceLayer::new()) + .enable_tracing() .backend(mq) .build_fn(send_email); - Monitor::::new() - .register_with_count(2, worker) - .on_event(|e| { - let worker_id = e.id(); - match e.inner() { - Event::Start => { - info!("Worker [{worker_id}] started"); - } - Event::Error(e) => { - error!("Worker [{worker_id}] encountered an error: {e}"); - } - - Event::Exit => { - info!("Worker [{worker_id}] exited"); - } - _ => {} - } - }) + Monitor::new() + .register(worker) + .on_event(|e| info!("{e}")) .shutdown_timeout(Duration::from_millis(5000)) .run_with_signal(async { tokio::signal::ctrl_c().await?; diff --git a/examples/redis-with-msg-pack/src/main.rs b/examples/redis-with-msg-pack/src/main.rs index ce5e57a5..61613c50 100644 --- a/examples/redis-with-msg-pack/src/main.rs +++ b/examples/redis-with-msg-pack/src/main.rs @@ -44,7 +44,7 @@ async fn main() -> Result<()> { .backend(storage) .build_fn(send_email); - Monitor::::new() + Monitor::new() .register(worker) .shutdown_timeout(Duration::from_millis(5000)) .run_with_signal(async { diff --git a/examples/redis/src/main.rs b/examples/redis/src/main.rs index 32a16a3f..708825e9 100644 --- a/examples/redis/src/main.rs +++ b/examples/redis/src/main.rs @@ -1,10 +1,8 @@ use std::time::Duration; use anyhow::Result; -use apalis::layers::limit::{ConcurrencyLimitLayer, RateLimitLayer}; -use apalis::layers::tracing::TraceLayer; use apalis::layers::ErrorHandlingLayer; -use apalis::{layers::TimeoutLayer, prelude::*}; +use apalis::prelude::*; use apalis_redis::RedisStorage; use email_service::{send_email, Email}; @@ -36,14 +34,14 @@ async fn main() -> Result<()> { let worker = WorkerBuilder::new("rango-tango") .layer(ErrorHandlingLayer::new()) - .layer(TraceLayer::new()) - .layer(RateLimitLayer::new(5, Duration::from_secs(1))) - .layer(TimeoutLayer::new(Duration::from_millis(500))) - .layer(ConcurrencyLimitLayer::new(2)) + .enable_tracing() + .rate_limit(5, Duration::from_secs(1)) + .timeout(Duration::from_millis(500)) + .concurrency(2) .backend(storage) .build_fn(send_email); - Monitor::::new() + Monitor::new() .register(worker) .on_event(|e| { let worker_id = e.id(); diff --git a/examples/sentry/src/main.rs b/examples/sentry/src/main.rs index 39b96353..083734cc 100644 --- a/examples/sentry/src/main.rs +++ b/examples/sentry/src/main.rs @@ -6,7 +6,7 @@ use std::time::Duration; use tracing_subscriber::prelude::*; use anyhow::Result; -use apalis::layers::tracing::TraceLayer; + use apalis::{layers::sentry::SentryLayer, prelude::*}; use apalis_redis::RedisStorage; use email_service::Email; @@ -132,12 +132,13 @@ async fn main() -> Result<()> { //This can be in another part of the program produce_jobs(storage.clone()).await?; - Monitor::::new() - .register_with_count(2, { + Monitor::new() + .register({ WorkerBuilder::new("tasty-avocado") .layer(NewSentryLayer::new_from_top()) .layer(SentryLayer::new()) - .layer(TraceLayer::new()) + .enable_tracing() + .concurrency(2) .backend(storage.clone()) .build_fn(email_service) }) diff --git a/examples/sqlite/src/main.rs b/examples/sqlite/src/main.rs index 802a4ff7..0b43210a 100644 --- a/examples/sqlite/src/main.rs +++ b/examples/sqlite/src/main.rs @@ -1,8 +1,8 @@ mod job; use anyhow::Result; -use apalis::utils::TokioExecutor; -use apalis::{layers::tracing::TraceLayer, prelude::*}; +use apalis::prelude::*; + use apalis_sql::sqlite::SqliteStorage; use chrono::Utc; use email_service::{send_email, Email}; @@ -58,16 +58,16 @@ async fn main() -> Result<()> { 
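Worker events now implement `Display`, which is why most examples collapse to `.on_event(|e| info!("{e}"))`; the granular form kept in the redis example above still works when per-variant behavior is needed. A sketch, assuming `Worker<Event>` keeps exposing `id()`/`inner()` as that handler does:

    use apalis::prelude::*;
    use tracing::{error, info};

    fn with_logging(monitor: Monitor) -> Monitor {
        monitor.on_event(|e| {
            let id = e.id();
            match e.inner() {
                Event::Start => info!("worker [{id}] started"),
                Event::Error(err) => error!("worker [{id}] errored: {err}"),
                Event::Exit => info!("worker [{id}] exited"),
                _ => {}
            }
        })
    }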
produce_notifications(&notification_storage).await?; - Monitor::::new() + Monitor::new() .register({ WorkerBuilder::new("tasty-banana") - .layer(TraceLayer::new()) + .enable_tracing() .backend(email_storage) .build_fn(send_email) }) .register({ WorkerBuilder::new("tasty-mango") - // .layer(TraceLayer::new()) + // .enable_tracing() .backend(notification_storage) .build_fn(job::notify) }) diff --git a/examples/tracing/Cargo.toml b/examples/tracing/Cargo.toml index 07f5159a..3f15f84d 100644 --- a/examples/tracing/Cargo.toml +++ b/examples/tracing/Cargo.toml @@ -10,7 +10,7 @@ anyhow = "1" apalis = { path = "../../" } apalis-redis = { path = "../../packages/apalis-redis" } serde = "1" -tokio = { version = "1", features = ["macros"] } +tokio = { version = "1", features = ["full"] } env_logger = "0.10" tracing-subscriber = { version = "0.3.11", features = ["env-filter", "json"] } chrono = { version = "0.4", default-features = false, features = ["clock"] } diff --git a/examples/tracing/src/main.rs b/examples/tracing/src/main.rs index 5e778692..30784796 100644 --- a/examples/tracing/src/main.rs +++ b/examples/tracing/src/main.rs @@ -1,10 +1,6 @@ use anyhow::Result; - -use apalis::layers::tracing::TraceLayer; -use apalis::{ - prelude::{Monitor, Storage, WorkerBuilder, WorkerFactoryFn}, - utils::TokioExecutor, -}; +use apalis::layers::WorkerBuilderExt; +use apalis::prelude::{Monitor, Storage, WorkerBuilder, WorkerFactoryFn}; use apalis_redis::RedisStorage; use std::error::Error; use std::fmt; @@ -69,10 +65,10 @@ async fn main() -> Result<()> { //This can be in another part of the program produce_jobs(storage.clone()).await?; - Monitor::::new() + Monitor::new() .register( WorkerBuilder::new("tasty-avocado") - .chain(|srv| srv.layer(TraceLayer::new())) + .enable_tracing() .backend(storage) .build_fn(email_service), ) diff --git a/examples/unmonitored-worker/src/main.rs b/examples/unmonitored-worker/src/main.rs index 7c14a1bc..7bafa856 100644 --- a/examples/unmonitored-worker/src/main.rs +++ b/examples/unmonitored-worker/src/main.rs @@ -1,23 +1,36 @@ use std::time::Duration; -use apalis::{prelude::*, utils::TokioExecutor}; +use apalis::prelude::*; use apalis_sql::sqlite::{SqlitePool, SqliteStorage}; use serde::{Deserialize, Serialize}; use tracing::info; #[derive(Debug, Serialize, Deserialize)] -struct SelfMonitoringJob {} +struct SelfMonitoringJob { + id: i32, +} -async fn self_monitoring_task(task: SelfMonitoringJob, worker_ctx: Context) { - info!("task: {:?}, {:?}", task, worker_ctx); - tokio::time::sleep(Duration::from_secs(5)).await; // Do some hard thing - info!("done with task, stopping worker gracefully"); - // use worker_ctx.force_stop() to stop immediately - worker_ctx.stop(); +async fn self_monitoring_task(task: SelfMonitoringJob, worker: Worker) { + info!("task: {:?}, {:?}", task, worker); + if task.id == 1 { + tokio::spawn(async move { + loop { + tokio::time::sleep(Duration::from_secs(1)).await; + if !worker.has_pending_tasks() { + info!("done with all tasks, stopping worker"); + worker.stop(); + break; + } + } + }); + } + tokio::time::sleep(Duration::from_secs(5)).await; } async fn produce_jobs(storage: &mut SqliteStorage) { - storage.push(SelfMonitoringJob {}).await.unwrap(); + for id in 0..100 { + storage.push(SelfMonitoringJob { id }).await.unwrap(); + } } #[tokio::main] @@ -32,9 +45,10 @@ async fn main() -> Result<(), std::io::Error> { produce_jobs(&mut sqlite).await; WorkerBuilder::new("tasty-banana") + .concurrency(2) .backend(sqlite) .build_fn(self_monitoring_task) - 
.with_executor(TokioExecutor) + .on_event(|e| info!("{e}")) .run() .await; Ok(()) diff --git a/packages/apalis-core/Cargo.toml b/packages/apalis-core/Cargo.toml index 7f2424f8..d2e4d319 100644 --- a/packages/apalis-core/Cargo.toml +++ b/packages/apalis-core/Cargo.toml @@ -17,7 +17,6 @@ serde = { version = "1.0", features = ["derive"] } futures = { version = "0.3.30", features = ["async-await"] } tower = { version = "0.4", features = ["util"], default-features = false } pin-project-lite = "0.2.14" -async-oneshot = "0.5.9" thiserror = "1.0.59" ulid = { version = "1.1.2", default-features = false, features = ["std"] } futures-timer = { version = "3.0.3", optional = true } diff --git a/packages/apalis-core/src/builder.rs b/packages/apalis-core/src/builder.rs index bc9bd21f..aca69c91 100644 --- a/packages/apalis-core/src/builder.rs +++ b/packages/apalis-core/src/builder.rs @@ -96,7 +96,7 @@ impl WorkerBuilder { /// Allows adding multiple [`tower`] middleware pub fn chain( self, - f: impl Fn(ServiceBuilder) -> ServiceBuilder, + f: impl FnOnce(ServiceBuilder) -> ServiceBuilder, ) -> WorkerBuilder { let middleware = f(self.layer); diff --git a/packages/apalis-core/src/executor.rs b/packages/apalis-core/src/executor.rs deleted file mode 100644 index 58919641..00000000 --- a/packages/apalis-core/src/executor.rs +++ /dev/null @@ -1,7 +0,0 @@ -use futures::Future; - -/// An Executor that is used to spawn futures -pub trait Executor { - /// Spawns a new asynchronous task - fn spawn(&self, future: impl Future + Send + 'static); -} diff --git a/packages/apalis-core/src/lib.rs b/packages/apalis-core/src/lib.rs index 42dbe88d..f5475bb6 100644 --- a/packages/apalis-core/src/lib.rs +++ b/packages/apalis-core/src/lib.rs @@ -33,8 +33,6 @@ use worker::WorkerId; pub mod builder; /// Includes all possible error types. pub mod error; -/// Represents an executor. 
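With the executor parameter gone everywhere, the smallest standalone (monitor-less) worker is essentially the unmonitored-worker example above, reduced to a sketch with a placeholder job:

    use apalis::prelude::*;
    use apalis_sql::sqlite::{SqlitePool, SqliteStorage};
    use serde::{Deserialize, Serialize};

    #[derive(Debug, Serialize, Deserialize)]
    struct Tick; // hypothetical job type

    async fn handle(_job: Tick) { /* do the work */ }

    #[tokio::main]
    async fn main() -> Result<(), std::io::Error> {
        let pool = SqlitePool::connect("sqlite::memory:").await.unwrap();
        SqliteStorage::setup(&pool).await.expect("migrations failed");
        let storage: SqliteStorage<Tick> = SqliteStorage::new(pool);

        // No Monitor and no executor: the worker drives itself on the
        // ambient runtime until stopped.
        WorkerBuilder::new("standalone")
            .concurrency(2)
            .backend(storage)
            .build_fn(handle)
            .on_event(|e| tracing::info!("{e}"))
            .run()
            .await;
        Ok(())
    }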
-pub mod executor; /// Represents middleware offered through [`tower`] pub mod layers; /// Represents monitoring of running workers @@ -161,17 +159,6 @@ pub mod interval { } } -#[cfg(test)] -#[doc(hidden)] -#[derive(Debug, Default, Clone)] -pub(crate) struct TestExecutor; -#[cfg(test)] -impl crate::executor::Executor for TestExecutor { - fn spawn(&self, future: impl futures::prelude::Future + Send + 'static) { - tokio::spawn(future); - } -} - #[cfg(feature = "test-utils")] /// Test utilities that allows you to test backends pub mod test_utils { diff --git a/packages/apalis-core/src/memory.rs b/packages/apalis-core/src/memory.rs index 731c4505..56459927 100644 --- a/packages/apalis-core/src/memory.rs +++ b/packages/apalis-core/src/memory.rs @@ -105,7 +105,7 @@ impl Backend, Res> for MemoryStora let stream = self.inner.map(|r| Ok(Some(r))).boxed(); Poller { stream: BackendStream::new(stream, self.controller), - heartbeat: Box::pin(async {}), + heartbeat: Box::pin(futures::future::pending()), layer: Identity::new(), } } diff --git a/packages/apalis-core/src/monitor/mod.rs b/packages/apalis-core/src/monitor/mod.rs index 92e0e453..4f164eeb 100644 --- a/packages/apalis-core/src/monitor/mod.rs +++ b/packages/apalis-core/src/monitor/mod.rs @@ -1,105 +1,67 @@ use std::{ - any::Any, fmt::{self, Debug, Formatter}, - sync::{Arc, RwLock}, + sync::Arc, }; use futures::{future::BoxFuture, Future, FutureExt}; use serde::Serialize; use tower::{Layer, Service}; -mod shutdown; + +/// Shutdown utilities +pub mod shutdown; use crate::{ error::BoxDynError, - executor::Executor, request::Request, - worker::{Context, Event, Ready, Worker}, + worker::{Context, Event, EventHandler, Ready, Worker, WorkerId}, Backend, }; use self::shutdown::Shutdown; /// A monitor for coordinating and managing a collection of workers. 
-pub struct Monitor { - workers: Vec>>, - executor: E, - context: MonitorContext, +pub struct Monitor { + futures: Vec>, + workers: Vec>, terminator: Option>, -} - -/// The internal context of a [Monitor] -/// Usually shared with multiple workers -#[derive(Clone)] -pub struct MonitorContext { - #[allow(clippy::type_complexity)] - event_handler: Arc) + Send + Sync>>>>, shutdown: Shutdown, + event_handler: EventHandler, } -impl fmt::Debug for MonitorContext { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("MonitorContext") - .field("events", &self.event_handler.type_id()) - .field("shutdown", &"[Shutdown]") - .finish() - } -} - -impl MonitorContext { - fn new() -> MonitorContext { - Self { - event_handler: Arc::default(), - shutdown: Shutdown::new(), - } - } - - /// Get the shutdown handle - pub fn shutdown(&self) -> &Shutdown { - &self.shutdown - } - /// Get the events handle - pub fn notify(&self, event: Worker) { - let _ = self - .event_handler - .as_ref() - .read() - .map(|caller| caller.as_ref().map(|caller| caller(event))); - } -} - -impl Debug for Monitor { +impl Debug for Monitor { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { f.debug_struct("Monitor") .field("shutdown", &"[Graceful shutdown listener]") - .field("workers", &self.workers) - .field("executor", &std::any::type_name::()) + .field("workers", &self.futures.len()) .finish() } } -impl Monitor { +impl Monitor { /// Registers a single instance of a [Worker] - pub fn register(mut self, worker: Worker>) -> Self + pub fn register(mut self, mut worker: Worker>) -> Self where + S: Service, Response = Res> + Send + 'static, S::Future: Send, - S::Response: 'static + Send + Sync + Serialize, + S::Response: Send + Sync + Serialize + 'static, S::Error: Send + Sync + 'static + Into, + P: Backend, Res> + Send + 'static, P::Stream: Unpin + Send + 'static, - P::Layer: Layer, - >::Service: Service, Response = Res>, - >::Service: Send, + P::Layer: Layer + Send, + >::Service: Service, Response = Res> + Send, <>::Service as Service>>::Future: Send, <>::Service as Service>>::Error: - Send + Into + Sync, - S: Service, Response = Res> + Send + 'static, - Ctx: Send + Sync + 'static, + Send + Sync + Into, Req: Send + Sync + 'static, - P: Backend, Res> + 'static, - Res: 'static, Ctx: Send + Sync + 'static, + Res: 'static, { - self.workers.push(worker.with_monitor(&self)); - + worker.state.shutdown = Some(self.shutdown.clone()); + worker.state.event_handler = self.event_handler.clone(); + let runnable = worker.run(); + let handle = runnable.get_handle(); + self.workers.push(handle); + self.futures.push(runnable.boxed()); self } @@ -113,33 +75,37 @@ impl Monitor { /// # Returns /// /// The monitor instance, with all workers added to the collection. 
+ #[deprecated( + since = "0.6.0", + note = "Consider using the `.register` as workers now offer concurrency by default" + )] pub fn register_with_count( mut self, count: usize, worker: Worker>, ) -> Self where + S: Service, Response = Res> + Send + 'static + Clone, S::Future: Send, - S::Response: 'static + Send + Sync + Serialize, + S::Response: Send + Sync + Serialize + 'static, S::Error: Send + Sync + 'static + Into, + P: Backend, Res> + Send + 'static + Clone, P::Stream: Unpin + Send + 'static, - P::Layer: Layer, - P: Backend, Res> + 'static, - >::Service: Service, Response = Res>, - >::Service: Send, + P::Layer: Layer + Send, + >::Service: Service, Response = Res> + Send, <>::Service as Service>>::Future: Send, <>::Service as Service>>::Error: - Send + Into + Sync, - S: Service, Response = Res> + Send + 'static, - Ctx: Send + Sync + 'static, + Send + Sync + Into, Req: Send + Sync + 'static, - S: Service> + Send + 'static, - P: Backend, Res> + 'static, - Res: 'static, Ctx: Send + Sync + 'static, + Res: 'static, { - let workers = worker.with_monitor_instances(count, &self); - self.workers.extend(workers); + for index in 0..count { + let mut worker = worker.clone(); + let name = format!("{}-{index}", worker.id()); + worker.id = WorkerId::new(name); + self = self.register(worker); + } self } /// Runs the monitor and all its registered workers until they have all completed or a shutdown signal is received. @@ -151,19 +117,35 @@ impl Monitor { /// # Errors /// /// If the monitor fails to shutdown gracefully, an `std::io::Error` will be returned. + /// + /// # Remarks + /// + /// If a timeout has been set using the `Monitor::shutdown_timeout` method, the monitor + /// will wait for all workers to complete up to the timeout duration before exiting. + /// If the timeout is reached and workers have not completed, the monitor will exit forcefully. - pub async fn run_with_signal>>( - self, - signal: S, - ) -> std::io::Result<()> + pub async fn run_with_signal(self, signal: S) -> std::io::Result<()> where - E: Executor + Clone + Send + 'static, + S: Send + Future>, { - let shutdown = self.context.shutdown.clone(); - let shutdown_after = self.context.shutdown.shutdown_after(signal); - let runner = self.run(); - futures::try_join!(shutdown_after, runner)?; - shutdown.await; + let shutdown = self.shutdown.clone(); + let shutdown_after = self.shutdown.shutdown_after(signal); + if let Some(terminator) = self.terminator { + let _res = futures::future::select( + futures::future::join_all(self.futures) + .map(|_| shutdown.start_shutdown()) + .boxed(), + async { + let _res = shutdown_after.await; + terminator.await; + } + .boxed(), + ) + .await; + } else { + let runner = self.run(); + let _res = futures::join!(shutdown_after, runner); // If no terminator is provided, we wait for both the shutdown call and all workers to complete + } Ok(()) } @@ -171,103 +153,51 @@ impl Monitor { /// /// # Errors /// - /// If the monitor fails to shutdown gracefully, an `std::io::Error` will be returned. + /// If the monitor fails to run gracefully, an `std::io::Error` will be returned. /// /// # Remarks /// - /// If a timeout has been set using the `shutdown_timeout` method, the monitor - /// will wait for all workers to complete up to the timeout duration before exiting. - /// If the timeout is reached and workers have not completed, the monitor will exit forcefully. 
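The deprecation shim above just clones the worker and registers it as `name-0`, `name-1`, and so on; code that genuinely wants N distinct workers can do the same by hand. A sketch, assuming a `Clone` backend such as the storages used in the examples:

    use apalis::prelude::*;
    use apalis_redis::RedisStorage;
    use email_service::{send_email, Email};

    fn register_many(mut monitor: Monitor, storage: RedisStorage<Email>) -> Monitor {
        // Each worker gets its own name, so events stay distinguishable.
        for i in 0..4 {
            monitor = monitor.register(
                WorkerBuilder::new(format!("fetcher-{i}"))
                    .backend(storage.clone())
                    .build_fn(send_email),
            );
        }
        monitor
    }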
- pub async fn run(self) -> std::io::Result<()> - where - E: Executor + Clone + Send + 'static, - { - let mut futures = Vec::new(); - for worker in self.workers { - futures.push(worker.run().boxed()); - } - let shutdown_future = self.context.shutdown.boxed().map(|_| ()); - if let Some(terminator) = self.terminator { - let runner = futures::future::select( - futures::future::join_all(futures).map(|_| ()), - shutdown_future, - ); - futures::join!(runner, terminator); - } else { - futures::join!( - futures::future::join_all(futures).map(|_| ()), - shutdown_future, - ); - } + /// If all workers have completed execution, then by default the monitor will start a shutdown + pub async fn run(self) -> std::io::Result<()> { + let shutdown = self.shutdown.clone(); + let shutdown_future = self.shutdown.boxed().map(|_| ()); + futures::join!( + futures::future::join_all(self.futures).map(|_| shutdown.start_shutdown()), + shutdown_future, + ); + Ok(()) } /// Handles events emitted pub fn on_event) + Send + Sync + 'static>(self, f: F) -> Self { - let _ = self.context.event_handler.write().map(|mut res| { + let _ = self.event_handler.write().map(|mut res| { let _ = res.insert(Box::new(f)); }); self } - /// Get the current executor - pub fn executor(&self) -> &E { - &self.executor - } - - pub(crate) fn context(&self) -> &MonitorContext { - &self.context - } } -impl Default for Monitor { +impl Default for Monitor { fn default() -> Self { Self { - executor: E::default(), - context: MonitorContext::new(), - workers: Vec::new(), + shutdown: Shutdown::new(), + futures: Vec::new(), terminator: None, + event_handler: Arc::default(), + workers: Vec::new(), } } } -impl Monitor { +impl Monitor { /// Creates a new monitor instance. /// /// # Returns /// /// A new monitor instance, with an empty collection of workers. - pub fn new() -> Self - where - E: Default, - { - Self::new_with_executor(E::default()) - } - /// Creates a new monitor instance with an executor - /// - /// # Returns - /// - /// A new monitor instance, with an empty collection of workers. - pub fn new_with_executor(executor: E) -> Self { - Self { - context: MonitorContext::new(), - workers: Vec::new(), - executor, - terminator: None, - } - } - - /// Sets a custom executor for the monitor, allowing the usage of another runtime apart from Tokio. - /// The executor must implement the `Executor` trait. - pub fn set_executor(self, executor: NE) -> Monitor { - if !self.workers.is_empty() { - panic!("Tried changing executor when already loaded some workers"); - } - Monitor { - context: self.context, - workers: Vec::new(), - executor, - terminator: self.terminator, - } + pub fn new() -> Self { + Self::default() } /// Sets a timeout duration for the monitor's shutdown process. 
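In practice the pieces documented above compose like this (a sketch; `ctrl_c` is just one possible signal future):

    use std::time::Duration;
    use apalis::prelude::*;

    async fn run_until_ctrl_c(monitor: Monitor) -> std::io::Result<()> {
        monitor
            // After the signal fires, in-flight jobs get up to 5s before a forced exit.
            .shutdown_timeout(Duration::from_secs(5))
            // Starts the shutdown once the given future completes.
            .run_with_signal(tokio::signal::ctrl_c())
            .await
    }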
@@ -312,7 +242,6 @@ mod tests { request::Request, test_message_queue, test_utils::TestWrapper, - TestExecutor, }; test_message_queue!(MemoryStorage::new()); @@ -334,12 +263,12 @@ mod tests { let worker = WorkerBuilder::new("rango-tango") .backend(backend) .build(service); - let monitor: Monitor = Monitor::new(); + let monitor: Monitor = Monitor::new(); let monitor = monitor.register(worker); - let shutdown = monitor.context.shutdown.clone(); + let shutdown = monitor.shutdown.clone(); tokio::spawn(async move { sleep(Duration::from_millis(1500)).await; - shutdown.shutdown(); + shutdown.start_shutdown(); }); monitor.run().await.unwrap(); } @@ -360,16 +289,16 @@ mod tests { let worker = WorkerBuilder::new("rango-tango") .backend(backend) .build(service); - let monitor: Monitor = Monitor::new(); + let monitor: Monitor = Monitor::new(); let monitor = monitor.on_event(|e| { println!("{e:?}"); }); - let monitor = monitor.register_with_count(5, worker); - assert_eq!(monitor.workers.len(), 5); - let shutdown = monitor.context.shutdown.clone(); + let monitor = monitor.register(worker); + assert_eq!(monitor.futures.len(), 1); + let shutdown = monitor.shutdown.clone(); tokio::spawn(async move { sleep(Duration::from_millis(1000)).await; - shutdown.shutdown(); + shutdown.start_shutdown(); }); let result = monitor.run().await; diff --git a/packages/apalis-core/src/monitor/shutdown.rs b/packages/apalis-core/src/monitor/shutdown.rs index 83d188a3..ce315545 100644 --- a/packages/apalis-core/src/monitor/shutdown.rs +++ b/packages/apalis-core/src/monitor/shutdown.rs @@ -16,17 +16,19 @@ pub struct Shutdown { } impl Shutdown { + /// Create a new shutdown handle pub fn new() -> Shutdown { Shutdown { inner: Arc::new(ShutdownCtx::new()), } } + /// Set the future to await before shutting down pub fn shutdown_after(&self, f: F) -> impl Future { let handle = self.clone(); async move { let result = f.await; - handle.shutdown(); + handle.start_shutdown(); result } } @@ -51,7 +53,6 @@ impl ShutdownCtx { } } fn shutdown(&self) { - // Set the shutdown state to true self.state.store(true, Ordering::Relaxed); self.wake(); } @@ -68,11 +69,13 @@ impl ShutdownCtx { } impl Shutdown { + /// Check if the system is shutting down pub fn is_shutting_down(&self) -> bool { self.inner.is_shutting_down() } - pub fn shutdown(&self) { + /// Start the shutdown process + pub fn start_shutdown(&self) { self.inner.shutdown() } } diff --git a/packages/apalis-core/src/poller/mod.rs b/packages/apalis-core/src/poller/mod.rs index 03c17313..5d5554db 100644 --- a/packages/apalis-core/src/poller/mod.rs +++ b/packages/apalis-core/src/poller/mod.rs @@ -1,8 +1,5 @@ use futures::{future::BoxFuture, Future, FutureExt}; -use std::{ - fmt::{self, Debug}, - ops::{Deref, DerefMut}, -}; +use std::fmt::{self, Debug}; use tower::layer::util::Identity; /// Util for controlling pollers @@ -58,28 +55,3 @@ where const STOPPED: usize = 2; const PLUGGED: usize = 1; const UNPLUGGED: usize = 0; - -/// Tells the poller that the worker is ready for a new request -#[derive(Debug)] -pub struct FetchNext { - sender: async_oneshot::Sender, -} - -impl Deref for FetchNext { - type Target = async_oneshot::Sender; - fn deref(&self) -> &Self::Target { - &self.sender - } -} - -impl DerefMut for FetchNext { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.sender - } -} -impl FetchNext { - /// Generate a new instance of ready - pub fn new(sender: async_oneshot::Sender) -> Self { - Self { sender } - } -} diff --git a/packages/apalis-core/src/worker/buffer/error.rs 
b/packages/apalis-core/src/worker/buffer/error.rs deleted file mode 100644 index a1da124d..00000000 --- a/packages/apalis-core/src/worker/buffer/error.rs +++ /dev/null @@ -1,68 +0,0 @@ -//! Error types for the `Buffer` middleware. - -use std::{fmt, sync::Arc}; -use tower::BoxError; - -/// An error produced by a [`Service`] wrapped by a [`Buffer`] -/// -/// [`Service`]: crate::Service -/// [`Buffer`]: crate::buffer::Buffer -#[derive(Debug)] -pub(crate) struct ServiceError { - inner: Arc, -} - -/// An error produced when the a buffer's worker closes unexpectedly. -pub(crate) struct Closed { - _p: (), -} - -// ===== impl ServiceError ===== - -impl ServiceError { - pub(crate) fn new(inner: BoxError) -> ServiceError { - let inner = Arc::new(inner); - ServiceError { inner } - } - - // Private to avoid exposing `Clone` trait as part of the public API - pub(crate) fn clone(&self) -> ServiceError { - ServiceError { - inner: self.inner.clone(), - } - } -} - -impl fmt::Display for ServiceError { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(fmt, "buffered service failed: {}", self.inner) - } -} - -impl std::error::Error for ServiceError { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - Some(&**self.inner) - } -} - -// ===== impl Closed ===== - -impl Closed { - pub(crate) fn new() -> Self { - Closed { _p: () } - } -} - -impl fmt::Debug for Closed { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.debug_tuple("Closed").finish() - } -} - -impl fmt::Display for Closed { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt.write_str("buffer's worker closed unexpectedly") - } -} - -impl std::error::Error for Closed {} diff --git a/packages/apalis-core/src/worker/buffer/future.rs b/packages/apalis-core/src/worker/buffer/future.rs deleted file mode 100644 index 8cf3baea..00000000 --- a/packages/apalis-core/src/worker/buffer/future.rs +++ /dev/null @@ -1,79 +0,0 @@ -//! Future types for the [`Buffer`] middleware. -//! -//! [`Buffer`]: crate::buffer::Buffer - -use super::{error::Closed, message}; -use futures::ready; -use pin_project_lite::pin_project; -use std::{ - future::Future, - pin::Pin, - task::{Context, Poll}, -}; - -pin_project! { - /// Future that completes when the buffered service eventually services the submitted request. - #[derive(Debug)] - pub struct ResponseFuture { - #[pin] - state: ResponseState, - } -} - -pin_project! 
{ - #[project = ResponseStateProj] - #[derive(Debug)] - enum ResponseState { - Failed { - error: Option, - }, - Rx { - #[pin] - rx: message::Rx, - }, - Poll { - #[pin] - fut: T, - }, - } -} - -impl ResponseFuture { - pub(crate) fn new(rx: message::Rx) -> Self { - ResponseFuture { - state: ResponseState::Rx { rx }, - } - } - - pub(crate) fn failed(err: tower::BoxError) -> Self { - ResponseFuture { - state: ResponseState::Failed { error: Some(err) }, - } - } -} - -impl Future for ResponseFuture -where - F: Future>, - E: Into, -{ - type Output = Result; - - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let mut this = self.project(); - - loop { - match this.state.as_mut().project() { - ResponseStateProj::Failed { error } => { - return Poll::Ready(Err(error.take().expect("polled after error"))); - } - ResponseStateProj::Rx { rx } => match ready!(rx.poll(cx)) { - Ok(Ok(fut)) => this.state.set(ResponseState::Poll { fut }), - Ok(Err(e)) => return Poll::Ready(Err(e.into())), - Err(_) => return Poll::Ready(Err(Closed::new().into())), - }, - ResponseStateProj::Poll { fut } => return fut.poll(cx).map_err(Into::into), - } - } - } -} diff --git a/packages/apalis-core/src/worker/buffer/message.rs b/packages/apalis-core/src/worker/buffer/message.rs deleted file mode 100644 index 02863a2d..00000000 --- a/packages/apalis-core/src/worker/buffer/message.rs +++ /dev/null @@ -1,16 +0,0 @@ -use futures::channel::oneshot; - -use super::error::ServiceError; - -/// Message sent over buffer -#[derive(Debug)] -pub(crate) struct Message { - pub(crate) request: Request, - pub(crate) tx: Tx, -} - -/// Response sender -pub(crate) type Tx = oneshot::Sender>; - -/// Response receiver -pub(crate) type Rx = oneshot::Receiver>; diff --git a/packages/apalis-core/src/worker/buffer/mod.rs b/packages/apalis-core/src/worker/buffer/mod.rs deleted file mode 100644 index c341f07d..00000000 --- a/packages/apalis-core/src/worker/buffer/mod.rs +++ /dev/null @@ -1,5 +0,0 @@ -pub(crate) mod error; -pub(crate) mod future; -pub(crate) mod message; -pub(crate) mod service; -pub(crate) mod worker; diff --git a/packages/apalis-core/src/worker/buffer/service.rs b/packages/apalis-core/src/worker/buffer/service.rs deleted file mode 100644 index a4a2764e..00000000 --- a/packages/apalis-core/src/worker/buffer/service.rs +++ /dev/null @@ -1,149 +0,0 @@ -use super::{ - future::ResponseFuture, - message::Message, - worker::{Handle, Worker}, -}; - -use futures::channel::{mpsc, oneshot}; -use futures::task::AtomicWaker; -use std::{ - future::Future, - task::{Context, Poll}, -}; -use std::{marker::PhantomData, sync::Arc}; -use tower::Service; - -/// Adds an mpsc buffer in front of an inner service. -/// -/// See the module documentation for more details. -#[derive(Debug)] -pub struct Buffer { - tx: PollSender>, - handle: Handle, - res: PhantomData, -} - -impl Buffer -where - F: 'static, -{ - /// Creates a new [`Buffer`] wrapping `service`, but returns the background worker. - /// - /// This is useful if you do not want to spawn directly onto the runtime - /// but instead want to use your own executor. This will return the [`Buffer`] and - /// the background `Worker` that you can then spawn. 
- pub fn pair(service: S, bound: usize) -> (Self, Worker) - where - S: Service + Send + 'static, - F: Send, - S::Error: Into + Send + Sync, - Req: Send + 'static, - { - let (tx, rx) = mpsc::channel(bound); - let (handle, worker) = Worker::new(service, rx); - let buffer = Self { - tx: PollSender::new(tx), - handle, - res: PhantomData, - }; - (buffer, worker) - } - - fn get_worker_error(&self) -> tower::BoxError { - self.handle.get_error_on_closed() - } -} - -impl Service for Buffer -where - F: Future> + Send + 'static, - E: Into, - Req: Send + 'static, -{ - type Response = Res; - type Error = tower::BoxError; - type Future = ResponseFuture; - - fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { - // First, check if the worker is still alive. - if self.tx.is_closed() { - // If the inner service has errored, then we error here. - return Poll::Ready(Err(self.get_worker_error())); - } - - // Poll the sender to acquire a permit. - self.tx - .poll_reserve(cx) - .map_err(|_| self.get_worker_error()) - } - - fn call(&mut self, request: Req) -> Self::Future { - let (tx, rx) = oneshot::channel(); - match self.tx.send_item(Message { request, tx }) { - Ok(_) => ResponseFuture::new(rx), - Err(_) => ResponseFuture::failed(self.get_worker_error()), - } - } -} - -impl Clone for Buffer -where - Req: Send + 'static, - F: Send + 'static, -{ - fn clone(&self) -> Self { - Self { - handle: self.handle.clone(), - tx: self.tx.clone(), - res: PhantomData, - } - } -} - -// PollSender implementation using futures and async-channel -#[derive(Debug)] -struct PollSender { - tx: mpsc::Sender, - waker: Arc, -} - -impl PollSender { - fn new(tx: mpsc::Sender) -> Self { - Self { - tx, - waker: Arc::new(AtomicWaker::new()), - } - } - - fn poll_reserve(&mut self, cx: &mut Context<'_>) -> Poll> { - if self.tx.is_closed() { - return Poll::Ready(Err(())); - } - - self.waker.register(cx.waker()); - - self.tx.poll_ready(cx).map(|res| match res { - Ok(_) => Ok(()), - Err(_) => Err(()), - }) - } - - fn send_item(&mut self, item: T) -> Result<(), ()> { - if self.tx.is_closed() { - return Err(()); - } - - self.tx.try_send(item).map_err(|_| ()) - } - - fn is_closed(&self) -> bool { - self.tx.is_closed() - } - - fn clone(&self) -> Self { - Self { - tx: self.tx.clone(), - waker: self.waker.clone(), - } - } -} diff --git a/packages/apalis-core/src/worker/buffer/worker.rs b/packages/apalis-core/src/worker/buffer/worker.rs deleted file mode 100644 index 1ace6f26..00000000 --- a/packages/apalis-core/src/worker/buffer/worker.rs +++ /dev/null @@ -1,184 +0,0 @@ -use super::{ - error::{Closed, ServiceError}, - message::Message, -}; -use futures::{channel::mpsc, ready, Stream}; -use std::sync::{Arc, Mutex}; -use std::{ - future::Future, - pin::Pin, - task::{Context, Poll}, -}; - -use tower::Service; - -pin_project_lite::pin_project! { - #[derive(Debug)] - pub struct Worker - where - T: Service, - { - current_message: Option>, - rx: mpsc::Receiver>, - service: T, - finish: bool, - failed: Option, - handle: Handle, - } -} - -/// Get the error out -#[derive(Debug)] -pub(crate) struct Handle { - inner: Arc>>, -} - -impl Worker -where - T: Service, - T::Error: Into, -{ - pub(crate) fn new( - service: T, - rx: mpsc::Receiver>, - ) -> (Handle, Worker) { - let handle = Handle { - inner: Arc::new(Mutex::new(None)), - }; - - let worker = Worker { - current_message: None, - finish: false, - failed: None, - rx, - service, - handle: handle.clone(), - }; - - (handle, worker) - } - - /// Return the next queued Message that hasn't been canceled. 
- /// - /// If a `Message` is returned, the `bool` is true if this is the first time we received this - /// message, and false otherwise (i.e., we tried to forward it to the backing service before). - #[allow(clippy::type_complexity)] - fn poll_next_msg( - &mut self, - cx: &mut Context<'_>, - ) -> Poll, bool)>> { - if self.finish { - // We've already received None and are shutting down - return Poll::Ready(None); - } - - // tracing::trace!("worker polling for next message"); - if let Some(msg) = self.current_message.take() { - // If the oneshot sender is closed, then the receiver is dropped, - // and nobody cares about the response. If this is the case, we - // should continue to the next request. - if !msg.tx.is_canceled() { - // tracing::trace!("resuming buffered request"); - return Poll::Ready(Some((msg, false))); - } - - // tracing::trace!("dropping cancelled buffered request"); - } - - // Get the next request - while let Some(msg) = ready!(Pin::new(&mut self.rx).poll_next(cx)) { - if !msg.tx.is_canceled() { - // tracing::trace!("processing new request"); - return Poll::Ready(Some((msg, true))); - } - // Otherwise, request is canceled, so pop the next one. - // tracing::trace!("dropping cancelled request"); - } - - Poll::Ready(None) - } - - fn failed(&mut self, error: tower::BoxError) { - let error = ServiceError::new(error); - - let mut inner = self.handle.inner.lock().unwrap(); - - if inner.is_some() { - return; - } - - *inner = Some(error.clone()); - drop(inner); - - self.rx.close(); - self.failed = Some(error); - } -} - -impl Future for Worker -where - T: Service, - T::Error: Into, -{ - type Output = (); - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - if self.finish { - return Poll::Ready(()); - } - - loop { - match ready!(self.poll_next_msg(cx)) { - Some((msg, _)) => { - if let Some(ref failed) = self.failed { - let _ = msg.tx.send(Err(failed.clone())); - continue; - } - match self.service.poll_ready(cx) { - Poll::Ready(Ok(())) => { - let response = self.service.call(msg.request); - let _ = msg.tx.send(Ok(response)); - } - Poll::Pending => { - self.current_message = Some(msg); - return Poll::Pending; - } - Poll::Ready(Err(e)) => { - let error = e.into(); - self.failed(error); - let _ = msg.tx.send(Err(self - .failed - .as_ref() - .expect("Worker::failed did not set self.failed?") - .clone())); - } - } - } - None => { - // No more more requests _ever_. 
- self.finish = true; - return Poll::Ready(()); - } - } - } - } -} - -impl Handle { - pub(crate) fn get_error_on_closed(&self) -> tower::BoxError { - self.inner - .lock() - .unwrap() - .as_ref() - .map(|svc_err| svc_err.clone().into()) - .unwrap_or_else(|| Closed::new().into()) - } -} - -impl Clone for Handle { - fn clone(&self) -> Handle { - Handle { - inner: self.inner.clone(), - } - } -} diff --git a/packages/apalis-core/src/worker/mod.rs b/packages/apalis-core/src/worker/mod.rs index d0908c29..2557b31e 100644 --- a/packages/apalis-core/src/worker/mod.rs +++ b/packages/apalis-core/src/worker/mod.rs @@ -1,14 +1,13 @@ -use self::stream::WorkerStream; use crate::error::{BoxDynError, Error}; -use crate::executor::Executor; use crate::layers::extensions::Data; -use crate::monitor::{Monitor, MonitorContext}; -use crate::notify::Notify; -use crate::poller::FetchNext; +use crate::monitor::shutdown::Shutdown; use crate::request::Request; +use crate::service_fn::FromRequest; +use crate::task::task_id::TaskId; use crate::Backend; -use futures::future::Shared; -use futures::{Future, FutureExt}; +use futures::future::{join, select, BoxFuture}; +use futures::stream::BoxStream; +use futures::{Future, FutureExt, Stream, StreamExt}; use pin_project_lite::pin_project; use serde::{Deserialize, Serialize}; use std::fmt::Debug; @@ -17,72 +16,32 @@ use std::ops::{Deref, DerefMut}; use std::pin::Pin; use std::str::FromStr; use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; -use std::sync::{Arc, Mutex}; +use std::sync::{Arc, Mutex, RwLock}; use std::task::{Context as TaskCtx, Poll, Waker}; use thiserror::Error; -use tower::{Layer, Service, ServiceBuilder, ServiceExt}; - -mod buffer; -mod stream; - -pub use buffer::service::Buffer; - -// By default a worker starts 3 futures, one for polling, one for worker stream and the other for consuming. 
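For context on what is being deleted: the buffer forwarded every request over a bounded mpsc channel together with a per-request oneshot sender, and the background worker replied on that oneshot once the inner service accepted the request. A minimal standalone sketch of that request/reply pattern using only the `futures` crate (the `Message` type and the handler string here are illustrative, not apalis APIs):

```rust
use futures::channel::{mpsc, oneshot};
use futures::{SinkExt, StreamExt};

// Illustrative stand-in for the buffer's internal message: the request plus
// a oneshot sender that the background worker uses to hand back a response.
struct Message {
    request: String,
    tx: oneshot::Sender<String>,
}

fn main() {
    futures::executor::block_on(async {
        let (mut tx, mut rx) = mpsc::channel::<Message>(8);

        // Worker side: drain queued messages and reply on each oneshot.
        let worker = async move {
            while let Some(msg) = rx.next().await {
                let _ = msg.tx.send(format!("handled: {}", msg.request));
            }
        };

        // Service side: enqueue one request, then await its reply.
        let client = async move {
            let (reply_tx, reply_rx) = oneshot::channel();
            tx.send(Message { request: "job-1".into(), tx: reply_tx })
                .await
                .expect("worker is alive");
            assert_eq!(reply_rx.await.unwrap(), "handled: job-1");
            // `tx` drops here, closing the channel and ending the worker loop.
        };

        futures::join!(worker, client);
    });
}
```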
-const WORKER_FUTURES: usize = 3; - -type WorkerNotify = Notify>>; +use tower::util::CallAllUnordered; +use tower::{Layer, Service, ServiceBuilder}; /// A worker name wrapper usually used by Worker builder #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] pub struct WorkerId { name: String, - instance: Option, } +/// An event handler for [`Worker`] +pub type EventHandler = Arc) + Send + Sync>>>>; + impl FromStr for WorkerId { type Err = (); fn from_str(s: &str) -> Result { - let mut parts: Vec<&str> = s.rsplit('-').collect(); - - match parts.len() { - 1 => Ok(WorkerId { - name: parts[0].to_string(), - instance: None, - }), - _ => { - let instance_str = parts[0]; - match instance_str.parse() { - Ok(instance) => { - let remainder = &mut parts[1..]; - remainder.reverse(); - let name = remainder.join("-"); - Ok(WorkerId { - name: name.to_string(), - instance: Some(instance), - }) - } - Err(_) => Ok(WorkerId { - name: { - let all = &mut parts[0..]; - all.reverse(); - all.join("-") - }, - instance: None, - }), - } - } - } + Ok(WorkerId { name: s.to_owned() }) } } impl Display for WorkerId { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.write_str(self.name())?; - if let Some(instance) = self.instance { - f.write_str("-")?; - f.write_str(&instance.to_string())?; - } Ok(()) } } @@ -92,26 +51,13 @@ impl WorkerId { pub fn new>(name: T) -> Self { Self { name: name.as_ref().to_string(), - instance: None, } } - /// Build a new worker ref - pub fn new_with_instance>(name: T, instance: usize) -> Self { - Self { - name: name.as_ref().to_string(), - instance: Some(instance), - } - } /// Get the name of the worker pub fn name(&self) -> &str { &self.name } - - /// Get the name of the worker - pub fn instance(&self) -> &Option { - &self.instance - } } /// Events emitted by a worker @@ -120,9 +66,11 @@ pub enum Event { /// Worker started Start, /// Worker got a job - Engage, + Engage(TaskId), /// Worker is idle, stream has no new request for now Idle, + /// A custom event + Custom(String), /// Worker encountered an error Error(BoxDynError), /// Worker stopped @@ -131,6 +79,22 @@ pub enum Event { Exit, } +impl fmt::Display for Worker { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let event_description = match &self.state { + Event::Start => "Worker started".to_string(), + Event::Engage(task_id) => format!("Worker engaged with Task ID: {}", task_id), + Event::Idle => "Worker is idle".to_string(), + Event::Custom(msg) => format!("Custom event: {}", msg), + Event::Error(err) => format!("Worker encountered an error: {}", err), + Event::Stop => "Worker stopped".to_string(), + Event::Exit => "Worker completed all pending tasks and exited".to_string(), + }; + + write!(f, "Worker [{}]: {}", self.id.name, event_description) + } +} + /// Possible errors that can occur when starting a worker. 
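With the `instance` field gone, parsing a worker id is lossless: the whole input string becomes the name, and `Display` prints it back unchanged. A small sketch consistent with the updated tests further down (assuming only the `WorkerId` API shown in this diff):

```rust
use apalis_core::worker::WorkerId;
use std::str::FromStr;

fn main() {
    // A "-0" suffix is no longer split off into an instance number.
    let id = WorkerId::from_str("email-worker-0").unwrap();
    assert_eq!(id.name(), "email-worker-0");
    assert_eq!(id.to_string(), "email-worker-0"); // Display prints just the name
}
```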
#[derive(Error, Debug, Clone)] pub enum WorkerError { @@ -146,17 +110,51 @@ pub enum WorkerError { } /// A worker that is ready for running -#[derive(Debug)] pub struct Ready { service: S, backend: P, + pub(crate) shutdown: Option, + pub(crate) event_handler: EventHandler, +} + +impl fmt::Debug for Ready +where + S: fmt::Debug, + P: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Ready") + .field("service", &self.service) + .field("backend", &self.backend) + .field("shutdown", &self.shutdown) + .field("event_handler", &"...") // Avoid dumping potentially sensitive or verbose data + .finish() + } } + +impl Clone for Ready +where + S: Clone, + P: Clone, +{ + fn clone(&self) -> Self { + Ready { + service: self.service.clone(), + backend: self.backend.clone(), + shutdown: self.shutdown.clone(), + event_handler: self.event_handler.clone(), + } + } +} + impl Ready { /// Build a worker that is ready for execution pub fn new(service: S, poller: P) -> Self { Ready { service, backend: poller, + shutdown: None, + event_handler: EventHandler::default(), } } } @@ -164,8 +162,8 @@ impl Ready { /// Represents a generic [Worker] that can be in many different states #[derive(Debug, Clone)] pub struct Worker { - id: WorkerId, - state: T, + pub(crate) id: WorkerId, + pub(crate) state: T, } impl Worker { @@ -198,29 +196,83 @@ impl DerefMut for Worker { } } -impl Worker> { - /// Start a worker - pub async fn run(self) { - let instance = self.instance; - let monitor = self.state.context.clone(); - self.state.running.store(true, Ordering::Relaxed); - self.state.await; - if let Some(ctx) = monitor.as_ref() { - ctx.notify(Worker { - state: Event::Exit, - id: WorkerId::new_with_instance(self.id.name, instance), +impl Worker { + /// Allows workers to emit events + pub fn emit(&self, event: Event) -> bool { + if let Some(handler) = self.state.event_handler.read().unwrap().as_ref() { + handler(Worker { + id: self.id().clone(), + state: event, }); - }; + return true; + } + false + } +} + +impl FromRequest> for Worker { + fn from_request(req: &Request) -> Result { + req.parts.data.get_checked().cloned() } } impl Worker> { - fn common_worker_setup( - self, - executor: E, - context: Option, - instances: usize, - ) -> Vec>> + /// Add an event handler to the worker + pub fn on_event) + Send + Sync + 'static>(self, f: F) -> Self { + let _ = self.event_handler.write().map(|mut res| { + let _ = res.insert(Box::new(f)); + }); + self + } + + fn poll_jobs( + worker: Worker, + service: Svc, + stream: Stm, + ) -> BoxStream<'static, ()> + where + Svc: Service, Response = Res> + Send + 'static, + Stm: Stream>, Error>> + Send + Unpin + 'static, + Req: Send + 'static + Sync, + Svc::Future: Send, + Svc::Response: 'static + Send + Sync + Serialize, + Svc::Error: Send + Sync + 'static + Into, + Ctx: Send + 'static + Sync, + Res: 'static, + { + let w = worker.clone(); + let stream = stream.filter_map(move |result| { + let worker = worker.clone(); + + async move { + match result { + Ok(Some(request)) => { + worker.emit(Event::Engage(request.parts.task_id.clone())); + Some(request) + } + Ok(None) => { + worker.emit(Event::Idle); + None + } + Err(err) => { + worker.emit(Event::Error(Box::new(err))); + None + } + } + } + }); + let stream = CallAllUnordered::new(service, stream).map(move |res| { + if let Err(error) = res { + if let Some(Error::MissingData(_)) = error.downcast_ref::() { + w.stop(); + } + w.emit(Event::Error(error)); + } + }); + stream.boxed() + } + /// Start a worker + pub fn 
run(self) -> Runnable where S: Service, Response = Res> + Send + 'static, P: Backend, Res> + 'static, @@ -229,320 +281,141 @@ impl Worker> { S::Response: 'static + Send + Sync + Serialize, S::Error: Send + Sync + 'static + Into, P::Stream: Unpin + Send + 'static, - E: Executor + Clone + Send + 'static + Sync, P::Layer: Layer, >::Service: Service, Response = Res> + Send, <>::Service as Service>>::Future: Send, <>::Service as Service>>::Error: Send + Into + Sync, Ctx: Send + 'static + Sync, + Res: 'static, { - let notifier = Notify::new(); - let backend = self.state.backend; - let service = self.state.service; - let poller = backend.poll::(self.id.clone()); - let layer = poller.layer; - let service = ServiceBuilder::new().layer(layer).service(service); - let (service, poll_worker) = Buffer::pair(service, instances); - let polling = poller.heartbeat.shared(); - let worker_stream = WorkerStream::new(poller.stream, notifier.clone()) - .into_future() - .shared(); - - executor.spawn(poll_worker); - - (0..instances) - .map(|instance| { - Self::build_worker_instance( - WorkerId::new_with_instance(self.id.name(), instance), - service.clone(), - executor.clone(), - notifier.clone(), - polling.clone(), - worker_stream.clone(), - context.clone(), - ) - }) - .collect() - } - - fn build_worker_instance( - id: WorkerId, - service: LS, - executor: E, - notifier: WorkerNotify>, Error>>, - polling: Shared + Send + 'static>, - worker_stream: Shared + Send + 'static>, - context: Option, - ) -> Worker> - where - LS: Service, Response = Res> + Send + 'static, - LS::Future: Send + 'static, - LS::Response: 'static + Send + Sync + Serialize, - LS::Error: Send + Sync + Into + 'static, - P: Backend, Res>, - E: Executor + Send + Clone + 'static + Sync, - Req: Sync + Send + 'static, - S: 'static, - P: 'static, - Ctx: Send + 'static + Sync, - { - let instance = id.instance.unwrap_or_default(); + let worker_id = self.id().clone(); let ctx = Context { - context, - executor, - instance, running: Arc::default(), task_count: Arc::default(), wakers: Arc::default(), + shutdown: self.state.shutdown, + event_handler: self.state.event_handler.clone(), + }; + let worker = Worker { + id: worker_id.clone(), + state: ctx.clone(), }; - let worker = Worker { id, state: ctx }; + let backend = self.state.backend; + let service = self.state.service; + let poller = backend.poll::(worker_id.clone()); + let stream = poller.stream; + let heartbeat = poller.heartbeat.boxed(); + let layer = poller.layer; + let service = ServiceBuilder::new() + .layer(TrackerLayer::new(worker.state.clone())) + .layer(Data::new(worker.clone())) + .layer(layer) + .service(service); + + Runnable { + poller: Self::poll_jobs(worker.clone(), service, stream), + heartbeat, + worker, + running: false, + } + } +} - let fut = Self::build_instance(instance, service, worker.clone(), notifier); +/// A `Runnable` represents a unit of work that manages a worker's lifecycle and execution flow. +/// +/// The `Runnable` struct is responsible for coordinating the core tasks of a worker, such as polling for jobs, +/// maintaining heartbeats, and tracking its running state. It integrates various components required for +/// the worker to operate effectively within an asynchronous runtime. 
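Concretely, the new flow is: build the worker, call `run()` to get a `Runnable`, then await it. A minimal sketch assuming a Tokio runtime and the in-memory queue from `apalis-core` (`process` and the queue wiring are placeholders, and the exact `MessageQueue::enqueue` signature is assumed here):

```rust
use apalis_core::builder::{WorkerBuilder, WorkerFactoryFn};
use apalis_core::memory::MemoryStorage;
use apalis_core::mq::MessageQueue;
use std::time::Duration;

async fn process(job: u32) {
    println!("processing job {job}");
}

#[tokio::main]
async fn main() {
    let mut queue = MemoryStorage::new();
    queue.enqueue(42u32).await.unwrap();

    let runnable = WorkerBuilder::new("demo-worker")
        .backend(queue.clone())
        .build_fn(process)
        .on_event(|e| println!("{e}")) // uses the Display impl above
        .run();

    // The handle outlives the future and can stop the worker gracefully.
    let handle = runnable.get_handle();
    tokio::spawn(async move {
        tokio::time::sleep(Duration::from_millis(200)).await;
        handle.stop();
    });

    runnable.await; // drives polling and heartbeat until Stop, then Exit
}
```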
+#[must_use = "A Runnable must be awaited of no jobs will be consumed"] +pub struct Runnable { + poller: BoxStream<'static, ()>, + heartbeat: BoxFuture<'static, ()>, + worker: Worker, + running: bool, +} - worker.spawn(fut); - worker.spawn(polling); - worker.spawn(worker_stream); - worker +impl Runnable { + /// Returns a handle to the worker, allowing control and functionality like stopping + pub fn get_handle(&self) -> Worker { + self.worker.clone() } +} - /// Setup a worker with an executor - pub fn with_executor(self, executor: E) -> Worker> - where - S: Service, Response = Res> + Send + 'static, - P: Backend, Res> + 'static, - Req: Send + 'static + Sync, - S::Future: Send, - S::Response: 'static + Send + Sync + Serialize, - S::Error: Send + Sync + 'static + Into, - P::Stream: Unpin + Send + 'static, - E: Executor + Clone + Send + 'static + Sync, - P::Layer: Layer, - >::Service: Service, Response = Res> + Send, - <>::Service as Service>>::Future: Send, - <>::Service as Service>>::Error: - Send + Into + Sync, - Ctx: Send + Sync + 'static, - { - self.common_worker_setup(executor, None, 1).pop().unwrap() +impl fmt::Debug for Runnable { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Runnable") + .field("poller", &"") + .field("heartbeat", &"") + .field("worker", &self.worker) + .field("running", &self.running) + .finish() } +} - /// Setup a worker with the monitor - pub fn with_monitor(self, monitor: &Monitor) -> Worker> - where - S: Service, Response = Res> + Send + 'static, - P: Backend, Res> + 'static, - Req: Send + 'static + Sync, - S::Future: Send, - S::Response: 'static + Send + Sync + Serialize, - S::Error: Send + Sync + 'static + Into, - P::Stream: Unpin + Send + 'static, - E: Executor + Clone + Send + 'static + Sync, - P::Layer: Layer, - >::Service: Service, Response = Res> + Send, - <>::Service as Service>>::Future: Send, - <>::Service as Service>>::Error: - Send + Into + Sync, - Ctx: Send + Sync + 'static, - { - self.common_worker_setup( - monitor.executor().clone(), - Some(monitor.context().clone()), - 1, - ) - .pop() - .unwrap() - } +impl Future for Runnable { + type Output = (); - /// Setup instances of the worker with the Monitor - pub fn with_monitor_instances( - self, - instances: usize, - monitor: &Monitor, - ) -> Vec>> - where - S: Service, Response = Res> + Send + 'static, - P: Backend, Res> + 'static, - Req: Send + 'static + Sync, - S::Future: Send, - S::Response: 'static + Send + Sync + Serialize, - S::Error: Send + Sync + 'static + Into, - P::Stream: Unpin + Send + 'static, - E: Executor + Clone + Send + 'static + Sync, - P::Layer: Layer, - >::Service: Service, Response = Res> + Send, - <>::Service as Service>>::Future: Send, - <>::Service as Service>>::Error: - Send + Into + Sync, - Ctx: Send + Sync + 'static, - { - self.common_worker_setup( - monitor.executor().clone(), - Some(monitor.context().clone()), - instances, - ) - } + fn poll(self: Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> Poll { + let this = self.get_mut(); + let poller = &mut this.poller; + let heartbeat = &mut this.heartbeat; + let worker = &mut this.worker; - /// Setup worker instances providing an executor - pub fn with_executor_instances( - self, - instances: usize, - executor: E, - ) -> Vec>> - where - S: Service, Response = Res> + Send + 'static, - P: Backend, Res> + 'static, - Req: Send + 'static + Sync, - S::Future: Send, - S::Response: 'static + Send + Sync + Serialize, - S::Error: Send + Sync + 'static + Into, - P::Stream: Unpin + Send + 'static, - E: 
Executor + Clone + Send + 'static + Sync, - P::Layer: Layer, - >::Service: Service, Response = Res> + Send, - <>::Service as Service>>::Future: Send, - <>::Service as Service>>::Error: - Send + Into + Sync, - Ctx: Send + Sync + 'static, - { - self.common_worker_setup(executor, None, instances) - } - - pub(crate) async fn build_instance( - instance: usize, - service: LS, - worker: Worker>, - notifier: WorkerNotify>, Error>>, - ) where - LS: Service, Response = Res> + Send + 'static, - LS::Future: Send + 'static, - LS::Response: 'static, - LS::Error: Send + Sync + Into + 'static, - P: Backend, Res>, - E: Executor + Send + Clone + 'static + Sync, - { - if let Some(ctx) = worker.state.context.as_ref() { - ctx.notify(Worker { - state: Event::Start, - id: WorkerId::new_with_instance(worker.id.name(), instance), - }); - }; - let worker_layers = ServiceBuilder::new() - .layer(Data::new(worker.id.clone())) - .layer(Data::new(worker.state.clone())); - let mut service = worker_layers.service(service); - worker.running.store(true, Ordering::Relaxed); - let worker_id = worker.id().clone(); - loop { - if worker.is_shutting_down() { - if let Some(ctx) = worker.state.context.as_ref() { - ctx.notify(Worker { - state: Event::Stop, - id: WorkerId::new_with_instance(worker.id.name(), instance), - }); - }; - break; - } - match service.ready().await { - Ok(service) => { - let (sender, receiver) = async_oneshot::oneshot(); - let res = notifier.notify(Worker { - id: WorkerId::new_with_instance(worker.id.name(), instance), - state: FetchNext::new(sender), - }); - - if res.is_ok() { - match receiver.await { - Ok(Ok(Some(req))) => { - let fut = service.call(req); - let worker_id = worker_id.clone(); - let w = worker.clone(); - let state = worker.state.clone(); - worker.spawn(fut.map(move |res| { - if let Err(e) = res { - let error = e.into(); - if let Some(Error::MissingData(e)) = - error.downcast_ref::() - { - w.force_stop(); - unreachable!("Worker missing required context: {}", e); - } - if let Some(ctx) = state.context.as_ref() { - ctx.notify(Worker { - state: Event::Error(error), - id: WorkerId::new_with_instance( - worker_id.name(), - instance, - ), - }); - }; - } - })); - } - Ok(Err(e)) => { - if let Some(ctx) = worker.state.context.as_ref() { - ctx.notify(Worker { - state: Event::Error(Box::new(e)), - id: WorkerId::new_with_instance(worker.id.name(), instance), - }); - }; - } - Ok(Ok(None)) => { - if let Some(ctx) = worker.state.context.as_ref() { - ctx.notify(Worker { - state: Event::Idle, - id: WorkerId::new_with_instance(worker.id.name(), instance), - }); - }; - } - Err(_) => { - // Listener was dropped, no need to notify - } - } - } - } - Err(e) => { - if let Some(ctx) = worker.state.context.as_ref() { - ctx.notify(Worker { - state: Event::Error(e.into()), - id: WorkerId::new_with_instance(worker.id.name(), instance), - }); - }; - } + let poller_future = async { while (poller.next().await).is_some() {} }; + + if !this.running { + worker.running.store(true, Ordering::Relaxed); + this.running = true; + worker.emit(Event::Start); + } + let combined = Box::pin(join(poller_future, heartbeat.as_mut())); + + let mut combined = select( + combined, + worker.state.clone().map(|_| worker.emit(Event::Stop)), + ) + .boxed(); + match Pin::new(&mut combined).poll(cx) { + Poll::Ready(_) => { + worker.emit(Event::Exit); + Poll::Ready(()) } + Poll::Pending => Poll::Pending, } } } /// Stores the Workers context #[derive(Clone)] -pub struct Context { - context: Option, - executor: E, +pub struct Context { task_count: Arc, 
wakers: Arc>>, running: Arc, - instance: usize, + shutdown: Option, + event_handler: EventHandler, } -impl fmt::Debug for Context { +impl fmt::Debug for Context { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("WorkerContext") .field("shutdown", &["Shutdown handle"]) - .field("instance", &self.instance) + .field("task_count", &self.task_count) + .field("running", &self.running) .finish() } } pin_project! { - struct Tracked { - worker: Context, + /// A future tracked by the worker + pub struct Tracked { + ctx: Context, #[pin] task: F, } } -impl Future for Tracked { +impl Future for Tracked { type Output = F::Output; fn poll(self: Pin<&mut Self>, cx: &mut TaskCtx<'_>) -> Poll { @@ -550,7 +423,7 @@ impl Future for Tracked { match this.task.poll(cx) { res @ Poll::Ready(_) => { - this.worker.end_task(); + this.ctx.end_task(); res } Poll::Pending => Poll::Pending, @@ -558,26 +431,16 @@ impl Future for Tracked { } } -impl Context { - /// Allows spawning of futures that will be gracefully shutdown by the worker - pub fn spawn(&self, future: impl Future + Send + 'static) { - self.executor.spawn(self.track(future)); - } - - fn track>(&self, task: F) -> Tracked { +impl Context { + /// Start a task that is tracked by the worker + pub fn track(&self, task: F) -> Tracked { self.start_task(); Tracked { - worker: self.clone(), + ctx: self.clone(), task, } } - /// Calling this function triggers shutting down the worker without waiting for any tasks to complete - pub fn force_stop(&self) { - self.task_count.store(WORKER_FUTURES, Ordering::Relaxed); - self.stop(); - } - /// Calling this function triggers shutting down the worker while waiting for any tasks to complete pub fn stop(&self) { self.running.store(false, Ordering::Relaxed); @@ -589,7 +452,7 @@ impl Context { } fn end_task(&self) { - if self.task_count.fetch_sub(1, Ordering::Relaxed) == WORKER_FUTURES { + if self.task_count.fetch_sub(1, Ordering::Relaxed) == 1 { self.wake(); } } @@ -607,11 +470,22 @@ impl Context { self.running.load(Ordering::Relaxed) } + /// Returns the current futures in the worker domain + /// This include futures spawned via `worker.track` + pub fn task_count(&self) -> usize { + self.task_count.load(Ordering::Relaxed) + } + + /// Returns whether the worker has pending tasks + pub fn has_pending_tasks(&self) -> bool { + self.task_count.load(Ordering::Relaxed) > 0 + } + /// Is the shutdown token called pub fn is_shutting_down(&self) -> bool { - self.context + self.shutdown .as_ref() - .map(|s| !self.is_running() || s.shutdown().is_shutting_down()) + .map(|s| !self.is_running() || s.is_shutting_down()) .unwrap_or(!self.is_running()) } @@ -624,22 +498,13 @@ impl Context { } } -// impl FromRequest for Context {} - -impl Future for Context { +impl Future for Context { type Output = (); fn poll(self: Pin<&mut Self>, cx: &mut TaskCtx<'_>) -> Poll<()> { - let running = self.is_running(); let task_count = self.task_count.load(Ordering::Relaxed); - if self.is_shutting_down() || !running { - if task_count <= WORKER_FUTURES { - self.stop(); - Poll::Ready(()) - } else { - self.add_waker(cx); - Poll::Pending - } + if self.is_shutting_down() && task_count == 0 { + Poll::Ready(()) } else { self.add_waker(cx); Poll::Pending @@ -647,18 +512,53 @@ impl Future for Context { } } -#[cfg(test)] -mod tests { - use std::{ops::Deref, sync::atomic::AtomicUsize, time::Duration}; +#[derive(Debug, Clone)] +struct TrackerLayer { + ctx: Context, +} + +impl TrackerLayer { + fn new(ctx: Context) -> Self { + Self { ctx } + } +} - 
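Because a `Tracked` future decrements the count when it completes, a graceful `stop()` (together with the `Context` future above, which only resolves once the task count reaches zero) waits for any side work a handler registers. A sketch of a handler using `track`, with `Worker<Context>` injected through the `FromRequest` impl shown earlier:

```rust
use apalis_core::worker::{Context, Worker};

// Sketch: side work tracked on the worker context, so graceful shutdown
// waits for it. The handler signature mirrors the updated test below.
async fn task(job: u32, worker: Worker<Context>) {
    let side_work = worker.track(async move {
        // e.g. flush metrics or audit logs for `job`
        println!("post-processing job {job}");
    });
    side_work.await; // Tracked<F> yields F::Output and decrements task_count
}
```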
#[derive(Debug, Clone)] - struct TokioTestExecutor; +impl Layer for TrackerLayer { + type Service = TrackerService; - impl Executor for TokioTestExecutor { - fn spawn(&self, future: impl Future + Send + 'static) { - tokio::spawn(future); + fn layer(&self, service: S) -> Self::Service { + TrackerService { + ctx: self.ctx.clone(), + service, } } +} +#[derive(Debug, Clone)] +struct TrackerService { + ctx: Context, + service: S, +} + +impl Service> for TrackerService +where + S: Service>, +{ + type Response = S::Response; + type Error = S::Error; + type Future = Tracked; + + fn poll_ready(&mut self, cx: &mut std::task::Context<'_>) -> Poll> { + self.service.poll_ready(cx) + } + + fn call(&mut self, request: Request) -> Self::Future { + self.ctx.track(self.service.call(request)) + } +} + +#[cfg(test)] +mod tests { + use std::{ops::Deref, sync::atomic::AtomicUsize}; use crate::{ builder::{WorkerBuilder, WorkerFactoryFn}, @@ -676,22 +576,19 @@ mod tests { assert_eq!( WorkerId::from_str("worker").unwrap(), WorkerId { - instance: None, name: "worker".to_string() } ); assert_eq!( WorkerId::from_str("worker-0").unwrap(), WorkerId { - instance: Some(0), - name: "worker".to_string() + name: "worker-0".to_string() } ); assert_eq!( WorkerId::from_str("complex&*-worker-name-0").unwrap(), WorkerId { - instance: Some(0), - name: "complex&*-worker-name".to_string() + name: "complex&*-worker-name-0".to_string() } ); } @@ -717,23 +614,16 @@ mod tests { } } - async fn task(job: u32, count: Data) { + async fn task(job: u32, count: Data, worker: Worker) { count.fetch_add(1, Ordering::Relaxed); if job == ITEMS - 1 { - tokio::time::sleep(Duration::from_secs(1)).await; + worker.stop(); } } let worker = WorkerBuilder::new("rango-tango") .data(Count::default()) .backend(in_memory); let worker = worker.build_fn(task); - let worker = worker.with_executor(TokioTestExecutor); - let w = worker.clone(); - - tokio::spawn(async move { - tokio::time::sleep(Duration::from_secs(3)).await; - w.stop(); - }); worker.run().await; } } diff --git a/packages/apalis-core/src/worker/stream.rs b/packages/apalis-core/src/worker/stream.rs deleted file mode 100644 index e2a9ec81..00000000 --- a/packages/apalis-core/src/worker/stream.rs +++ /dev/null @@ -1,55 +0,0 @@ -use futures::{Future, Stream, StreamExt}; -use std::pin::Pin; -use std::task::{Context, Poll}; - -use super::WorkerNotify; - -pub(crate) struct WorkerStream -where - S: Stream, -{ - notify: WorkerNotify, - stream: S, -} - -impl WorkerStream -where - S: Stream + Unpin + 'static, -{ - pub(crate) fn new(stream: S, notify: WorkerNotify) -> Self { - Self { notify, stream } - } - pub(crate) fn into_future(mut self) -> impl Future { - Box::pin(async move { - loop { - self.next().await; - } - }) - } -} - -impl Stream for WorkerStream -where - S: Stream + Unpin, -{ - type Item = (); - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let this = self.get_mut(); - // Poll for the next listener - match this.notify.poll_next_unpin(cx) { - Poll::Ready(Some(mut worker)) => { - match this.stream.poll_next_unpin(cx) { - Poll::Ready(Some(item)) => { - if let Err(_e) = worker.send(item) {} - Poll::Ready(Some(())) - } - Poll::Ready(None) => Poll::Ready(None), // Inner stream is exhausted - Poll::Pending => Poll::Pending, - } - } - Poll::Ready(None) => Poll::Ready(None), // No more workers - Poll::Pending => Poll::Pending, - } - } -} diff --git a/packages/apalis-cron/README.md b/packages/apalis-cron/README.md index c8a2e741..deb30ff9 100644 --- a/packages/apalis-cron/README.md 
+++ b/packages/apalis-cron/README.md @@ -36,11 +36,11 @@ async fn send_reminder(job: Reminder, svc: Data) { async fn main() { let schedule = Schedule::from_str("@daily").unwrap(); let worker = WorkerBuilder::new("morning-cereal") - .layer(RetryLayer::new(RetryPolicy::retries(5))) + .retry(RetryPolicy::retries(5)) .data(FakeService) .stream(CronStream::new(schedule).into_stream()) .build_fn(send_reminder); - Monitor::::new() + Monitor::new() .register(worker) .run() .await diff --git a/packages/apalis-cron/src/lib.rs b/packages/apalis-cron/src/lib.rs index 9680ec76..621f002d 100644 --- a/packages/apalis-cron/src/lib.rs +++ b/packages/apalis-cron/src/lib.rs @@ -45,11 +45,11 @@ //! async fn main() { //! let schedule = Schedule::from_str("@daily").unwrap(); //! let worker = WorkerBuilder::new("morning-cereal") -//! .layer(RetryLayer::new(RetryPolicy::retries(5))) +//! .retry(RetryPolicy::retries(5)) //! .data(FakeService) //! .backend(CronStream::new(schedule)) //! .build_fn(send_reminder); -//! Monitor::::new() +//! Monitor::new() //! .register(worker) //! .run() //! .await diff --git a/packages/apalis-redis/src/lib.rs b/packages/apalis-redis/src/lib.rs index 01c8c04d..bffe5818 100644 --- a/packages/apalis-redis/src/lib.rs +++ b/packages/apalis-redis/src/lib.rs @@ -15,7 +15,7 @@ //! async fn main() { //! let conn = apalis_redis::connect("redis://127.0.0.1/").await.unwrap(); //! let storage = RedisStorage::new(conn); -//! Monitor::::new() +//! Monitor::new() //! .register( //! WorkerBuilder::new("tasty-pear") //! .backend(storage.clone()) diff --git a/packages/apalis-redis/src/storage.rs b/packages/apalis-redis/src/storage.rs index a8b7d442..b9d1a31d 100644 --- a/packages/apalis-redis/src/storage.rs +++ b/packages/apalis-redis/src/storage.rs @@ -667,7 +667,7 @@ where async fn len(&mut self) -> Result { let all_jobs: i64 = redis::cmd("HLEN") - .arg(&self.config.job_data_hash()) + .arg(self.config.job_data_hash()) .query_async(&mut self.conn) .await?; let done_jobs: i64 = redis::cmd("ZCOUNT") @@ -684,7 +684,7 @@ where job_id: &TaskId, ) -> Result>, RedisError> { let data: Value = redis::cmd("HMGET") - .arg(&self.config.job_data_hash()) + .arg(self.config.job_data_hash()) .arg(job_id.to_string()) .query_async(&mut self.conn) .await?; @@ -699,7 +699,7 @@ where let bytes = C::encode(&job) .map_err(|e| (ErrorKind::IoError, "Encode error", e.into().to_string()))?; let _: i64 = redis::cmd("HSET") - .arg(&self.config.job_data_hash()) + .arg(self.config.job_data_hash()) .arg(task_id) .arg(bytes) .query_async(&mut self.conn) diff --git a/packages/apalis-sql/src/lib.rs b/packages/apalis-sql/src/lib.rs index edc37126..af3bab92 100644 --- a/packages/apalis-sql/src/lib.rs +++ b/packages/apalis-sql/src/lib.rs @@ -231,18 +231,6 @@ macro_rules! sql_storage_tests { async fn worker_consume() { use apalis_core::builder::WorkerBuilder; use apalis_core::builder::WorkerFactoryFn; - use apalis_core::executor::Executor; - use std::future::Future; - - #[derive(Debug, Clone)] - struct TokioTestExecutor; - - impl Executor for TokioTestExecutor { - fn spawn(&self, future: impl Future + Send + 'static) { - tokio::spawn(future); - } - } - let storage = $setup().await; let mut handle = storage.clone(); @@ -257,8 +245,9 @@ macro_rules! 
sql_storage_tests { } let worker = WorkerBuilder::new("rango-tango").backend(storage); let worker = worker.build_fn(task); - let worker = worker.with_executor(TokioTestExecutor); - let w = worker.clone(); + let wkr = worker.run(); + + let w = wkr.get_handle(); let runner = async move { apalis_core::sleep(Duration::from_secs(3)).await; @@ -275,7 +264,6 @@ macro_rules! sql_storage_tests { w.stop(); }; - let wkr = worker.run(); tokio::join!(runner, wkr); } }; diff --git a/packages/apalis-sql/src/postgres.rs b/packages/apalis-sql/src/postgres.rs index 31d6b93a..5b6ac270 100644 --- a/packages/apalis-sql/src/postgres.rs +++ b/packages/apalis-sql/src/postgres.rs @@ -27,8 +27,8 @@ //! // let query = "Select apalis.push_job('apalis::Email', json_build_object('subject', 'Test apalis', 'to', 'test1@example.com', 'text', 'Lorem Ipsum'));"; //! // pg.execute(query).await.unwrap(); //! -//! Monitor::::new() -//! .register_with_count(4, { +//! Monitor::new() +//! .register({ //! WorkerBuilder::new(&format!("tasty-avocado")) //! .data(0usize) //! .backend(pg) @@ -432,7 +432,7 @@ where let job_type = self.config.namespace.clone(); sqlx::query(query) .bind(args) - .bind(&req.parts.task_id.to_string()) + .bind(req.parts.task_id.to_string()) .bind(&job_type) .bind(&req.parts.context.max_attempts()) .execute(&self.pool) diff --git a/packages/apalis-sql/src/sqlite.rs b/packages/apalis-sql/src/sqlite.rs index 93d62490..ecbc9f0f 100644 --- a/packages/apalis-sql/src/sqlite.rs +++ b/packages/apalis-sql/src/sqlite.rs @@ -240,7 +240,7 @@ where let job_type = self.config.namespace.clone(); sqlx::query(query) .bind(raw) - .bind(&parts.task_id.to_string()) + .bind(parts.task_id.to_string()) .bind(job_type.to_string()) .bind(&parts.context.max_attempts()) .execute(&self.pool) diff --git a/src/layers/mod.rs b/src/layers/mod.rs index 93294444..47bfbeb6 100644 --- a/src/layers/mod.rs +++ b/src/layers/mod.rs @@ -23,6 +23,10 @@ pub mod limit { pub use tower::limit::RateLimitLayer; } +use apalis_core::{builder::WorkerBuilder, layers::Identity}; +#[cfg(feature = "catch-panic")] +use catch_panic::CatchPanicLayer; +use tower::layer::util::Stack; /// Timeout middleware for apalis #[cfg(feature = "timeout")] #[cfg_attr(docsrs, doc(cfg(feature = "timeout")))] @@ -34,3 +38,281 @@ pub use tower::timeout::TimeoutLayer; pub mod catch_panic; pub use apalis_core::error::ErrorHandlingLayer; + +/// A trait that extends `WorkerBuilder` with additional middleware methods +/// derived from `tower::ServiceBuilder`. +pub trait WorkerBuilderExt { + /// Optionally adds a new layer `T` into the [`WorkerBuilder`]. + fn option_layer( + self, + layer: Option, + ) -> WorkerBuilder, Middleware>, Serv>; + + /// Adds a [`Layer`] built from a function that accepts a service and returns another service. + fn layer_fn( + self, + f: F, + ) -> WorkerBuilder, Middleware>, Serv>; + + /// Limits the max number of in-flight requests. + #[cfg(feature = "limit")] + fn concurrency( + self, + max: usize, + ) -> WorkerBuilder, Serv>; + + /// Limits requests to at most `num` per the given duration. + #[cfg(feature = "limit")] + fn rate_limit( + self, + num: u64, + per: std::time::Duration, + ) -> WorkerBuilder, Serv>; + + /// Retries failed requests according to the given retry policy. + #[cfg(feature = "retry")] + fn retry
<P>
( + self, + policy: P, + ) -> WorkerBuilder, Middleware>, Serv>; + + /// Fails requests that take longer than `timeout`. + #[cfg(feature = "timeout")] + fn timeout( + self, + timeout: std::time::Duration, + ) -> WorkerBuilder, Serv>; + + /// Conditionally rejects requests based on `predicate`. + #[cfg(feature = "filter")] + fn filter
<P>
( + self, + predicate: P, + ) -> WorkerBuilder, Middleware>, Serv>; + + /// Conditionally rejects requests based on an asynchronous `predicate`. + #[cfg(feature = "filter")] + fn filter_async
<P>
( + self, + predicate: P, + ) -> WorkerBuilder, Middleware>, Serv>; + + /// Maps one request type to another. + fn map_request( + self, + f: F, + ) -> WorkerBuilder, Middleware>, Serv> + where + F: FnMut(R1) -> R2 + Clone; + + /// Maps one response type to another. + fn map_response( + self, + f: F, + ) -> WorkerBuilder, Middleware>, Serv>; + + /// Maps one error type to another. + fn map_err( + self, + f: F, + ) -> WorkerBuilder, Middleware>, Serv>; + + /// Composes a function that transforms futures produced by the service. + fn map_future( + self, + f: F, + ) -> WorkerBuilder, Middleware>, Serv>; + + /// Applies an asynchronous function after the service, regardless of whether the future succeeds or fails. + fn then( + self, + f: F, + ) -> WorkerBuilder, Middleware>, Serv>; + + /// Executes a new future after this service's future resolves. + fn and_then( + self, + f: F, + ) -> WorkerBuilder, Middleware>, Serv>; + + /// Maps the service's result type to a different value, regardless of success or failure. + fn map_result( + self, + f: F, + ) -> WorkerBuilder, Middleware>, Serv>; + + /// Catch panics in execution and pipe them as errors + #[cfg(feature = "catch-panic")] + #[cfg_attr(docsrs, doc(cfg(feature = "catch-panic")))] + #[allow(clippy::type_complexity)] + fn catch_panic( + self, + ) -> WorkerBuilder< + Req, + Ctx, + Source, + Stack< + CatchPanicLayer) -> apalis_core::error::Error>, + Middleware, + >, + Serv, + >; + /// Enable tracing via tracing crate + #[cfg(feature = "tracing")] + #[cfg_attr(docsrs, doc(cfg(feature = "tracing")))] + fn enable_tracing( + self, + ) -> WorkerBuilder, Serv>; +} + +impl WorkerBuilderExt + for WorkerBuilder +{ + fn option_layer( + self, + layer: Option, + ) -> WorkerBuilder, Middleware>, Serv> + { + self.chain(|sb| sb.option_layer(layer)) + } + + fn layer_fn( + self, + f: F, + ) -> WorkerBuilder, Middleware>, Serv> { + self.chain(|sb| sb.layer_fn(f)) + } + + #[cfg(feature = "limit")] + fn concurrency( + self, + max: usize, + ) -> WorkerBuilder, Serv> + { + self.chain(|sb| sb.concurrency_limit(max)) + } + + #[cfg(feature = "limit")] + fn rate_limit( + self, + num: u64, + per: std::time::Duration, + ) -> WorkerBuilder, Serv> { + self.chain(|sb| sb.rate_limit(num, per)) + } + + #[cfg(feature = "retry")] + fn retry
<P>
( + self, + policy: P, + ) -> WorkerBuilder, Middleware>, Serv> { + self.chain(|sb| sb.retry(policy)) + } + + #[cfg(feature = "timeout")] + fn timeout( + self, + timeout: std::time::Duration, + ) -> WorkerBuilder, Serv> { + self.chain(|sb| sb.timeout(timeout)) + } + + #[cfg(feature = "filter")] + fn filter
<P>
( + self, + predicate: P, + ) -> WorkerBuilder, Middleware>, Serv> { + self.chain(|sb| sb.filter(predicate)) + } + + #[cfg(feature = "filter")] + fn filter_async
<P>
( + self, + predicate: P, + ) -> WorkerBuilder, Middleware>, Serv> + { + self.chain(|sb| sb.filter_async(predicate)) + } + + fn map_request( + self, + f: F, + ) -> WorkerBuilder, Middleware>, Serv> + where + F: FnMut(R1) -> R2 + Clone, + { + self.chain(|sb| sb.map_request(f)) + } + + fn map_response( + self, + f: F, + ) -> WorkerBuilder, Middleware>, Serv> + { + self.chain(|sb| sb.map_response(f)) + } + + fn map_err( + self, + f: F, + ) -> WorkerBuilder, Middleware>, Serv> { + self.chain(|sb| sb.map_err(f)) + } + + fn map_future( + self, + f: F, + ) -> WorkerBuilder, Middleware>, Serv> { + self.chain(|sb| sb.map_future(f)) + } + + fn then( + self, + f: F, + ) -> WorkerBuilder, Middleware>, Serv> { + self.chain(|sb| sb.then(f)) + } + + fn and_then( + self, + f: F, + ) -> WorkerBuilder, Middleware>, Serv> { + self.chain(|sb| sb.and_then(f)) + } + + fn map_result( + self, + f: F, + ) -> WorkerBuilder, Middleware>, Serv> { + self.chain(|sb| sb.map_result(f)) + } + + /// Catch panics in execution and pipe them as errors + #[cfg(feature = "catch-panic")] + #[cfg_attr(docsrs, doc(cfg(feature = "catch-panic")))] + fn catch_panic( + self, + ) -> WorkerBuilder< + Req, + Ctx, + (), + Stack< + CatchPanicLayer) -> apalis_core::error::Error>, + Middleware, + >, + Serv, + > { + self.chain(|svc| svc.layer(CatchPanicLayer::new())) + } + + /// Enable tracing via tracing crate + #[cfg(feature = "tracing")] + #[cfg_attr(docsrs, doc(cfg(feature = "tracing")))] + fn enable_tracing( + self, + ) -> WorkerBuilder, Serv> { + use tracing::TraceLayer; + + self.chain(|svc| svc.layer(TraceLayer::new())) + } +} diff --git a/src/layers/tracing/make_span.rs b/src/layers/tracing/make_span.rs index 2ef9eb3a..101d7efa 100644 --- a/src/layers/tracing/make_span.rs +++ b/src/layers/tracing/make_span.rs @@ -63,10 +63,12 @@ impl Default for DefaultMakeSpan { } impl MakeSpan for DefaultMakeSpan { - fn make_span(&mut self, _req: &Request) -> Span { + fn make_span(&mut self, req: &Request) -> Span { // This ugly macro is needed, unfortunately, because `tracing::span!` // required the level argument to be static. Meaning we can't just pass // `self.level`. + let task_id = req.parts.task_id.to_string(); + let attempt = req.parts.attempt.current(); let span = Span::current(); macro_rules! 
make_span { ($level:expr) => { @@ -74,6 +76,8 @@ impl MakeSpan for DefaultMakeSpan { parent: span, $level, "task", + task_id = task_id, + attempt = attempt ) }; } diff --git a/src/layers/tracing/on_request.rs b/src/layers/tracing/on_request.rs index c983d721..bc3ac718 100644 --- a/src/layers/tracing/on_request.rs +++ b/src/layers/tracing/on_request.rs @@ -80,19 +80,19 @@ impl OnRequest for DefaultOnRequest { fn on_request(&mut self, _: &Request, _: &Span) { match self.level { Level::ERROR => { - tracing::event!(Level::ERROR, "job.start",); + tracing::event!(Level::ERROR, "task.start",); } Level::WARN => { - tracing::event!(Level::WARN, "job.start",); + tracing::event!(Level::WARN, "task.start",); } Level::INFO => { - tracing::event!(Level::INFO, "job.start",); + tracing::event!(Level::INFO, "task.start",); } Level::DEBUG => { - tracing::event!(Level::DEBUG, "job.start",); + tracing::event!(Level::DEBUG, "task.start",); } Level::TRACE => { - tracing::event!(Level::TRACE, "job.start",); + tracing::event!(Level::TRACE, "task.start",); } } } diff --git a/src/layers/tracing/on_response.rs b/src/layers/tracing/on_response.rs index fa7b67a6..8889332f 100644 --- a/src/layers/tracing/on_response.rs +++ b/src/layers/tracing/on_response.rs @@ -106,7 +106,7 @@ macro_rules! log_pattern_match { Level::$level, done_in = format_args!("{}s", $done_in.as_secs_f64()), result = format_args!("{:?}", $res), - "job.done" + "task.done" ); } (Level::$level, LatencyUnit::Millis) => { @@ -114,7 +114,7 @@ macro_rules! log_pattern_match { Level::$level, done_in = format_args!("{}ms", $done_in.as_millis()), result = format_args!("{:?}", $res), - "job.done" + "task.done" ); } (Level::$level, LatencyUnit::Micros) => { @@ -122,7 +122,7 @@ macro_rules! log_pattern_match { Level::$level, done_in = format_args!("{}μs", $done_in.as_micros()), result = format_args!("{:?}", $res), - "job.done" + "task.done" ); } (Level::$level, LatencyUnit::Nanos) => { @@ -130,7 +130,7 @@ macro_rules! log_pattern_match { Level::$level, done_in = format_args!("{}ns", $done_in.as_nanos()), result = format_args!("{:?}", $res), - "job.done" + "task.done" ); } diff --git a/src/lib.rs b/src/lib.rs index 493cd3e9..2e3a3e84 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -35,9 +35,10 @@ //! let redis = std::env::var("REDIS_URL").expect("Missing REDIS_URL env variable"); //! let conn = apalis_redis::connect(redis).await.unwrap(); //! let storage = RedisStorage::new(conn); -//! Monitor::::new() -//! .register_with_count(2, { +//! Monitor::new() +//! .register({ //! WorkerBuilder::new(&format!("quick-sand")) +//! .concurrency(2) //! .data(0usize) //! .backend(storage.clone()) //! 
.build_fn(send_email) @@ -65,49 +66,20 @@ /// apalis fully supports middleware via [`Layer`](https://docs.rs/tower/latest/tower/trait.Layer.html) pub mod layers; -/// Utilities for working with apalis -pub mod utils { - /// Executor for [`tokio`] - #[cfg(feature = "tokio-comp")] - #[derive(Clone, Debug, Default)] - pub struct TokioExecutor; - - #[cfg(feature = "tokio-comp")] - impl apalis_core::executor::Executor for TokioExecutor { - fn spawn(&self, future: impl std::future::Future + Send + 'static) { - tokio::spawn(future); - } - } - - /// Executor for [`async_std`] - #[cfg(feature = "async-std-comp")] - #[derive(Clone, Debug, Default)] - pub struct AsyncStdExecutor; - - #[cfg(feature = "async-std-comp")] - impl apalis_core::executor::Executor for AsyncStdExecutor { - fn spawn(&self, future: impl std::future::Future + Send + 'static) { - async_std::task::spawn(future); - } - } -} - /// Common imports pub mod prelude { - #[cfg(feature = "tokio-comp")] - pub use crate::utils::TokioExecutor; + pub use crate::layers::WorkerBuilderExt; pub use apalis_core::{ builder::{WorkerBuilder, WorkerFactory, WorkerFactoryFn}, data::Extensions, error::{BoxDynError, Error}, - executor::Executor, layers::extensions::{AddExtension, Data}, memory::{MemoryStorage, MemoryWrapper}, - monitor::{Monitor, MonitorContext}, + monitor::Monitor, mq::MessageQueue, notify::Notify, poller::stream::BackendStream, - poller::{controller::Controller, FetchNext, Poller}, + poller::{controller::Controller, Poller}, request::{Request, RequestStream}, response::IntoResponse, service_fn::{service_fn, FromRequest, ServiceFn}, From e7dad8cb1ff8359310face0398697c8aa347a634 Mon Sep 17 00:00:00 2001 From: Geoffrey Mureithi <95377562+geofmureithi@users.noreply.github.com> Date: Fri, 22 Nov 2024 00:17:55 +0300 Subject: [PATCH 56/59] Chore/better api (#452) * fix: relax the api provided for sqlx req * lint: clippy and fmt --- examples/redis-with-msg-pack/Cargo.toml | 2 +- packages/apalis-sql/src/from_row.rs | 39 +++++-------------------- 2 files changed, 8 insertions(+), 33 deletions(-) diff --git a/examples/redis-with-msg-pack/Cargo.toml b/examples/redis-with-msg-pack/Cargo.toml index abbd795e..c6ec5cce 100644 --- a/examples/redis-with-msg-pack/Cargo.toml +++ b/examples/redis-with-msg-pack/Cargo.toml @@ -14,7 +14,7 @@ tracing-subscriber = "0.3.11" chrono = { version = "0.4", default-features = false, features = ["clock"] } email-service = { path = "../email-service" } rmp-serde = "1.3" -redis = "0.25" +redis = "0.27" [dependencies.tracing] diff --git a/packages/apalis-sql/src/from_row.rs b/packages/apalis-sql/src/from_row.rs index 88623c3a..db5787d8 100644 --- a/packages/apalis-sql/src/from_row.rs +++ b/packages/apalis-sql/src/from_row.rs @@ -10,43 +10,15 @@ use crate::context::SqlContext; /// Wrapper for [Request] #[derive(Debug, Clone, Serialize, Deserialize)] pub struct SqlRequest { - pub(crate) req: Request, + /// The inner request + pub req: Request, + pub(crate) _priv: (), } impl SqlRequest { /// Creates a new SqlRequest. pub fn new(req: Request) -> Self { - SqlRequest { req } - } - - /// Gets a reference to the request. - pub fn req(&self) -> &T { - &self.req.args - } - - /// Gets a mutable reference to the request. - pub fn req_mut(&mut self) -> &mut T { - &mut self.req.args - } - - /// Sets the request. - pub fn set_req(&mut self, req: T) { - self.req.args = req; - } - - /// Gets a reference to the context. - pub fn context(&self) -> &SqlContext { - &self.req.parts.context - } - - /// Gets a mutable reference to the context. 
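In practice, #452 turns accessor calls into plain field access on the now-public `req` (a full `Request`). A sketch with a generic job type (`describe` is an illustrative helper, not part of the crate):

```rust
use apalis_sql::context::SqlContext;
use apalis_sql::from_row::SqlRequest;

// With `req` public, callers reach args and context through the Request
// itself instead of the removed req()/context() accessors.
fn describe<T: std::fmt::Debug>(wrapper: &SqlRequest<T>) -> String {
    let args = &wrapper.req.args; // was: wrapper.req()
    let ctx: &SqlContext = &wrapper.req.parts.context; // was: wrapper.context()
    format!("{args:?} (max attempts: {})", ctx.max_attempts())
}
```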
- pub fn context_mut(&mut self) -> &mut SqlContext { - &mut self.req.parts.context - } - - /// Sets the context. - pub fn set_context(&mut self, context: SqlContext) { - self.req.parts.context = context; + SqlRequest { req, _priv: () } } } @@ -110,6 +82,7 @@ impl<'r, T: Decode<'r, sqlx::Sqlite> + Type> parts.context = context; Ok(SqlRequest { req: Request::new_with_parts(job, parts), + _priv: (), }) } } @@ -173,6 +146,7 @@ impl<'r, T: Decode<'r, sqlx::Postgres> + Type> parts.context = context; Ok(SqlRequest { req: Request::new_with_parts(job, parts), + _priv: (), }) } } @@ -235,6 +209,7 @@ impl<'r, T: Decode<'r, sqlx::MySql> + Type> sqlx::FromRow<'r, sqlx: parts.context = context; Ok(SqlRequest { req: Request::new_with_parts(job, parts), + _priv: (), }) } } From 995be7cd4f1e4753461916f909dbff7311af4c68 Mon Sep 17 00:00:00 2001 From: Geoffrey Mureithi <95377562+geofmureithi@users.noreply.github.com> Date: Fri, 22 Nov 2024 23:01:31 +0300 Subject: [PATCH 57/59] feat: add recovery of abandoned jobs to backend heartbeats (#453) * feat: add recovery of abandoned jobs to backend heartbeats * lint: fmt * fix: attempt to get tests passing * fix: attempt to get tests passing * fix: minor fix typo * fix: minor different solutions * fix: better handle attempts * handle postgres edge case * fix: better handling --- packages/apalis-redis/src/storage.rs | 70 ++++++++++++++++++++++--- packages/apalis-sql/src/lib.rs | 21 ++++++++ packages/apalis-sql/src/mysql.rs | 76 ++++++++++++++++++---------- packages/apalis-sql/src/postgres.rs | 48 +++++++++++++----- packages/apalis-sql/src/sqlite.rs | 58 ++++++++++++++++----- 5 files changed, 211 insertions(+), 62 deletions(-) diff --git a/packages/apalis-redis/src/storage.rs b/packages/apalis-redis/src/storage.rs index b9d1a31d..1f530480 100644 --- a/packages/apalis-redis/src/storage.rs +++ b/packages/apalis-redis/src/storage.rs @@ -12,7 +12,7 @@ use apalis_core::task::namespace::Namespace; use apalis_core::task::task_id::TaskId; use apalis_core::worker::WorkerId; use apalis_core::{Backend, Codec}; -use chrono::Utc; +use chrono::{DateTime, Utc}; use futures::channel::mpsc::{self, Sender}; use futures::{select, FutureExt, SinkExt, StreamExt, TryFutureExt}; use log::*; @@ -114,6 +114,7 @@ pub struct Config { max_retries: usize, keep_alive: Duration, enqueue_scheduled: Duration, + reenqueue_orphaned_after: Duration, namespace: String, } @@ -125,6 +126,7 @@ impl Default for Config { max_retries: 5, keep_alive: Duration::from_secs(30), enqueue_scheduled: Duration::from_secs(30), + reenqueue_orphaned_after: Duration::from_secs(300), namespace: String::from("apalis_redis"), } } @@ -277,6 +279,25 @@ impl Config { pub fn signal_list(&self) -> String { SIGNAL_LIST.replace("{queue}", &self.namespace) } + + /// Gets the reenqueue_orphaned_after duration. + pub fn reenqueue_orphaned_after(&self) -> Duration { + self.reenqueue_orphaned_after + } + + /// Gets a mutable reference to the reenqueue_orphaned_after. + pub fn reenqueue_orphaned_after_mut(&mut self) -> &mut Duration { + &mut self.reenqueue_orphaned_after + } + + /// Occasionally some workers die, or abandon jobs because of panics. + /// This is the time a task takes before its back to the queue + /// + /// Defaults to 5 minutes + pub fn set_reenqueue_orphaned_after(mut self, after: Duration) -> Self { + self.reenqueue_orphaned_after = after; + self + } } /// Represents a [Storage] that uses Redis for storage. 
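The new knobs follow the same builder style as the rest of `Config`; a sketch of tuning the recovery window (the two-minute value is illustrative):

```rust
use apalis_redis::Config;
use std::time::Duration;

fn tuned_config() -> Config {
    // Reclaim jobs whose worker stopped heartbeating two minutes ago;
    // the default shown above is five minutes.
    let config = Config::default().set_reenqueue_orphaned_after(Duration::from_secs(120));
    assert_eq!(config.reenqueue_orphaned_after(), Duration::from_secs(120));
    config
}
```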
@@ -400,6 +421,9 @@ where let config = self.config.clone(); let stream: RequestStream> = Box::pin(rx); let heartbeat = async move { + let mut reenqueue_orphaned_stm = + apalis_core::interval::interval(config.poll_interval).fuse(); + let mut keep_alive_stm = apalis_core::interval::interval(config.keep_alive).fuse(); let mut enqueue_scheduled_stm = @@ -448,6 +472,13 @@ where } } } + _ = reenqueue_orphaned_stm.next() => { + let dead_since = Utc::now() + - chrono::Duration::from_std(config.reenqueue_orphaned_after).unwrap(); + if let Err(e) = self.reenqueue_orphaned((config.buffer_size * 10) as i32, dead_since).await { + error!("ReenqueueOrphanedError: {}", e); + } + } }; } }; @@ -875,17 +906,21 @@ where .invoke_async(&mut self.conn) .await } - /// Re-enqueue some jobs that might be orphaned. + /// Re-enqueue some jobs that might be orphaned after a number of seconds pub async fn reenqueue_orphaned( &mut self, - count: usize, - dead_since: i64, + count: i32, + dead_since: DateTime, ) -> Result { let reenqueue_orphaned = self.scripts.reenqueue_orphaned.clone(); let consumers_set = self.config.consumers_set(); let active_jobs_list = self.config.active_jobs_list(); let signal_list = self.config.signal_list(); + let now = Utc::now(); + let duration = now.signed_duration_since(dead_since); + let dead_since = duration.num_seconds(); + let res: Result = reenqueue_orphaned .key(consumers_set) .key(active_jobs_list) @@ -1043,11 +1078,21 @@ mod tests { let worker_id = register_worker_at(&mut storage).await; - let _job = consume_one(&mut storage, &worker_id).await; + let job = consume_one(&mut storage, &worker_id).await; + let dead_since = Utc::now() - chrono::Duration::from_std(Duration::from_secs(300)).unwrap(); storage - .reenqueue_orphaned(5, 300) + .reenqueue_orphaned(1, dead_since) .await .expect("failed to reenqueue_orphaned"); + let job = get_job(&mut storage, &job.parts.task_id).await; + let ctx = &job.parts.context; + // assert_eq!(*ctx.status(), State::Pending); + // assert!(ctx.done_at().is_none()); + assert!(ctx.lock_by.is_none()); + // assert!(ctx.lock_at().is_none()); + // assert_eq!(*ctx.last_error(), Some("Job was abandoned".to_owned())); + // TODO: Redis should store context aside + // assert_eq!(job.parts.attempt.current(), 1); } #[tokio::test] @@ -1058,10 +1103,19 @@ mod tests { let worker_id = register_worker_at(&mut storage).await; - let _job = consume_one(&mut storage, &worker_id).await; + let job = consume_one(&mut storage, &worker_id).await; + let dead_since = Utc::now() - chrono::Duration::from_std(Duration::from_secs(300)).unwrap(); storage - .reenqueue_orphaned(5, 300) + .reenqueue_orphaned(1, dead_since) .await .expect("failed to reenqueue_orphaned"); + let job = get_job(&mut storage, &job.parts.task_id).await; + let _ctx = &job.parts.context; + // assert_eq!(*ctx.status(), State::Running); + // TODO: update redis context + // assert_eq!(ctx.lock_by, Some(worker_id)); + // assert!(ctx.lock_at().is_some()); + // assert_eq!(*ctx.last_error(), None); + assert_eq!(job.parts.attempt.current(), 0); } } diff --git a/packages/apalis-sql/src/lib.rs b/packages/apalis-sql/src/lib.rs index af3bab92..0a9c6abb 100644 --- a/packages/apalis-sql/src/lib.rs +++ b/packages/apalis-sql/src/lib.rs @@ -45,6 +45,7 @@ pub struct Config { keep_alive: Duration, buffer_size: usize, poll_interval: Duration, + reenqueue_orphaned_after: Duration, namespace: String, } @@ -54,6 +55,7 @@ impl Default for Config { keep_alive: Duration::from_secs(30), buffer_size: 10, poll_interval: Duration::from_millis(100), 
+ reenqueue_orphaned_after: Duration::from_secs(300), // 5 minutes namespace: String::from("apalis::sql"), } } @@ -131,6 +133,25 @@ impl Config { pub fn namespace_mut(&mut self) -> &mut String { &mut self.namespace } + + /// Gets the reenqueue_orphaned_after duration. + pub fn reenqueue_orphaned_after(&self) -> Duration { + self.reenqueue_orphaned_after + } + + /// Gets a mutable reference to the reenqueue_orphaned_after. + pub fn reenqueue_orphaned_after_mut(&mut self) -> &mut Duration { + &mut self.reenqueue_orphaned_after + } + + /// Occasionally some workers die, or abandon jobs because of panics. + /// This is the time a task takes before its back to the queue + /// + /// Defaults to 5 minutes + pub fn set_reenqueue_orphaned_after(mut self, after: Duration) -> Self { + self.reenqueue_orphaned_after = after; + self + } } /// Calculates the status from a result diff --git a/packages/apalis-sql/src/mysql.rs b/packages/apalis-sql/src/mysql.rs index f9a17963..1795fecd 100644 --- a/packages/apalis-sql/src/mysql.rs +++ b/packages/apalis-sql/src/mysql.rs @@ -374,7 +374,7 @@ where impl Backend, Res> for MysqlStorage where Req: Serialize + DeserializeOwned + Sync + Send + Unpin + 'static, - C: Debug + Codec + Clone + Send + 'static, + C: Debug + Codec + Clone + Send + 'static + Sync, { type Stream = BackendStream>>; @@ -387,6 +387,7 @@ where let pool = self.pool.clone(); let ack_notify = self.ack_notify.clone(); let mut hb_storage = self.clone(); + let requeue_storage = self.clone(); let stream = self .stream_jobs(&worker, config.poll_interval, config.buffer_size) .map_err(|e| Error::SourceError(Arc::new(Box::new(e)))); @@ -400,7 +401,7 @@ where .await { for (ctx, res) in ids { - let query = "UPDATE jobs SET status = ?, done_at = now(), last_error = ?, attempts = ? WHERE id = ? AND lock_by = ?"; + let query = "UPDATE jobs SET status = ?, done_at = now(), last_error = ? WHERE id = ? AND lock_by = ?"; let query = sqlx::query(query); let query = query .bind(calculate_status(&res.inner).to_string()) @@ -408,7 +409,6 @@ where serde_json::to_string(&res.inner.as_ref().map_err(|e| e.to_string())) .unwrap(), ) - .bind(res.attempt.current() as u64 + 1) .bind(res.task_id.to_string()) .bind(ctx.lock_by().as_ref().unwrap().to_string()); if let Err(e) = query.execute(&pool).await { @@ -429,10 +429,23 @@ where apalis_core::sleep(config.keep_alive).await; } }; + let reenqueue_beat = async move { + loop { + let dead_since = Utc::now() + - chrono::Duration::from_std(config.reenqueue_orphaned_after).unwrap(); + if let Err(e) = requeue_storage + .reenqueue_orphaned(config.buffer_size.try_into().unwrap(), dead_since) + .await + { + error!("ReenqueueOrphaned failed: {e}"); + } + apalis_core::sleep(config.poll_interval).await; + } + }; Poller::new_with_layer( stream, async { - futures::join!(heartbeat, ack_heartbeat); + futures::join!(heartbeat, ack_heartbeat, reenqueue_beat); }, layer, ) @@ -493,27 +506,22 @@ impl MysqlStorage { } /// Readd jobs that are abandoned to the queue - pub async fn reenqueue_orphaned(&self, timeout: i64) -> Result { + pub async fn reenqueue_orphaned( + &self, + count: i32, + dead_since: DateTime, + ) -> Result { let job_type = self.config.namespace.clone(); let mut tx = self.pool.acquire().await?; let query = r#"Update jobs INNER JOIN ( SELECT workers.id as worker_id, jobs.id as job_id from workers INNER JOIN jobs ON jobs.lock_by = workers.id WHERE jobs.status = "Running" AND workers.last_seen < ? AND workers.worker_type = ? ORDER BY lock_at ASC LIMIT ?) 
diff --git a/packages/apalis-sql/src/mysql.rs b/packages/apalis-sql/src/mysql.rs
index f9a17963..1795fecd 100644
--- a/packages/apalis-sql/src/mysql.rs
+++ b/packages/apalis-sql/src/mysql.rs
@@ -374,7 +374,7 @@ where
 impl Backend, Res> for MysqlStorage
 where
     Req: Serialize + DeserializeOwned + Sync + Send + Unpin + 'static,
-    C: Debug + Codec + Clone + Send + 'static,
+    C: Debug + Codec + Clone + Send + 'static + Sync,
 {
     type Stream = BackendStream>>;

@@ -387,6 +387,7 @@ where
         let pool = self.pool.clone();
         let ack_notify = self.ack_notify.clone();
         let mut hb_storage = self.clone();
+        let requeue_storage = self.clone();
         let stream = self
             .stream_jobs(&worker, config.poll_interval, config.buffer_size)
             .map_err(|e| Error::SourceError(Arc::new(Box::new(e))));
@@ -400,7 +401,7 @@ where
             .await
             {
                 for (ctx, res) in ids {
-                    let query = "UPDATE jobs SET status = ?, done_at = now(), last_error = ?, attempts = ? WHERE id = ? AND lock_by = ?";
+                    let query = "UPDATE jobs SET status = ?, done_at = now(), last_error = ? WHERE id = ? AND lock_by = ?";
                     let query = sqlx::query(query);
                     let query = query
                         .bind(calculate_status(&res.inner).to_string())
                         .bind(
                             serde_json::to_string(&res.inner.as_ref().map_err(|e| e.to_string()))
                                 .unwrap(),
                         )
-                        .bind(res.attempt.current() as u64 + 1)
                         .bind(res.task_id.to_string())
                         .bind(ctx.lock_by().as_ref().unwrap().to_string());
                     if let Err(e) = query.execute(&pool).await {
@@ -429,10 +429,23 @@ where
                 apalis_core::sleep(config.keep_alive).await;
             }
         };
+        let reenqueue_beat = async move {
+            loop {
+                let dead_since = Utc::now()
+                    - chrono::Duration::from_std(config.reenqueue_orphaned_after).unwrap();
+                if let Err(e) = requeue_storage
+                    .reenqueue_orphaned(config.buffer_size.try_into().unwrap(), dead_since)
+                    .await
+                {
+                    error!("ReenqueueOrphaned failed: {e}");
+                }
+                apalis_core::sleep(config.poll_interval).await;
+            }
+        };
         Poller::new_with_layer(
             stream,
             async {
-                futures::join!(heartbeat, ack_heartbeat);
+                futures::join!(heartbeat, ack_heartbeat, reenqueue_beat);
             },
             layer,
         )
@@ -493,27 +506,22 @@ impl MysqlStorage {
     }

     /// Re-add jobs that are abandoned to the queue
-    pub async fn reenqueue_orphaned(&self, timeout: i64) -> Result<bool, sqlx::Error> {
+    pub async fn reenqueue_orphaned(
+        &self,
+        count: i32,
+        dead_since: DateTime<Utc>,
+    ) -> Result<bool, sqlx::Error> {
         let job_type = self.config.namespace.clone();
         let mut tx = self.pool.acquire().await?;
         let query = r#"Update jobs
                 INNER JOIN ( SELECT workers.id as worker_id, jobs.id as job_id from workers INNER JOIN jobs ON jobs.lock_by = workers.id WHERE jobs.status = "Running" AND workers.last_seen < ?
                 AND workers.worker_type = ? ORDER BY lock_at ASC LIMIT ?)
                 as workers ON jobs.lock_by = workers.worker_id AND jobs.id = workers.job_id
                 SET status = "Pending", done_at = NULL, lock_by = NULL, lock_at = NULL, last_error ="Job was abandoned";"#;
-        let now = Utc::now().timestamp();
-        let seconds_ago = DateTime::from_timestamp(now - timeout, 0).ok_or(sqlx::Error::Io(
-            io::Error::new(io::ErrorKind::InvalidData, "Invalid timeout"),
-        ))?;
         sqlx::query(query)
-            .bind(seconds_ago)
+            .bind(dead_since)
             .bind(job_type)
-            .bind::(
-                self.config
-                    .buffer_size
-                    .try_into()
-                    .map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::InvalidData, e)))?,
-            )
+            .bind(count)
             .execute(&mut *tx)
             .await?;
         Ok(true)
     }
@@ -686,6 +694,8 @@ mod tests {

         // register a worker not responding since 6 minutes ago
         let worker_id = WorkerId::new("test-worker");
+        let five_minutes_ago = Utc::now() - Duration::from_secs(5 * 60);
+
         let six_minutes_ago = Utc::now() - Duration::from_secs(60 * 6);

         storage
@@ -699,7 +709,10 @@

         assert_eq!(*ctx.status(), State::Running);

-        storage.reenqueue_orphaned(300).await.unwrap();
+        storage
+            .reenqueue_orphaned(1, five_minutes_ago)
+            .await
+            .unwrap();

         // then, the job status has changed to Pending
         let job = storage
@@ -707,12 +720,13 @@
             .await
             .unwrap()
             .unwrap();
-        let context = job.parts.context;
-        assert_eq!(*context.status(), State::Pending);
-        assert!(context.lock_by().is_none());
-        assert!(context.lock_at().is_none());
-        assert!(context.done_at().is_none());
-        assert_eq!(*context.last_error(), Some("Job was abandoned".to_string()));
+        let ctx = job.parts.context;
+        assert_eq!(*ctx.status(), State::Pending);
+        assert!(ctx.done_at().is_none());
+        assert!(ctx.lock_by().is_none());
+        assert!(ctx.lock_at().is_none());
+        assert_eq!(*ctx.last_error(), Some("Job was abandoned".to_owned()));
+        assert_eq!(job.parts.attempt.current(), 1);
     }

     #[tokio::test]
@@ -727,6 +741,7 @@

         // register a worker responding at 4 minutes ago
         let four_minutes_ago = Utc::now() - Duration::from_secs(4 * 60);
+        let six_minutes_ago = Utc::now() - Duration::from_secs(6 * 60);

         let worker_id = WorkerId::new("test-worker");
         storage
@@ -741,7 +756,10 @@

         assert_eq!(*ctx.status(), State::Running);

         // heartbeat with ReenqueueOrphaned pulse
-        storage.reenqueue_orphaned(300).await.unwrap();
+        storage
+            .reenqueue_orphaned(1, six_minutes_ago)
+            .await
+            .unwrap();

         // then, the job status is not changed
         let job = storage
@@ -749,9 +767,11 @@
             .await
             .unwrap()
             .unwrap();
-        let context = job.parts.context;
-        // TODO: Fix assertions
-        assert_eq!(*context.status(), State::Running);
-        assert_eq!(*context.lock_by(), Some(worker_id.clone()));
+        let ctx = job.parts.context;
+        assert_eq!(*ctx.status(), State::Running);
+        assert_eq!(*ctx.lock_by(), Some(worker_id));
+        assert!(ctx.lock_at().is_some());
+        assert_eq!(*ctx.last_error(), None);
+        assert_eq!(job.parts.attempt.current(), 1);
     }
 }
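Every backend in this patch converges on the same shape: one independent future per background concern, joined into a single heartbeat for the `Poller`. Stripped to its skeleton (names illustrative; `apalis_core::sleep` is the runtime-agnostic sleep already used in the diffs above):

    use std::time::Duration;

    let keep_alive_beat = async {
        loop {
            // ... refresh this worker's last_seen so it is not mistaken for dead ...
            apalis_core::sleep(Duration::from_secs(30)).await;
        }
    };
    let reenqueue_beat = async {
        loop {
            // ... reclaim jobs held by workers that stopped heartbeating ...
            apalis_core::sleep(Duration::from_millis(100)).await;
        }
    };
    // join! drives both on one task; they interleave at their await points, and
    // the combined future becomes the heartbeat passed to Poller::new_with_layer.
    let heartbeat = async {
        futures::join!(keep_alive_beat, reenqueue_beat);
    };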
error!("PgNotificationError: {e}"); } } + _ = reenqueue_orphaned_stm.next() => { + let dead_since = Utc::now() + - chrono::Duration::from_std(config.reenqueue_orphaned_after).unwrap(); + if let Err(e) = self.reenqueue_orphaned((config.buffer_size * 10) as i32, dead_since).await { + error!("ReenqueueOrphanedError: {}", e); + } + } }; @@ -615,18 +624,27 @@ impl PostgresStorage { } /// Reenqueue jobs that have been abandoned by their workers - pub async fn reenqueue_orphaned(&mut self, count: i32) -> Result<(), sqlx::Error> { + pub async fn reenqueue_orphaned( + &mut self, + count: i32, + dead_since: DateTime, + ) -> Result<(), sqlx::Error> { let job_type = self.config.namespace.clone(); let mut tx = self.pool.acquire().await?; - let query = "Update apalis.jobs - SET status = 'Pending', done_at = NULL, lock_by = NULL, lock_at = NULL, last_error ='Job was abandoned' - WHERE id in - (SELECT jobs.id from apalis.jobs INNER join apalis.workers ON lock_by = workers.id - WHERE status= 'Running' AND workers.last_seen < (NOW() - INTERVAL '300 seconds') - AND workers.worker_type = $1 ORDER BY lock_at ASC LIMIT $2);"; + let query = "UPDATE apalis.jobs + SET status = 'Pending', done_at = NULL, lock_by = NULL, lock_at = NULL, last_error = 'Job was abandoned' + WHERE id IN + (SELECT jobs.id FROM apalis.jobs INNER JOIN apalis.workers ON lock_by = workers.id + WHERE status = 'Running' + AND workers.last_seen < ($3::timestamp) + AND workers.worker_type = $1 + ORDER BY lock_at ASC + LIMIT $2);"; + sqlx::query(query) .bind(job_type) .bind(count) + .bind(dead_since) .execute(&mut *tx) .await?; Ok(()) @@ -785,11 +803,13 @@ mod tests { push_email(&mut storage, example_email()).await; let six_minutes_ago = Utc::now() - Duration::from_secs(6 * 60); + let five_minutes_ago = Utc::now() - Duration::from_secs(5 * 60); + let worker_id = register_worker_at(&mut storage, six_minutes_ago.timestamp()).await; let job = consume_one(&mut storage, &worker_id).await; storage - .reenqueue_orphaned(5) + .reenqueue_orphaned(1, five_minutes_ago) .await .expect("failed to heartbeat"); let job_id = &job.parts.task_id; @@ -800,7 +820,8 @@ mod tests { assert!(ctx.done_at().is_none()); assert!(ctx.lock_by().is_none()); assert!(ctx.lock_at().is_none()); - assert_eq!(*ctx.last_error(), Some("Job was abandoned".to_string())); + assert_eq!(*ctx.last_error(), Some("Job was abandoned".to_owned())); + assert_eq!(job.parts.attempt.current(), 0); // TODO: update get_jobs to increase attempts } #[tokio::test] @@ -810,6 +831,7 @@ mod tests { push_email(&mut storage, example_email()).await; let four_minutes_ago = Utc::now() - Duration::from_secs(4 * 60); + let six_minutes_ago = Utc::now() - Duration::from_secs(6 * 60); let worker_id = register_worker_at(&mut storage, four_minutes_ago.timestamp()).await; @@ -818,15 +840,17 @@ mod tests { assert_eq!(*ctx.status(), State::Running); storage - .reenqueue_orphaned(5) + .reenqueue_orphaned(1, six_minutes_ago) .await .expect("failed to heartbeat"); let job_id = &job.parts.task_id; let job = get_job(&mut storage, job_id).await; let ctx = job.parts.context; - assert_eq!(*ctx.status(), State::Running); - assert_eq!(*ctx.lock_by(), Some(worker_id.clone())); + assert_eq!(*ctx.lock_by(), Some(worker_id)); + assert!(ctx.lock_at().is_some()); + assert_eq!(*ctx.last_error(), None); + assert_eq!(job.parts.attempt.current(), 0); } } diff --git a/packages/apalis-sql/src/sqlite.rs b/packages/apalis-sql/src/sqlite.rs index ecbc9f0f..301d8c83 100644 --- a/packages/apalis-sql/src/sqlite.rs +++ 
diff --git a/packages/apalis-sql/src/sqlite.rs b/packages/apalis-sql/src/sqlite.rs
index ecbc9f0f..301d8c83 100644
--- a/packages/apalis-sql/src/sqlite.rs
+++ b/packages/apalis-sql/src/sqlite.rs
@@ -14,8 +14,9 @@ use apalis_core::task::task_id::TaskId;
 use apalis_core::worker::WorkerId;
 use apalis_core::{Backend, Codec};
 use async_stream::try_stream;
-use chrono::Utc;
+use chrono::{DateTime, Utc};
 use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt};
+use log::error;
 use serde::{de::DeserializeOwned, Serialize};
 use sqlx::{Pool, Row, Sqlite};
 use std::any::type_name;
@@ -157,7 +158,7 @@ async fn fetch_next(
     config: &Config,
 ) -> Result>, sqlx::Error> {
     let now: i64 = Utc::now().timestamp();
-    let update_query = "UPDATE Jobs SET status = 'Running', lock_by = ?2, lock_at = ?3 WHERE id = ?1 AND job_type = ?4 AND status = 'Pending' AND lock_by IS NULL; Select * from Jobs where id = ?1 AND lock_by = ?2 AND job_type = ?4";
+    let update_query = "UPDATE Jobs SET status = 'Running', lock_by = ?2, lock_at = ?3, attempts = attempts + 1 WHERE id = ?1 AND job_type = ?4 AND status = 'Pending' AND lock_by IS NULL; Select * from Jobs where id = ?1 AND lock_by = ?2 AND job_type = ?4";
     let job: Option> = sqlx::query_as(update_query)
         .bind(id.to_string())
         .bind(worker_id.to_string())
@@ -423,7 +424,11 @@ impl SqliteStorage {
     }

     /// Re-add jobs whose workers have disappeared back to the queue
-    pub async fn reenqueue_orphaned(&self, timeout: i64) -> Result<(), sqlx::Error> {
+    pub async fn reenqueue_orphaned(
+        &self,
+        count: i32,
+        dead_since: DateTime<Utc>,
+    ) -> Result<(), sqlx::Error> {
         let job_type = self.config.namespace.clone();
         let mut tx = self.pool.acquire().await?;
         let query = r#"Update Jobs
                 SET status = "Pending", done_at = NULL, lock_by = NULL, lock_at = NULL, last_error ="Job was abandoned"
                 WHERE id in
                     (SELECT Jobs.id from Jobs INNER join Workers ON lock_by = Workers.id
                         WHERE status= "Running" AND workers.last_seen < ?1
                         AND Workers.worker_type = ?2 ORDER BY lock_at ASC LIMIT ?3);"#;
         sqlx::query(query)
-            .bind(timeout)
+            .bind(dead_since.timestamp())
             .bind(job_type)
-            .bind::(self.config.buffer_size.try_into().unwrap())
+            .bind(count)
             .execute(&mut *tx)
             .await?;
         Ok(())
@@ -457,6 +462,7 @@ impl
             .stream_jobs(&worker, config.poll_interval, config.buffer_size)
             .map_err(|e| Error::SourceError(Arc::new(Box::new(e))));
         let stream = BackendStream::new(stream.boxed(), controller);
+        let requeue_storage = self.clone();
         let heartbeat = async move {
             loop {
                 let now: i64 = Utc::now().timestamp();
@@ -467,7 +473,26 @@
             }
         }
         .boxed();
-        Poller::new_with_layer(stream, heartbeat, layer)
+        let reenqueue_beat = async move {
+            loop {
+                let dead_since = Utc::now()
+                    - chrono::Duration::from_std(config.reenqueue_orphaned_after).unwrap();
+                if let Err(e) = requeue_storage
+                    .reenqueue_orphaned(config.buffer_size.try_into().unwrap(), dead_since)
+                    .await
+                {
+                    error!("ReenqueueOrphaned failed: {e}");
+                }
+                apalis_core::sleep(config.poll_interval).await;
+            }
+        };
+        Poller::new_with_layer(
+            stream,
+            async {
+                futures::join!(heartbeat, reenqueue_beat);
+            },
+            layer,
+        )
     }
 }

@@ -477,7 +502,7 @@ impl Ack for SqliteStorage {
     async fn ack(&mut self, ctx: &Self::Context, res: &Response) -> Result<(), sqlx::Error> {
         let pool = self.pool.clone();
         let query =
-            "UPDATE Jobs SET status = ?4, done_at = strftime('%s','now'), last_error = ?3, attempts =?5 WHERE id = ?1 AND lock_by = ?2";
+            "UPDATE Jobs SET status = ?4, done_at = strftime('%s','now'), last_error = ?3 WHERE id = ?1 AND lock_by = ?2";
         let result = serde_json::to_string(&res.inner.as_ref().map_err(|r| r.to_string()))
             .map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::InvalidData, e)))?;
         sqlx::query(query)
             .bind(res.task_id.to_string())
             .bind(ctx.lock_by().as_ref().unwrap().to_string())
             .bind(result)
             .bind(calculate_status(&res.inner).to_string())
-            .bind(res.attempt.current() as i64 + 1)
             .execute(&pool)
             .await?;
         Ok(())
@@ -655,21 +679,23 @@ mod tests {

         let six_minutes_ago = Utc::now() - Duration::from_secs(6 * 60);
+        let five_minutes_ago = Utc::now() - Duration::from_secs(5 * 60);

         let worker_id = register_worker_at(&mut storage, six_minutes_ago.timestamp()).await;

         let job = consume_one(&mut storage, &worker_id).await;
         let job_id = &job.parts.task_id;
         storage
-            .reenqueue_orphaned(six_minutes_ago.timestamp())
+            .reenqueue_orphaned(1, five_minutes_ago)
             .await
             .expect("failed to heartbeat");

         let job = get_job(&mut storage, job_id).await;
         let ctx = &job.parts.context;
-        assert_eq!(*ctx.status(), State::Running);
+        assert_eq!(*ctx.status(), State::Pending);
         assert!(ctx.done_at().is_none());
-        assert!(ctx.lock_by().is_some());
-        assert!(ctx.lock_at().is_some());
-        assert_eq!(*ctx.last_error(), None);
+        assert!(ctx.lock_by().is_none());
+        assert!(ctx.lock_at().is_none());
+        assert_eq!(*ctx.last_error(), Some("Job was abandoned".to_owned()));
+        assert_eq!(job.parts.attempt.current(), 1);
     }

     #[tokio::test]
@@ -678,13 +704,14 @@

         push_email(&mut storage, example_good_email()).await;

+        let six_minutes_ago = Utc::now() - Duration::from_secs(6 * 60);
         let four_minutes_ago = Utc::now() - Duration::from_secs(4 * 60);
         let worker_id = register_worker_at(&mut storage, four_minutes_ago.timestamp()).await;

         let job = consume_one(&mut storage, &worker_id).await;
         let job_id = &job.parts.task_id;
         storage
-            .reenqueue_orphaned(four_minutes_ago.timestamp())
+            .reenqueue_orphaned(1, six_minutes_ago)
             .await
             .expect("failed to heartbeat");

         let job = get_job(&mut storage, job_id).await;
         let ctx = &job.parts.context;
         assert_eq!(*ctx.status(), State::Running);
         assert_eq!(*ctx.lock_by(), Some(worker_id));
+        assert!(ctx.lock_at().is_some());
+        assert_eq!(*ctx.last_error(), None);
+        assert_eq!(job.parts.attempt.current(), 1);
     }
 }
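A behavioral consequence of this last diff worth spelling out: SQLite now counts an attempt when a job is fetched (the `attempts = attempts + 1` in the lock query) instead of when it is acked, so the counter tracks pickups. A pseudo-test sketch of the lifecycle, reusing the helpers from the sqlite test module above (illustrative only):

    let six_minutes_ago = Utc::now() - Duration::from_secs(6 * 60);
    let five_minutes_ago = Utc::now() - Duration::from_secs(5 * 60);

    push_email(&mut storage, example_good_email()).await;               // attempts = 0, Pending
    let worker_id = register_worker_at(&mut storage, six_minutes_ago.timestamp()).await;
    let job = consume_one(&mut storage, &worker_id).await;              // fetch: attempts -> 1, Running
    storage.reenqueue_orphaned(1, five_minutes_ago).await.unwrap();     // back to Pending, attempts stays 1
    let job = get_job(&mut storage, &job.parts.task_id).await;
    assert_eq!(job.parts.attempt.current(), 1);                         // only a second fetch would bump it to 2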
From 9863a60a674c337222478f04176ebaf90fea44cd Mon Sep 17 00:00:00 2001
From: Geoffrey Mureithi <95377562+geofmureithi@users.noreply.github.com>
Date: Sat, 23 Nov 2024 10:13:46 +0300
Subject: [PATCH 58/59] feat: allow backends to emit errors (#454)

* feat: allow backends to emit errors

* lint: fmt

* fix: pass in a reference to prevent mutation
---
 examples/redis-mq-example/src/main.rs  |  2 +-
 packages/apalis-core/src/lib.rs        |  9 +--
 packages/apalis-core/src/memory.rs     |  4 +-
 packages/apalis-core/src/request.rs    |  6 +-
 packages/apalis-core/src/worker/mod.rs |  4 +-
 packages/apalis-cron/src/lib.rs        |  6 +-
 packages/apalis-redis/Cargo.toml       |  1 +
 packages/apalis-redis/src/lib.rs       |  1 +
 packages/apalis-redis/src/storage.rs   | 55 ++++++++++++----
 packages/apalis-sql/Cargo.toml         |  1 +
 packages/apalis-sql/src/mysql.rs       | 91 +++++++++++++++++++-------
 packages/apalis-sql/src/postgres.rs    | 86 +++++++++++++++++-------
 packages/apalis-sql/src/sqlite.rs      | 45 ++++++++++---
 13 files changed, 228 insertions(+), 83 deletions(-)

diff --git a/examples/redis-mq-example/src/main.rs b/examples/redis-mq-example/src/main.rs
index 9a73e616..1cfe723f 100644
--- a/examples/redis-mq-example/src/main.rs
+++ b/examples/redis-mq-example/src/main.rs
@@ -56,7 +56,7 @@ where

     type Layer = AckLayer;

-    fn poll(mut self, _worker_id: WorkerId) -> Poller {
+    fn poll(mut self, _worker: &Worker) -> Poller {
         let (mut tx, rx) = mpsc::channel(self.config.get_buffer_size());
         let stream: RequestStream> = Box::pin(rx);
         let layer = AckLayer::new(self.clone());
diff --git a/packages/apalis-core/src/lib.rs b/packages/apalis-core/src/lib.rs
index f5475bb6..a0dc50f5 100644
--- a/packages/apalis-core/src/lib.rs
+++ 
b/packages/apalis-core/src/lib.rs @@ -27,7 +27,7 @@ use futures::Stream; use poller::Poller; use serde::{Deserialize, Serialize}; use tower::Service; -use worker::WorkerId; +use worker::{Context, Worker}; /// Represent utilities for creating worker instances. pub mod builder; @@ -81,7 +81,7 @@ pub trait Backend { /// Returns a poller that is ready for streaming fn poll>( self, - worker: WorkerId, + worker: &Worker, ) -> Poller; } /// A codec allows backends to encode and decode data @@ -165,7 +165,7 @@ pub mod test_utils { use crate::error::BoxDynError; use crate::request::Request; use crate::task::task_id::TaskId; - use crate::worker::WorkerId; + use crate::worker::{Worker, WorkerId}; use crate::Backend; use futures::channel::mpsc::{channel, Receiver, Sender}; use futures::future::BoxFuture; @@ -264,8 +264,9 @@ pub mod test_utils { >>::Future: Send + 'static, { let worker_id = WorkerId::new("test-worker"); + let worker = Worker::new(worker_id, crate::worker::Context::default()); let b = backend.clone(); - let mut poller = b.poll::(worker_id); + let mut poller = b.poll::(&worker); let (stop_tx, mut stop_rx) = channel::<()>(1); let (mut res_tx, res_rx) = channel(10); diff --git a/packages/apalis-core/src/memory.rs b/packages/apalis-core/src/memory.rs index 56459927..68acc80e 100644 --- a/packages/apalis-core/src/memory.rs +++ b/packages/apalis-core/src/memory.rs @@ -2,7 +2,7 @@ use crate::{ mq::MessageQueue, poller::{controller::Controller, stream::BackendStream}, request::{Request, RequestStream}, - worker::WorkerId, + worker::{self, Worker}, Backend, Poller, }; use futures::{ @@ -101,7 +101,7 @@ impl Backend, Res> for MemoryStora type Layer = Identity; - fn poll(self, _worker: WorkerId) -> Poller { + fn poll(self, _worker: &Worker) -> Poller { let stream = self.inner.map(|r| Ok(Some(r))).boxed(); Poller { stream: BackendStream::new(stream, self.controller), diff --git a/packages/apalis-core/src/request.rs b/packages/apalis-core/src/request.rs index 428b4a89..d86cdd78 100644 --- a/packages/apalis-core/src/request.rs +++ b/packages/apalis-core/src/request.rs @@ -9,7 +9,7 @@ use crate::{ error::Error, poller::Poller, task::{attempt::Attempt, namespace::Namespace, task_id::TaskId}, - worker::WorkerId, + worker::{Context, Worker}, Backend, }; @@ -111,10 +111,10 @@ impl Backend, Res> for RequestStream(self, _worker: WorkerId) -> Poller { + fn poll(self, _worker: &Worker) -> Poller { Poller { stream: self, - heartbeat: Box::pin(async {}), + heartbeat: Box::pin(futures::future::pending()), layer: Identity::new(), } } diff --git a/packages/apalis-core/src/worker/mod.rs b/packages/apalis-core/src/worker/mod.rs index 2557b31e..d3fb3640 100644 --- a/packages/apalis-core/src/worker/mod.rs +++ b/packages/apalis-core/src/worker/mod.rs @@ -303,7 +303,7 @@ impl Worker> { }; let backend = self.state.backend; let service = self.state.service; - let poller = backend.poll::(worker_id.clone()); + let poller = backend.poll::(&worker); let stream = poller.stream; let heartbeat = poller.heartbeat.boxed(); let layer = poller.layer; @@ -387,7 +387,7 @@ impl Future for Runnable { } /// Stores the Workers context -#[derive(Clone)] +#[derive(Clone, Default)] pub struct Context { task_count: Arc, wakers: Arc>>, diff --git a/packages/apalis-cron/src/lib.rs b/packages/apalis-cron/src/lib.rs index 621f002d..5fcb8789 100644 --- a/packages/apalis-cron/src/lib.rs +++ b/packages/apalis-cron/src/lib.rs @@ -61,7 +61,7 @@ use apalis_core::layers::Identity; use apalis_core::poller::Poller; use apalis_core::request::RequestStream; 
use apalis_core::task::namespace::Namespace; -use apalis_core::worker::WorkerId; +use apalis_core::worker::{Context, Worker}; use apalis_core::Backend; use apalis_core::{error::Error, request::Request}; use chrono::{DateTime, TimeZone, Utc}; @@ -145,8 +145,8 @@ where type Layer = Identity; - fn poll(self, _worker: WorkerId) -> Poller { + fn poll(self, _worker: &Worker) -> Poller { let stream = self.into_stream(); - Poller::new(stream, async {}) + Poller::new(stream, futures::future::pending()) } } diff --git a/packages/apalis-redis/Cargo.toml b/packages/apalis-redis/Cargo.toml index 9286d52e..f622029e 100644 --- a/packages/apalis-redis/Cargo.toml +++ b/packages/apalis-redis/Cargo.toml @@ -34,6 +34,7 @@ tokio = { version = "1", features = ["rt", "net"], optional = true } async-std = { version = "1.13.0", optional = true } async-trait = "0.1.80" tower = "0.4" +thiserror = "1" [dev-dependencies] diff --git a/packages/apalis-redis/src/lib.rs b/packages/apalis-redis/src/lib.rs index bffe5818..764e4abe 100644 --- a/packages/apalis-redis/src/lib.rs +++ b/packages/apalis-redis/src/lib.rs @@ -31,5 +31,6 @@ mod storage; pub use storage::connect; pub use storage::Config; pub use storage::RedisContext; +pub use storage::RedisPollError; pub use storage::RedisQueueInfo; pub use storage::RedisStorage; diff --git a/packages/apalis-redis/src/storage.rs b/packages/apalis-redis/src/storage.rs index 1f530480..a2609d35 100644 --- a/packages/apalis-redis/src/storage.rs +++ b/packages/apalis-redis/src/storage.rs @@ -10,10 +10,10 @@ use apalis_core::service_fn::FromRequest; use apalis_core::storage::Storage; use apalis_core::task::namespace::Namespace; use apalis_core::task::task_id::TaskId; -use apalis_core::worker::WorkerId; +use apalis_core::worker::{Event, Worker, WorkerId}; use apalis_core::{Backend, Codec}; use chrono::{DateTime, Utc}; -use futures::channel::mpsc::{self, Sender}; +use futures::channel::mpsc::{self, SendError, Sender}; use futures::{select, FutureExt, SinkExt, StreamExt, TryFutureExt}; use log::*; use redis::aio::ConnectionLike; @@ -106,6 +106,34 @@ impl FromRequest> for RedisContext { } } +/// Errors that can occur while polling a Redis backend. +#[derive(thiserror::Error, Debug)] +pub enum RedisPollError { + /// Error during a keep-alive heartbeat. + #[error("KeepAlive heartbeat encountered an error: `{0}`")] + KeepAliveError(RedisError), + + /// Error during enqueueing scheduled tasks. + #[error("EnqueueScheduled heartbeat encountered an error: `{0}`")] + EnqueueScheduledError(RedisError), + + /// Error during polling for the next task or message. + #[error("PollNext heartbeat encountered an error: `{0}`")] + PollNextError(RedisError), + + /// Error during enqueueing tasks for worker consumption. + #[error("Enqueue for worker consumption encountered an error: `{0}`")] + EnqueueError(SendError), + + /// Error during acknowledgment of tasks. + #[error("Ack heartbeat encountered an error: `{0}`")] + AckError(RedisError), + + /// Error during re-enqueuing orphaned tasks. 
+ #[error("ReenqueueOrphaned heartbeat encountered an error: `{0}`")] + ReenqueueOrphanedError(RedisError), +} + /// Config for a [RedisStorage] #[derive(Clone, Debug)] pub struct Config { @@ -412,7 +440,7 @@ where fn poll>>( mut self, - worker: WorkerId, + worker: &Worker, ) -> Poller { let (mut tx, rx) = mpsc::channel(self.config.buffer_size); let (ack, ack_rx) = mpsc::channel(self.config.buffer_size); @@ -420,6 +448,7 @@ where let controller = self.controller.clone(); let config = self.config.clone(); let stream: RequestStream> = Box::pin(rx); + let worker = worker.clone(); let heartbeat = async move { let mut reenqueue_orphaned_stm = apalis_core::interval::interval(config.poll_interval).fuse(); @@ -433,32 +462,32 @@ where let mut ack_stream = ack_rx.fuse(); - if let Err(e) = self.keep_alive(&worker).await { - error!("RegistrationError: {}", e); + if let Err(e) = self.keep_alive(worker.id()).await { + worker.emit(Event::Error(Box::new(RedisPollError::KeepAliveError(e)))); } loop { select! { _ = keep_alive_stm.next() => { - if let Err(e) = self.keep_alive(&worker).await { - error!("KeepAliveError: {}", e); + if let Err(e) = self.keep_alive(worker.id()).await { + worker.emit(Event::Error(Box::new(RedisPollError::KeepAliveError(e)))); } } _ = enqueue_scheduled_stm.next() => { if let Err(e) = self.enqueue_scheduled(config.buffer_size).await { - error!("EnqueueScheduledError: {}", e); + worker.emit(Event::Error(Box::new(RedisPollError::EnqueueScheduledError(e)))); } } _ = poll_next_stm.next() => { - let res = self.fetch_next(&worker).await; + let res = self.fetch_next(worker.id()).await; match res { Err(e) => { - error!("PollNextError: {}", e); + worker.emit(Event::Error(Box::new(RedisPollError::PollNextError(e)))); } Ok(res) => { for job in res { if let Err(e) = tx.send(Ok(Some(job))).await { - error!("EnqueueError: {}", e); + worker.emit(Event::Error(Box::new(RedisPollError::EnqueueError(e)))); } } } @@ -468,7 +497,7 @@ where id_to_ack = ack_stream.next() => { if let Some((ctx, res)) = id_to_ack { if let Err(e) = self.ack(&ctx, &res).await { - error!("AckError: {}", e); + worker.emit(Event::Error(Box::new(RedisPollError::AckError(e)))); } } } @@ -476,7 +505,7 @@ where let dead_since = Utc::now() - chrono::Duration::from_std(config.reenqueue_orphaned_after).unwrap(); if let Err(e) = self.reenqueue_orphaned((config.buffer_size * 10) as i32, dead_since).await { - error!("ReenqueueOrphanedError: {}", e); + worker.emit(Event::Error(Box::new(RedisPollError::ReenqueueOrphanedError(e)))); } } }; diff --git a/packages/apalis-sql/Cargo.toml b/packages/apalis-sql/Cargo.toml index 7f147065..90680039 100644 --- a/packages/apalis-sql/Cargo.toml +++ b/packages/apalis-sql/Cargo.toml @@ -37,6 +37,7 @@ tokio = { version = "1", features = ["rt", "net"], optional = true } futures-lite = "2.3.0" async-std = { version = "1.13.0", optional = true } chrono = { version = "0.4", features = ["serde"] } +thiserror = "1" [dev-dependencies] diff --git a/packages/apalis-sql/src/mysql.rs b/packages/apalis-sql/src/mysql.rs index 1795fecd..5b235fc4 100644 --- a/packages/apalis-sql/src/mysql.rs +++ b/packages/apalis-sql/src/mysql.rs @@ -1,5 +1,5 @@ use apalis_core::codec::json::JsonCodec; -use apalis_core::error::Error; +use apalis_core::error::{BoxDynError, Error}; use apalis_core::layers::{Ack, AckLayer}; use apalis_core::notify::Notify; use apalis_core::poller::controller::Controller; @@ -10,7 +10,7 @@ use apalis_core::response::Response; use apalis_core::storage::Storage; use apalis_core::task::namespace::Namespace; use 
apalis_core::task::task_id::TaskId; -use apalis_core::worker::WorkerId; +use apalis_core::worker::{Context, Event, Worker, WorkerId}; use apalis_core::{Backend, Codec}; use async_stream::try_stream; use chrono::{DateTime, Utc}; @@ -180,8 +180,7 @@ where yield { let (req, ctx) = job.req.take_parts(); let req = C::decode(req) - .map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::InvalidData, e))) - .unwrap(); + .map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::InvalidData, e)))?; let mut req: Request = Request::new_with_parts(req, ctx); req.parts.namespace = Some(Namespace(self.config.namespace.clone())); Some(req) @@ -371,16 +370,37 @@ where } } +/// Errors that can occur while polling a MySQL database. +#[derive(thiserror::Error, Debug)] +pub enum MysqlPollError { + /// Error during task acknowledgment. + #[error("Encountered an error during ACK: `{0}`")] + AckError(sqlx::Error), + + /// Error during result encoding. + #[error("Encountered an error during encoding the result: {0}")] + CodecError(BoxDynError), + + /// Error during a keep-alive heartbeat. + #[error("Encountered an error during KeepAlive heartbeat: `{0}`")] + KeepAliveError(sqlx::Error), + + /// Error during re-enqueuing orphaned tasks. + #[error("Encountered an error during ReenqueueOrphaned heartbeat: `{0}`")] + ReenqueueOrphanedError(sqlx::Error), +} + impl Backend, Res> for MysqlStorage where Req: Serialize + DeserializeOwned + Sync + Send + Unpin + 'static, C: Debug + Codec + Clone + Send + 'static + Sync, + C::Error: std::error::Error + 'static + Send + Sync, { type Stream = BackendStream>>; type Layer = AckLayer, Req, SqlContext, Res>; - fn poll(self, worker: WorkerId) -> Poller { + fn poll(self, worker: &Worker) -> Poller { let layer = AckLayer::new(self.clone()); let config = self.config.clone(); let controller = self.controller.clone(); @@ -389,9 +409,10 @@ where let mut hb_storage = self.clone(); let requeue_storage = self.clone(); let stream = self - .stream_jobs(&worker, config.poll_interval, config.buffer_size) + .stream_jobs(worker.id(), config.poll_interval, config.buffer_size) .map_err(|e| Error::SourceError(Arc::new(Box::new(e)))); let stream = BackendStream::new(stream.boxed(), controller); + let w = worker.clone(); let ack_heartbeat = async move { while let Some(ids) = ack_notify @@ -403,41 +424,62 @@ where for (ctx, res) in ids { let query = "UPDATE jobs SET status = ?, done_at = now(), last_error = ? WHERE id = ? AND lock_by = ?"; let query = sqlx::query(query); - let query = query - .bind(calculate_status(&res.inner).to_string()) - .bind( - serde_json::to_string(&res.inner.as_ref().map_err(|e| e.to_string())) - .unwrap(), - ) - .bind(res.task_id.to_string()) - .bind(ctx.lock_by().as_ref().unwrap().to_string()); - if let Err(e) = query.execute(&pool).await { - error!("Ack failed: {e}"); + let last_result = + C::encode(res.inner.as_ref().map_err(|e| e.to_string())).map_err(Box::new); + match (last_result, ctx.lock_by()) { + (Ok(val), Some(worker_id)) => { + let query = query + .bind(calculate_status(&res.inner).to_string()) + .bind(val) + .bind(res.task_id.to_string()) + .bind(worker_id.to_string()); + if let Err(e) = query.execute(&pool).await { + w.emit(Event::Error(Box::new(MysqlPollError::AckError(e)))); + } + } + (Err(error), Some(_)) => { + w.emit(Event::Error(Box::new(MysqlPollError::CodecError(error)))); + } + _ => { + unreachable!( + "Attempted to ACK without a worker attached. 
This is a bug, File it on the repo" + ); + } } } apalis_core::sleep(config.poll_interval).await; } }; - + let w = worker.clone(); let heartbeat = async move { loop { let now = Utc::now(); - if let Err(e) = hb_storage.keep_alive_at::(&worker, now).await { - error!("Heartbeat failed: {e}"); + if let Err(e) = hb_storage.keep_alive_at::(w.id(), now).await { + w.emit(Event::Error(Box::new(MysqlPollError::KeepAliveError(e)))); } apalis_core::sleep(config.keep_alive).await; } }; + let w = worker.clone(); let reenqueue_beat = async move { loop { let dead_since = Utc::now() - - chrono::Duration::from_std(config.reenqueue_orphaned_after).unwrap(); + - chrono::Duration::from_std(config.reenqueue_orphaned_after) + .expect("Could not calculate dead since"); if let Err(e) = requeue_storage - .reenqueue_orphaned(config.buffer_size.try_into().unwrap(), dead_since) + .reenqueue_orphaned( + config + .buffer_size + .try_into() + .expect("Could not convert usize to i32"), + dead_since, + ) .await { - error!("ReenqueueOrphaned failed: {e}"); + w.emit(Event::Error(Box::new( + MysqlPollError::ReenqueueOrphanedError(e), + ))); } apalis_core::sleep(config.poll_interval).await; } @@ -463,7 +505,10 @@ where type AckError = sqlx::Error; async fn ack(&mut self, ctx: &Self::Context, res: &Response) -> Result<(), sqlx::Error> { self.ack_notify - .notify((ctx.clone(), res.map(|res| C::encode(res).unwrap()))) + .notify(( + ctx.clone(), + res.map(|res| C::encode(res).expect("Could not encode result")), + )) .map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::BrokenPipe, e)))?; Ok(()) diff --git a/packages/apalis-sql/src/postgres.rs b/packages/apalis-sql/src/postgres.rs index eacf3a9b..5ba1fb55 100644 --- a/packages/apalis-sql/src/postgres.rs +++ b/packages/apalis-sql/src/postgres.rs @@ -41,7 +41,7 @@ use crate::context::SqlContext; use crate::{calculate_status, Config}; use apalis_core::codec::json::JsonCodec; -use apalis_core::error::Error; +use apalis_core::error::{BoxDynError, Error}; use apalis_core::layers::{Ack, AckLayer}; use apalis_core::notify::Notify; use apalis_core::poller::controller::Controller; @@ -52,7 +52,7 @@ use apalis_core::response::Response; use apalis_core::storage::Storage; use apalis_core::task::namespace::Namespace; use apalis_core::task::task_id::TaskId; -use apalis_core::worker::WorkerId; +use apalis_core::worker::{Context, Event, Worker, WorkerId}; use apalis_core::{Backend, Codec}; use chrono::{DateTime, Utc}; use futures::channel::mpsc; @@ -118,16 +118,45 @@ impl fmt::Debug for PostgresStorage { } } +/// Errors that can occur while polling a PostgreSQL database. +#[derive(thiserror::Error, Debug)] +pub enum PgPollError { + /// Error during task acknowledgment. + #[error("Encountered an error during ACK: `{0}`")] + AckError(sqlx::Error), + + /// Error while fetching the next item. + #[error("Encountered an error during FetchNext: `{0}`")] + FetchNextError(apalis_core::error::Error), + + /// Error while listening to PostgreSQL notifications. + #[error("Encountered an error during listening to PgNotification: {0}")] + PgNotificationError(apalis_core::error::Error), + + /// Error during a keep-alive heartbeat. + #[error("Encountered an error during KeepAlive heartbeat: `{0}`")] + KeepAliveError(sqlx::Error), + + /// Error during re-enqueuing orphaned tasks. + #[error("Encountered an error during ReenqueueOrphaned heartbeat: `{0}`")] + ReenqueueOrphanedError(sqlx::Error), + + /// Error during result encoding. 
+ #[error("Encountered an error during encoding the result: {0}")] + CodecError(BoxDynError), +} + impl Backend, Res> for PostgresStorage where T: Serialize + DeserializeOwned + Sync + Send + Unpin + 'static, C: Codec + Send + 'static, + C::Error: std::error::Error + 'static + Send + Sync, { type Stream = BackendStream>>; type Layer = AckLayer, T, SqlContext, Res>; - fn poll(mut self, worker: WorkerId) -> Poller { + fn poll(mut self, worker: &Worker) -> Poller { let layer = AckLayer::new(self.clone()); let subscription = self.subscription.clone(); let config = self.config.clone(); @@ -135,6 +164,7 @@ where let (mut tx, rx) = mpsc::channel(self.config.buffer_size); let ack_notify = self.ack_notify.clone(); let pool = self.pool.clone(); + let worker = worker.clone(); let heartbeat = async move { let mut keep_alive_stm = apalis_core::interval::interval(config.keep_alive).fuse(); let mut reenqueue_orphaned_stm = @@ -168,23 +198,23 @@ where } if let Err(e) = self - .keep_alive_at::(&worker, Utc::now().timestamp()) + .keep_alive_at::(worker.id(), Utc::now().timestamp()) .await { - error!("KeepAliveError: {}", e); + worker.emit(Event::Error(Box::new(PgPollError::KeepAliveError(e)))); } loop { select! { _ = keep_alive_stm.next() => { - if let Err(e) = self.keep_alive_at::(&worker, Utc::now().timestamp()).await { - error!("KeepAliveError: {}", e); + if let Err(e) = self.keep_alive_at::(worker.id(), Utc::now().timestamp()).await { + worker.emit(Event::Error(Box::new(PgPollError::KeepAliveError(e)))); } } ids = ack_stream.next() => { if let Some(ids) = ids { - let ack_ids: Vec<(String, String, String, String, u64)> = ids.iter().map(|(ctx, res)| { - (res.task_id.to_string(), ctx.lock_by().clone().unwrap().to_string(), serde_json::to_string(&res.inner.as_ref().map_err(|e| e.to_string())).expect("Could not convert response to json"), calculate_status(&res.inner).to_string(), (res.attempt.current() + 1) as u64 ) + let ack_ids: Vec<(String, String, String, String, u64)> = ids.iter().map(|(_ctx, res)| { + (res.task_id.to_string(), worker.id().to_string(), serde_json::to_string(&res.inner.as_ref().map_err(|e| e.to_string())).expect("Could not convert response to json"), calculate_status(&res.inner).to_string(), (res.attempt.current() + 1) as u64 ) }).collect(); let query = "UPDATE apalis.jobs @@ -203,31 +233,41 @@ where ) Q WHERE apalis.jobs.id = Q.id; "; - if let Err(e) = sqlx::query(query) - .bind(serde_json::to_value(&ack_ids).unwrap()) - .execute(&pool) - .await - { - panic!("AckError: {e}"); + let codec_res = C::encode(&ack_ids); + match codec_res { + Ok(val) => { + if let Err(e) = sqlx::query(query) + .bind(val) + .execute(&pool) + .await + { + worker.emit(Event::Error(Box::new(PgPollError::AckError(e)))); + } + } + Err(e) => { + worker.emit(Event::Error(Box::new(PgPollError::CodecError(e.into())))); + } } + } } _ = poll_next_stm.next() => { - if let Err(e) = fetch_next_batch(&mut self, &worker, &mut tx).await { - error!("FetchNextError: {e}"); + if let Err(e) = fetch_next_batch(&mut self, worker.id(), &mut tx).await { + worker.emit(Event::Error(Box::new(PgPollError::FetchNextError(e)))); } } _ = pg_notification.next() => { - if let Err(e) = fetch_next_batch(&mut self, &worker, &mut tx).await { - error!("PgNotificationError: {e}"); + if let Err(e) = fetch_next_batch(&mut self, worker.id(), &mut tx).await { + worker.emit(Event::Error(Box::new(PgPollError::PgNotificationError(e)))); + } } _ = reenqueue_orphaned_stm.next() => { let dead_since = Utc::now() - - 
chrono::Duration::from_std(config.reenqueue_orphaned_after).unwrap(); + - chrono::Duration::from_std(config.reenqueue_orphaned_after).expect("could not build dead_since"); if let Err(e) = self.reenqueue_orphaned((config.buffer_size * 10) as i32, dead_since).await { - error!("ReenqueueOrphanedError: {}", e); + worker.emit(Event::Error(Box::new(PgPollError::ReenqueueOrphanedError(e)))); } } @@ -402,7 +442,7 @@ where let (req, parts) = job.req.take_parts(); let req = C::decode(req) .map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::InvalidData, e))) - .unwrap(); + .expect("Unable to decode"); let mut req = Request::new_with_parts(req, parts); req.parts.namespace = Some(Namespace(self.config.namespace.clone())); req @@ -576,7 +616,7 @@ where let res = res.clone().map(|r| { C::encode(r) .map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::Interrupted, e))) - .unwrap() + .expect("Could not encode result") }); self.ack_notify diff --git a/packages/apalis-sql/src/sqlite.rs b/packages/apalis-sql/src/sqlite.rs index 301d8c83..e692b652 100644 --- a/packages/apalis-sql/src/sqlite.rs +++ b/packages/apalis-sql/src/sqlite.rs @@ -11,7 +11,7 @@ use apalis_core::response::Response; use apalis_core::storage::Storage; use apalis_core::task::namespace::Namespace; use apalis_core::task::task_id::TaskId; -use apalis_core::worker::WorkerId; +use apalis_core::worker::{Context, Event, Worker, WorkerId}; use apalis_core::{Backend, Codec}; use async_stream::try_stream; use chrono::{DateTime, Utc}; @@ -448,40 +448,62 @@ impl SqliteStorage { } } +/// Errors that can occur while polling an SQLite database. +#[derive(thiserror::Error, Debug)] +pub enum SqlitePollError { + /// Error during a keep-alive heartbeat. + #[error("Encountered an error during KeepAlive heartbeat: `{0}`")] + KeepAliveError(sqlx::Error), + + /// Error during re-enqueuing orphaned tasks. 
+ #[error("Encountered an error during ReenqueueOrphaned heartbeat: `{0}`")] + ReenqueueOrphanedError(sqlx::Error), +} + impl Backend, Res> for SqliteStorage { type Stream = BackendStream>>; type Layer = AckLayer, T, SqlContext, Res>; - fn poll(mut self, worker: WorkerId) -> Poller { + fn poll(mut self, worker: &Worker) -> Poller { let layer = AckLayer::new(self.clone()); let config = self.config.clone(); let controller = self.controller.clone(); let stream = self - .stream_jobs(&worker, config.poll_interval, config.buffer_size) + .stream_jobs(worker.id(), config.poll_interval, config.buffer_size) .map_err(|e| Error::SourceError(Arc::new(Box::new(e)))); let stream = BackendStream::new(stream.boxed(), controller); let requeue_storage = self.clone(); + let w = worker.clone(); let heartbeat = async move { loop { let now: i64 = Utc::now().timestamp(); - self.keep_alive_at::(&worker, now) - .await - .unwrap(); + if let Err(e) = self.keep_alive_at::(w.id(), now).await { + w.emit(Event::Error(Box::new(SqlitePollError::KeepAliveError(e)))); + } apalis_core::sleep(Duration::from_secs(30)).await; } } .boxed(); + let w = worker.clone(); let reenqueue_beat = async move { loop { let dead_since = Utc::now() - chrono::Duration::from_std(config.reenqueue_orphaned_after).unwrap(); if let Err(e) = requeue_storage - .reenqueue_orphaned(config.buffer_size.try_into().unwrap(), dead_since) + .reenqueue_orphaned( + config + .buffer_size + .try_into() + .expect("could not convert usize to i32"), + dead_since, + ) .await { - error!("ReenqueueOrphaned failed: {e}"); + w.emit(Event::Error(Box::new( + SqlitePollError::ReenqueueOrphanedError(e), + ))); } apalis_core::sleep(config.poll_interval).await; } @@ -507,7 +529,12 @@ impl Ack for SqliteStorage { .map_err(|e| sqlx::Error::Io(io::Error::new(io::ErrorKind::InvalidData, e)))?; sqlx::query(query) .bind(res.task_id.to_string()) - .bind(ctx.lock_by().as_ref().unwrap().to_string()) + .bind( + ctx.lock_by() + .as_ref() + .expect("Task is not locked") + .to_string(), + ) .bind(result) .bind(calculate_status(&res.inner).to_string()) .execute(&pool) From 24275eb8816b9278fef6cf4e1e269093e3e12e3b Mon Sep 17 00:00:00 2001 From: Geoffrey Mureithi <95377562+geofmureithi@users.noreply.github.com> Date: Sat, 23 Nov 2024 12:25:20 +0300 Subject: [PATCH 59/59] Feat: Introduce simple ability to pipe cron jobs to any backend (#455) * Feat: Introduce simple ability to pipe cron jobs to any backend This feature allows you to quickly persist cron jobs guaranteeing they will be run and can be distributed * lint: cargo fmt --- Cargo.toml | 2 +- examples/persisted-cron/Cargo.toml | 26 +++++++++ examples/persisted-cron/src/main.rs | 57 +++++++++++++++++++ packages/apalis-core/src/memory.rs | 1 + packages/apalis-core/src/poller/mod.rs | 17 +++--- packages/apalis-core/src/request.rs | 1 + packages/apalis-cron/src/lib.rs | 68 +++++++++++++++++++++++ packages/apalis-cron/src/pipe.rs | 77 ++++++++++++++++++++++++++ 8 files changed, 240 insertions(+), 9 deletions(-) create mode 100644 examples/persisted-cron/Cargo.toml create mode 100644 examples/persisted-cron/src/main.rs create mode 100644 packages/apalis-cron/src/pipe.rs diff --git a/Cargo.toml b/Cargo.toml index c599b78e..1da3a9a0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -125,7 +125,7 @@ members = [ "examples/catch-panic", "examples/graceful-shutdown", "examples/unmonitored-worker", - "examples/fn-args", + "examples/fn-args", "examples/persisted-cron", ] diff --git a/examples/persisted-cron/Cargo.toml b/examples/persisted-cron/Cargo.toml 
new file mode 100644 index 00000000..6e1924eb --- /dev/null +++ b/examples/persisted-cron/Cargo.toml @@ -0,0 +1,26 @@ +[package] +name = "persisted-cron" +version = "0.1.0" +edition.workspace = true +repository.workspace = true + +[dependencies] +anyhow = "1" +apalis = { path = "../../", default-features = false, features = [ + "tokio-comp", + "tracing", + "limit", + "catch-panic" +] } +apalis-cron = { path = "../../packages/apalis-cron" } +apalis-sql = { path = "../../packages/apalis-sql", features = ["sqlite", "tokio-comp"] } +tokio = { version = "1", features = ["full"] } +serde = "1" +tracing-subscriber = "0.3.11" +chrono = { version = "0.4", default-features = false, features = ["clock"] } +pin-project-lite = "0.2.9" +tower = { version = "0.4", features = ["load-shed"] } + +[dependencies.tracing] +default-features = false +version = "0.1" diff --git a/examples/persisted-cron/src/main.rs b/examples/persisted-cron/src/main.rs new file mode 100644 index 00000000..eeb30283 --- /dev/null +++ b/examples/persisted-cron/src/main.rs @@ -0,0 +1,57 @@ +use apalis::prelude::*; + +use apalis_cron::CronStream; +use apalis_cron::Schedule; +use apalis_sql::sqlite::SqliteStorage; +use apalis_sql::sqlx::SqlitePool; +use chrono::{DateTime, Utc}; +use serde::Deserialize; +use serde::Serialize; +use std::str::FromStr; +use std::time::Duration; + +#[derive(Clone)] +struct FakeService; +impl FakeService { + fn execute(&self, item: Reminder) { + dbg!(&item.0); + } +} + +#[derive(Default, Debug, Clone, Serialize, Deserialize)] +struct Reminder(DateTime); +impl From> for Reminder { + fn from(t: DateTime) -> Self { + Reminder(t) + } +} +async fn send_reminder(job: Reminder, svc: Data) { + svc.execute(job); +} + +#[tokio::main] +async fn main() { + std::env::set_var("RUST_LOG", "debug,sqlx::query=error"); + tracing_subscriber::fmt::init(); + + // We create our cron jobs stream + let schedule = Schedule::from_str("1/1 * * * * *").unwrap(); + let cron_stream = CronStream::new(schedule); + + // Lets create a storage for our cron jobs + let pool = SqlitePool::connect("sqlite::memory:").await.unwrap(); + SqliteStorage::setup(&pool) + .await + .expect("unable to run migrations for sqlite"); + let sqlite = SqliteStorage::new(pool); + + let backend = cron_stream.pipe_to_storage(sqlite); + + let worker = WorkerBuilder::new("morning-cereal") + .enable_tracing() + .rate_limit(1, Duration::from_secs(2)) + .data(FakeService) + .backend(backend) + .build_fn(send_reminder); + Monitor::new().register(worker).run().await.unwrap(); +} diff --git a/packages/apalis-core/src/memory.rs b/packages/apalis-core/src/memory.rs index 68acc80e..7234b19a 100644 --- a/packages/apalis-core/src/memory.rs +++ b/packages/apalis-core/src/memory.rs @@ -107,6 +107,7 @@ impl Backend, Res> for MemoryStora stream: BackendStream::new(stream, self.controller), heartbeat: Box::pin(futures::future::pending()), layer: Identity::new(), + _priv: (), } } } diff --git a/packages/apalis-core/src/poller/mod.rs b/packages/apalis-core/src/poller/mod.rs index 5d5554db..fb3468de 100644 --- a/packages/apalis-core/src/poller/mod.rs +++ b/packages/apalis-core/src/poller/mod.rs @@ -9,19 +9,19 @@ pub mod stream; /// A poller type that allows fetching from a stream and a heartbeat future that can be used to do periodic tasks pub struct Poller { - pub(crate) stream: S, - pub(crate) heartbeat: BoxFuture<'static, ()>, - pub(crate) layer: L, + /// The stream of jobs + pub stream: S, + /// The heartbeat for the backend + pub heartbeat: BoxFuture<'static, ()>, + /// The tower 
middleware provided by the backend + pub layer: L, + pub(crate) _priv: (), } impl Poller { /// Build a new poller pub fn new(stream: S, heartbeat: impl Future + Send + 'static) -> Self { - Self { - stream, - heartbeat: heartbeat.boxed(), - layer: Identity::new(), - } + Self::new_with_layer(stream, heartbeat, Identity::new()) } /// Build a poller with layer @@ -34,6 +34,7 @@ impl Poller { stream, heartbeat: heartbeat.boxed(), layer, + _priv: (), } } } diff --git a/packages/apalis-core/src/request.rs b/packages/apalis-core/src/request.rs index d86cdd78..f1167d67 100644 --- a/packages/apalis-core/src/request.rs +++ b/packages/apalis-core/src/request.rs @@ -116,6 +116,7 @@ impl Backend, Res> for RequestStream { @@ -133,6 +141,66 @@ where }; Box::pin(stream) } + + /// Push cron job events to a storage and get a consumable Backend + pub fn pipe_to_storage(self, storage: S) -> CronPipe + where + S: Storage + Clone + Send + Sync + 'static, + S::Error: std::error::Error + Send + Sync + 'static, + { + let stream = self + .into_stream() + .then({ + let storage = storage.clone(); + move |res| { + let mut storage = storage.clone(); + async move { + match res { + Ok(Some(req)) => storage + .push(req.args) + .await + .map(|_| ()) + .map_err(|e| Box::new(e) as BoxDynError), + _ => Ok(()), + } + } + } + }) + .boxed(); + + CronPipe { + stream, + inner: storage, + } + } + /// Push cron job events to a message queue and get a consumable Backend + pub fn pipe_to_mq(self, mq: Mq) -> CronPipe + where + Mq: MessageQueue + Clone + Send + Sync + 'static, + Mq::Error: std::error::Error + Send + Sync + 'static, + { + let stream = self + .into_stream() + .then({ + let mq = mq.clone(); + move |res| { + let mut mq = mq.clone(); + async move { + match res { + Ok(Some(req)) => mq + .enqueue(req.args) + .await + .map(|_| ()) + .map_err(|e| Box::new(e) as BoxDynError), + _ => Ok(()), + } + } + } + }) + .boxed(); + + CronPipe { stream, inner: mq } + } } impl Backend, Res> for CronStream diff --git a/packages/apalis-cron/src/pipe.rs b/packages/apalis-cron/src/pipe.rs new file mode 100644 index 00000000..be5ada4f --- /dev/null +++ b/packages/apalis-cron/src/pipe.rs @@ -0,0 +1,77 @@ +use apalis_core::error::BoxDynError; +use apalis_core::request::BoxStream; +use apalis_core::Backend; +use apalis_core::{poller::Poller, request::Request, worker::Context, worker::Worker}; +use futures::StreamExt; +use std::{error, fmt}; +use tower::Service; + +/// A generic Pipe that wraps an inner type along with a `RequestStream`. 
+pub struct CronPipe<Inner> {
+    pub(crate) stream: BoxStream<'static, Result<(), BoxDynError>>,
+    pub(crate) inner: Inner,
+}
+
+impl<Inner: fmt::Debug> fmt::Debug for CronPipe<Inner> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("Pipe")
+            .field("stream", &"<RequestStream>") // Placeholder as `RequestStream` might not implement Debug
+            .field("inner", &self.inner)
+            .finish()
+    }
+}
+
+impl<Req, Res, Inner> Backend<Request<Req, ()>, Res> for CronPipe<Inner>
+where
+    Inner: Backend<Request<Req, ()>, Res>,
+{
+    type Stream = Inner::Stream;
+
+    type Layer = Inner::Layer;
+
+    fn poll<Svc: Service<Request<Req, ()>, Response = Res>>(
+        mut self,
+        worker: &Worker<Context>,
+    ) -> Poller<Self::Stream, Self::Layer> {
+        let pipe_heartbeat = async move { while (self.stream.next().await).is_some() {} };
+        let inner = self.inner.poll::<Svc>(worker);
+        let heartbeat = inner.heartbeat;
+
+        Poller::new_with_layer(
+            inner.stream,
+            async {
+                futures::join!(heartbeat, pipe_heartbeat);
+            },
+            inner.layer,
+        )
+    }
+}
+
+/// An error that can occur while piping cron jobs to another backend
+#[derive(Debug)]
+pub struct PipeError {
+    kind: PipeErrorKind,
+}
+
+/// The kind of pipe error that occurred
+#[derive(Debug)]
+pub enum PipeErrorKind {
+    /// The cron stream provided a None
+    EmptyStream,
+}
+
+impl fmt::Display for PipeError {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self.kind {
+            PipeErrorKind::EmptyStream => write!(f, "The cron stream provided a None"),
+        }
+    }
+}
+
+impl error::Error for PipeError {}
+
+impl From<PipeErrorKind> for PipeError {
+    fn from(kind: PipeErrorKind) -> PipeError {
+        PipeError { kind }
+    }
+}
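Combined with patch 58, a piped backend also reports its failures as worker events rather than swallowed log lines: the pipe heartbeat and the inner backend's heartbeat are joined, so errors from either surface through the same worker. A closing sketch of consuming such events (hedged: it assumes the `Monitor::on_event` hook and reuses `schedule`, `sqlite` and `send_reminder` from the persisted-cron example above; the logging body is illustrative):

    let backend = CronStream::new(schedule).pipe_to_storage(sqlite);
    let worker = WorkerBuilder::new("morning-cereal")
        .backend(backend)
        .build_fn(send_reminder);

    Monitor::new()
        .register(worker)
        .on_event(|e| {
            // Backend poll failures (e.g. RedisPollError, PgPollError) arrive
            // here as Event::Error instead of being logged and lost.
            println!("worker event: {e:?}");
        })
        .run()
        .await
        .unwrap();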