From 360f971987d126bcb913108e044fa4ceb9073e7d Mon Sep 17 00:00:00 2001 From: Cory Forsstrom Date: Tue, 20 Feb 2024 08:58:14 -0800 Subject: [PATCH 01/26] Add packager and refactor build module --- Cargo.lock | 7 + Cargo.toml | 1 + crates/boulder/Cargo.toml | 1 + crates/boulder/src/{builder.rs => build.rs} | 47 ++++-- crates/boulder/src/{ => build}/job.rs | 3 +- crates/boulder/src/{ => build}/job/step.rs | 3 +- crates/boulder/src/{ => build}/pgo.rs | 0 .../src/{dependency.rs => build/root.rs} | 63 +++++++- crates/boulder/src/{ => build}/upstream.rs | 0 crates/boulder/src/cli/build.rs | 17 +- crates/boulder/src/cli/chroot.rs | 8 +- crates/boulder/src/lib.rs | 10 +- crates/boulder/src/package.rs | 147 ++++++++++++++++++ crates/boulder/src/package/matcher.rs | 42 +++++ crates/boulder/src/root.rs | 65 -------- crates/stone_recipe/src/lib.rs | 10 +- crates/stone_recipe/src/script.rs | 5 + 17 files changed, 322 insertions(+), 107 deletions(-) rename crates/boulder/src/{builder.rs => build.rs} (93%) rename crates/boulder/src/{ => build}/job.rs (96%) rename crates/boulder/src/{ => build}/job/step.rs (99%) rename crates/boulder/src/{ => build}/pgo.rs (100%) rename crates/boulder/src/{dependency.rs => build/root.rs} (64%) rename crates/boulder/src/{ => build}/upstream.rs (100%) create mode 100644 crates/boulder/src/package.rs create mode 100644 crates/boulder/src/package/matcher.rs delete mode 100644 crates/boulder/src/root.rs diff --git a/Cargo.lock b/Cargo.lock index 67948a17e..6b981f1a0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -199,6 +199,7 @@ dependencies = [ "container", "dirs", "futures", + "glob", "hex", "itertools 0.12.0", "moss", @@ -840,6 +841,12 @@ version = "0.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" +[[package]] +name = "glob" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" + [[package]] name = "h2" version = "0.3.24" diff --git a/Cargo.toml b/Cargo.toml index 9be2b5a04..b4187d8b5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,6 +21,7 @@ dirs = "5.0" indicatif = "0.17.7" itertools = "0.12.0" futures = "0.3.30" +glob = "0.3.1" hex = "0.4.3" log = "0.4" nom = "7.1.3" diff --git a/crates/boulder/Cargo.toml b/crates/boulder/Cargo.toml index 302db8ec9..ec722257f 100644 --- a/crates/boulder/Cargo.toml +++ b/crates/boulder/Cargo.toml @@ -15,6 +15,7 @@ yaml = { path = "../yaml" } clap.workspace = true dirs.workspace = true +glob.workspace = true futures.workspace = true hex.workspace = true itertools.workspace = true diff --git a/crates/boulder/src/builder.rs b/crates/boulder/src/build.rs similarity index 93% rename from crates/boulder/src/builder.rs rename to crates/boulder/src/build.rs index 809c71dd5..6f8838909 100644 --- a/crates/boulder/src/builder.rs +++ b/crates/boulder/src/build.rs @@ -21,11 +21,18 @@ use stone_recipe::{ use thiserror::Error; use tui::Stylize; +pub mod job; +mod pgo; +mod root; +mod upstream; + +use self::job::Job; use crate::{ architecture::BuildTarget, container::{self, ExecError}, - job::{self, Step}, - macros, pgo, profile, recipe, root, upstream, util, Env, Job, Macros, Paths, Recipe, Runtime, + macros, + package::{self, Packager}, + profile, recipe, util, Env, Macros, Paths, Recipe, Runtime, }; pub struct Builder { @@ -113,7 +120,7 @@ impl Builder { Ok(()) } - pub fn build(self) -> Result<(), Error> { + pub fn build(self) -> Result { 
container::exec(&self.paths, self.recipe.parsed.options.networking, || { // We're now in the container =) @@ -186,7 +193,7 @@ impl Builder { ); // Write env to $HOME/.profile - std::fs::write(build_dir.join(".profile"), build_profile(script))?; + std::fs::write(build_dir.join(".profile"), format_profile(script))?; let mut command = process::Command::new("/bin/bash") .arg("--login") @@ -247,12 +254,15 @@ impl Builder { Ok(()) })?; - Ok(()) + + let packager = Packager::new(self.recipe, self.macros, self.targets)?; + + Ok(packager) } } fn logged( - step: Step, + step: job::Step, is_pgo: bool, command: &str, f: impl FnOnce(&mut process::Command) -> &mut process::Command, @@ -281,7 +291,7 @@ fn logged( Ok(result) } -fn log(step: Step, is_pgo: bool, pipe: R) -> thread::JoinHandle<()> +fn log(step: job::Step, is_pgo: bool, pipe: R) -> thread::JoinHandle<()> where R: io::Read + Send + 'static, { @@ -300,7 +310,7 @@ where }) } -pub fn build_profile(script: &Script) -> String { +pub fn format_profile(script: &Script) -> String { let env = script .env .as_deref() @@ -324,7 +334,12 @@ pub fn build_profile(script: &Script) -> String { format!("{env}\n{action_functions}\n{definition_vars}") } -fn breakpoint_line(breakpoint: &Breakpoint, recipe: &Recipe, build_target: BuildTarget, step: Step) -> Option { +fn breakpoint_line( + breakpoint: &Breakpoint, + recipe: &Recipe, + build_target: BuildTarget, + step: job::Step, +) -> Option { let profile = recipe.build_target_profile_key(build_target); let has_key = |line: &str, key: &str| { @@ -359,12 +374,12 @@ fn breakpoint_line(breakpoint: &Breakpoint, recipe: &Recipe, build_target: Build let step = match step { // Internal step, no breakpoint will occur - Step::Prepare => return None, - Step::Setup => "setup", - Step::Build => "build", - Step::Install => "install", - Step::Check => "check", - Step::Workload => "workload", + job::Step::Prepare => return None, + job::Step::Setup => "setup", + job::Step::Build => "build", + job::Step::Install => "install", + job::Step::Check => "check", + job::Step::Workload => "workload", }; lines.find_map(|(mut line_num, line)| { @@ -404,6 +419,8 @@ pub enum Error { Container(#[from] container::Error), #[error("recipe")] Recipe(#[from] recipe::Error), + #[error("create packager")] + Package(#[from] package::Error), #[error("io")] Io(#[from] io::Error), } diff --git a/crates/boulder/src/job.rs b/crates/boulder/src/build/job.rs similarity index 96% rename from crates/boulder/src/job.rs rename to crates/boulder/src/build/job.rs index c44e808ef..47e112ca7 100644 --- a/crates/boulder/src/job.rs +++ b/crates/boulder/src/build/job.rs @@ -12,7 +12,8 @@ use stone_recipe::{script, tuning, Script, Upstream}; use thiserror::Error; pub use self::step::Step; -use crate::{architecture::BuildTarget, pgo, util, Macros, Paths, Recipe}; +use crate::build::pgo; +use crate::{architecture::BuildTarget, util, Macros, Paths, Recipe}; mod step; diff --git a/crates/boulder/src/job/step.rs b/crates/boulder/src/build/job/step.rs similarity index 99% rename from crates/boulder/src/job/step.rs rename to crates/boulder/src/build/job/step.rs index 4d57ff09d..897204dc9 100644 --- a/crates/boulder/src/job/step.rs +++ b/crates/boulder/src/build/job/step.rs @@ -14,7 +14,8 @@ use stone_recipe::{ use tui::Stylize; use super::{work_dir, Error}; -use crate::{architecture::BuildTarget, pgo, util, Macros, Paths, Recipe}; +use crate::build::pgo; +use crate::{architecture::BuildTarget, util, Macros, Paths, Recipe}; pub fn list(pgo_stage: Option) -> Vec { if 
matches!(pgo_stage, Some(pgo::Stage::One | pgo::Stage::Two)) {
diff --git a/crates/boulder/src/pgo.rs b/crates/boulder/src/build/pgo.rs
similarity index 100%
rename from crates/boulder/src/pgo.rs
rename to crates/boulder/src/build/pgo.rs
diff --git a/crates/boulder/src/dependency.rs b/crates/boulder/src/build/root.rs
similarity index 64%
rename from crates/boulder/src/dependency.rs
rename to crates/boulder/src/build/root.rs
index 87275341f..3ec123501 100644
--- a/crates/boulder/src/dependency.rs
+++ b/crates/boulder/src/build/root.rs
@@ -3,12 +3,59 @@
 // SPDX-License-Identifier: MPL-2.0
 
 use std::collections::HashSet;
+use std::io;
 
+use moss::repository;
 use stone_recipe::{tuning::Toolchain, Upstream};
+use thiserror::Error;
 
-use crate::Builder;
+use crate::build::Builder;
+use crate::{container, util};
 
-pub fn calculate(builder: &Builder) -> Vec<&str> {
+pub async fn populate(builder: &Builder, repositories: repository::Map) -> Result<(), Error> {
+    let packages = packages(builder);
+
+    let rootfs = builder.paths.rootfs().host;
+
+    // Recreate root
+    util::recreate_dir(&rootfs).await?;
+
+    let mut moss_client = moss::Client::with_explicit_repositories("boulder", &builder.env.moss_dir, repositories)
+        .await?
+        .ephemeral(&rootfs)?;
+
+    moss_client.install(&packages, true).await?;
+
+    Ok(())
+}
+
+pub fn clean(builder: &Builder) -> Result<(), Error> {
+    // Don't need to clean if it doesn't exist
+    if !builder.paths.build().host.exists() {
+        return Ok(());
+    }
+
+    // We recreate inside the container so we don't
+    // get permissions error if this is a rootless build
+    // and there's subuid mappings into the user namespace
+    container::exec(&builder.paths, false, || {
+        // Recreate `install` dir
+        util::sync::recreate_dir(&builder.paths.install().guest)?;
+
+        for target in &builder.targets {
+            for job in &target.jobs {
+                // Recreate build dir
+                util::sync::recreate_dir(&job.build_dir)?;
+            }
+        }
+
+        Ok(())
+    })?;
+
+    Ok(())
+}
+
+fn packages(builder: &Builder) -> Vec<&str> {
     let mut packages = BASE_PACKAGES.to_vec();
 
     match builder.recipe.parsed.options.toolchain {
@@ -107,3 +154,15 @@ const LLVM_PACKAGES: &[&str] = &["clang"];
 const LLVM32_PACKAGES: &[&str] = &["clang-32bit", "libcxx-32bit-devel"];
 
 const CCACHE_PACKAGE: &str = "binary(ccache)";
+
+#[derive(Debug, Error)]
+pub enum Error {
+    #[error("io")]
+    Io(#[from] io::Error),
+    #[error("moss client")]
+    MossClient(#[from] moss::client::Error),
+    #[error("moss install")]
+    MossInstall(#[from] moss::client::install::Error),
+    #[error("container")]
+    Container(#[from] container::Error),
+}
diff --git a/crates/boulder/src/upstream.rs b/crates/boulder/src/build/upstream.rs
similarity index 100%
rename from crates/boulder/src/upstream.rs
rename to crates/boulder/src/build/upstream.rs
diff --git a/crates/boulder/src/cli/build.rs b/crates/boulder/src/cli/build.rs
index 02e376aac..89ff8e4be 100644
--- a/crates/boulder/src/cli/build.rs
+++ b/crates/boulder/src/cli/build.rs
@@ -5,9 +5,8 @@
 use std::io;
 use std::path::PathBuf;
 
-use boulder::builder;
-use boulder::Builder;
-use boulder::{profile, Env};
+use boulder::build::{self, Builder};
+use boulder::{package, profile, Env};
 use clap::Parser;
 use thiserror::Error;
 
@@ -45,8 +44,12 @@ pub fn handle(command: Command, env: Env) -> Result<(), Error> {
     }
 
     let builder = Builder::new(&recipe, env, profile, ccache)?;
+
     builder.setup()?;
-    builder.build()?;
+
+    let packager = builder.build()?;
+
+    packager.package()?;
 
     Ok(())
 }
@@ -57,8 +60,10 @@ pub enum Error {
     MissingOutput(PathBuf),
     #[error("recipe file 
does not exist: {0:?}")] MissingRecipe(PathBuf), - #[error("builder")] - Builder(#[from] builder::Error), + #[error("build recipe")] + Build(#[from] build::Error), + #[error("package artifacts")] + Package(#[from] package::Error), #[error("io")] Io(#[from] io::Error), } diff --git a/crates/boulder/src/cli/chroot.rs b/crates/boulder/src/cli/chroot.rs index 87fbf203b..c1add3fbb 100644 --- a/crates/boulder/src/cli/chroot.rs +++ b/crates/boulder/src/cli/chroot.rs @@ -6,7 +6,7 @@ use std::{fs, io, path::PathBuf, process}; use boulder::{ architecture::{self, BuildTarget}, - builder, container, job, macros, recipe, Env, Macros, Paths, Recipe, + build, container, macros, recipe, Env, Macros, Paths, Recipe, }; use clap::Parser; use thiserror::Error; @@ -43,7 +43,7 @@ pub fn handle(command: Command, env: Env) -> Result<(), Error> { // The step doesn't matter, but we use `prepare` // since it uses hardcoded content that's always // available to create a script from - let script = job::Step::Prepare + let script = build::job::Step::Prepare .script( BuildTarget::Native(architecture::host()), None, @@ -54,7 +54,7 @@ pub fn handle(command: Command, env: Env) -> Result<(), Error> { ) .map_err(Error::BuildScript)? .expect("script always available for prepare step"); - let profile = &builder::build_profile(&script); + let profile = &build::format_profile(&script); let home = &paths.build().guest; @@ -88,7 +88,7 @@ pub enum Error { #[error("macros")] Macros(#[from] macros::Error), #[error("build script")] - BuildScript(#[source] job::Error), + BuildScript(#[source] build::job::Error), #[error("recipe")] Recipe(#[from] recipe::Error), #[error("io")] diff --git a/crates/boulder/src/lib.rs b/crates/boulder/src/lib.rs index 88f82677f..89ddf38a9 100644 --- a/crates/boulder/src/lib.rs +++ b/crates/boulder/src/lib.rs @@ -2,9 +2,7 @@ // // SPDX-License-Identifier: MPL-2.0 pub use self::architecture::Architecture; -pub use self::builder::Builder; pub use self::env::Env; -pub use self::job::Job; pub use self::macros::Macros; pub use self::paths::Paths; pub use self::profile::Profile; @@ -12,17 +10,13 @@ pub use self::recipe::Recipe; pub use self::runtime::Runtime; pub mod architecture; -pub mod builder; +pub mod build; pub mod container; -mod dependency; pub mod env; -pub mod job; pub mod macros; +pub mod package; pub mod paths; -pub mod pgo; pub mod profile; pub mod recipe; -pub mod root; mod runtime; -pub mod upstream; pub mod util; diff --git a/crates/boulder/src/package.rs b/crates/boulder/src/package.rs new file mode 100644 index 000000000..8ff5752b4 --- /dev/null +++ b/crates/boulder/src/package.rs @@ -0,0 +1,147 @@ +use std::collections::{hash_map, HashMap}; + +use itertools::Itertools; +use stone_recipe::{script, Package}; +use thiserror::Error; + +use crate::{build, Macros, Recipe}; + +pub use self::matcher::Matcher; + +pub mod matcher; + +pub struct Packager { + packages: HashMap, + matcher: Matcher, + recipe: Recipe, +} + +impl Packager { + pub fn new(recipe: Recipe, macros: Macros, targets: Vec) -> Result { + // Arch names used to parse [`Marcos`] for package templates + // + // We always use "base" plus whatever build targets we've built + let arches = Some("base".to_string()) + .into_iter() + .chain(targets.into_iter().map(|target| target.build_target.to_string())); + + // Resolves all package templates from arch macros + recipe file + let packages = resolve_packages(arches, ¯os, &recipe)?; + + let mut matcher = Matcher::default(); + + // Add all package files to the matcher + for (name, package) in 
&packages {
+            for path in &package.paths {
+                matcher.add_rule(matcher::Rule {
+                    pattern: path.path.clone(),
+                    target: name.clone(),
+                });
+            }
+        }
+
+        Ok(Self {
+            matcher,
+            packages,
+            recipe,
+        })
+    }
+
+    pub fn package(self) -> Result<(), Error> {
+        Ok(())
+    }
+}
+
+/// Resolve all package templates from the arch macros and
+/// incoming recipe. Package templates may have variables so
+/// they are fully expanded before being returned.
+fn resolve_packages(
+    arches: impl IntoIterator,
+    macros: &Macros,
+    recipe: &Recipe,
+) -> Result, Error> {
+    let mut parser = script::Parser::new();
+    parser.add_definition("name", &recipe.parsed.source.name);
+    parser.add_definition("version", &recipe.parsed.source.version);
+    parser.add_definition("release", recipe.parsed.source.release);
+
+    let mut packages = HashMap::new();
+
+    // Add a package, ensuring it's fully expanded
+    //
+    // If a name collision occurs, merge the incoming and stored
+    // packages
+    let mut add_package = |mut name: String, mut package: Package| {
+        name = parser.parse_content(&name)?;
+
+        package.summary = package
+            .summary
+            .map(|summary| parser.parse_content(&summary))
+            .transpose()?;
+        package.description = package
+            .description
+            .map(|description| parser.parse_content(&description))
+            .transpose()?;
+        package.run_deps = package
+            .run_deps
+            .into_iter()
+            .map(|dep| parser.parse_content(&dep))
+            .collect::>()?;
+        package.paths = package
+            .paths
+            .into_iter()
+            .map(|mut path| {
+                path.path = parser.parse_content(&path.path)?;
+                Ok(path)
+            })
+            .collect::>()?;
+
+        match packages.entry(name.clone()) {
+            hash_map::Entry::Vacant(entry) => {
+                entry.insert(package);
+            }
+            hash_map::Entry::Occupied(entry) => {
+                let prev = entry.remove();
+
+                package.run_deps = package.run_deps.into_iter().chain(prev.run_deps).sorted().collect();
+                package.paths = package
+                    .paths
+                    .into_iter()
+                    .chain(prev.paths)
+                    .sorted_by_key(|p| p.path.clone())
+                    .collect();
+
+                packages.insert(name, package);
+            }
+        }
+
+        Result::<_, Error>::Ok(())
+    };
+
+    // Add package templates from each architecture
+    for arch in arches.into_iter() {
+        if let Some(macros) = macros.arch.get(&arch) {
+            for entry in macros.packages.clone().into_iter() {
+                add_package(entry.key, entry.value)?;
+            }
+        }
+    }
+
+    // Add the root recipe package
+    add_package(recipe.parsed.source.name.clone(), recipe.parsed.package.clone())?;
+
+    // Add the recipe sub-packages
+    recipe
+        .parsed
+        .sub_packages
+        .iter()
+        .try_for_each(|entry| add_package(entry.key.clone(), entry.value.clone()))?;
+
+    Ok(packages)
+}
+
+#[derive(Debug, Error)]
+pub enum Error {
+    #[error("script")]
+    Script(#[from] script::Error),
+}
diff --git a/crates/boulder/src/package/matcher.rs b/crates/boulder/src/package/matcher.rs
new file mode 100644
index 000000000..1f8acbcad
--- /dev/null
+++ b/crates/boulder/src/package/matcher.rs
@@ -0,0 +1,42 @@
+// SPDX-FileCopyrightText: Copyright © 2020-2024 Serpent OS Developers
+//
+// SPDX-License-Identifier: MPL-2.0
+
+use glob::Pattern;
+
+#[derive(Debug, Clone, Eq, PartialEq)]
+pub struct Rule {
+    pub pattern: String,
+    pub target: String,
+}
+
+impl Rule {
+    pub fn matches(&self, path: &str) -> bool {
+        self.pattern == path
+            || path.starts_with(&self.pattern)
+            || Pattern::new(&self.pattern)
+                .map(|pattern| pattern.matches(path))
+                .unwrap_or_default()
+    }
+}
+
+#[derive(Debug, Default)]
+pub struct Matcher {
+    /// Rules stored in order of
+    /// ascending priority
+    rules: Vec,
+}
+
+impl Matcher {
+    pub fn add_rule(&mut self, rule: Rule) {
self.rules.push(rule); + } + + pub fn matching_target(&self, path: &str) -> Option<&str> { + // Rev = check highest priority rules first + self.rules + .iter() + .rev() + .find_map(|rule| rule.matches(path).then_some(rule.target.as_str())) + } +} diff --git a/crates/boulder/src/root.rs b/crates/boulder/src/root.rs deleted file mode 100644 index 3b86a0ba3..000000000 --- a/crates/boulder/src/root.rs +++ /dev/null @@ -1,65 +0,0 @@ -// SPDX-FileCopyrightText: Copyright © 2020-2024 Serpent OS Developers -// -// SPDX-License-Identifier: MPL-2.0 - -use std::io; - -use moss::repository; -use thiserror::Error; - -use crate::{container, dependency, util, Builder}; - -pub async fn populate(builder: &Builder, repositories: repository::Map) -> Result<(), Error> { - let packages = dependency::calculate(builder); - - let rootfs = builder.paths.rootfs().host; - - // Recreate root - util::recreate_dir(&rootfs).await?; - - let mut moss_client = moss::Client::with_explicit_repositories("boulder", &builder.env.moss_dir, repositories) - .await? - .ephemeral(&rootfs)?; - - moss_client.install(&packages, true).await?; - - Ok(()) -} - -pub fn clean(builder: &Builder) -> Result<(), Error> { - // Dont't need to clean if it doesn't exist - if !builder.paths.build().host.exists() { - return Ok(()); - } - - // We recreate inside the container so we don't - // get permissions error if this is a rootless build - // and there's subuid mappings into the user namespace - container::exec(&builder.paths, false, || { - // Recreate `install` dir - util::sync::recreate_dir(&builder.paths.install().guest)?; - - for target in &builder.targets { - for job in &target.jobs { - // Recerate build dir - util::sync::recreate_dir(&job.build_dir)?; - } - } - - Ok(()) - })?; - - Ok(()) -} - -#[derive(Debug, Error)] -pub enum Error { - #[error("io")] - Io(#[from] io::Error), - #[error("moss client")] - MossClient(#[from] moss::client::Error), - #[error("moss install")] - MossInstall(#[from] moss::client::install::Error), - #[error("container")] - Container(#[from] container::Error), -} diff --git a/crates/stone_recipe/src/lib.rs b/crates/stone_recipe/src/lib.rs index 7642f611d..91ad77064 100644 --- a/crates/stone_recipe/src/lib.rs +++ b/crates/stone_recipe/src/lib.rs @@ -239,9 +239,9 @@ impl<'de> Deserialize<'de> for Upstream { } } -#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq, Eq)] pub struct Path { - pub path: PathBuf, + pub path: String, pub kind: PathKind, } @@ -253,8 +253,8 @@ impl<'de> Deserialize<'de> for Path { #[derive(Debug, Deserialize)] #[serde(untagged)] enum Inner { - String(PathBuf), - KeyValue(HashMap), + String(String), + KeyValue(HashMap), } match Inner::deserialize(deserializer)? 
{ @@ -273,7 +273,7 @@ impl<'de> Deserialize<'de> for Path { } } -#[derive(Debug, Clone, Copy, Deserialize, strum::EnumString, Default)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize, strum::EnumString, Default)] #[serde(try_from = "&str")] #[strum(serialize_all = "lowercase")] pub enum PathKind { diff --git a/crates/stone_recipe/src/script.rs b/crates/stone_recipe/src/script.rs index d7499a4b3..b3dc87a1e 100644 --- a/crates/stone_recipe/src/script.rs +++ b/crates/stone_recipe/src/script.rs @@ -95,6 +95,11 @@ impl Parser { resolved_definitions, }) } + + pub fn parse_content(&self, input: &str) -> Result { + parse_content_only(input, &self.actions, &self.definitions, &mut Default::default()) + .map(Option::unwrap_or_default) + } } #[derive(Debug, Clone, PartialEq, Eq)] From 337ce1c4fdda04bc43a78bee034b138ea774fe45 Mon Sep 17 00:00:00 2001 From: Cory Forsstrom Date: Tue, 20 Feb 2024 11:02:56 -0800 Subject: [PATCH 02/26] Add dflags --- crates/boulder/src/build/job/step.rs | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/crates/boulder/src/build/job/step.rs b/crates/boulder/src/build/job/step.rs index 897204dc9..423e07c6c 100644 --- a/crates/boulder/src/build/job/step.rs +++ b/crates/boulder/src/build/job/step.rs @@ -148,11 +148,6 @@ impl Step { parser.add_definition("buildroot", build_dir.display()); parser.add_definition("workdir", work_dir.display()); - // TODO: Remaining definitions & tune flags - parser.add_definition("cflags", ""); - parser.add_definition("cxxflags", ""); - parser.add_definition("ldflags", ""); - parser.add_definition("compiler_cache", "/mason/ccache"); let path = if ccache { @@ -170,6 +165,7 @@ impl Step { parser.add_definition("compiler_cpp", "clang -E -"); parser.add_definition("compiler_objcpp", "clang -E -"); parser.add_definition("compiler_objcxxcpp", "clang++ -E"); + parser.add_definition("compiler_d", "ldc2"); parser.add_definition("compiler_ar", "llvm-ar"); parser.add_definition("compiler_ld", "ld.lld"); parser.add_definition("compiler_objcopy", "llvm-objcopy"); @@ -185,6 +181,7 @@ impl Step { parser.add_definition("compiler_cpp", "gcc -E"); parser.add_definition("compiler_objcpp", "gcc -E"); parser.add_definition("compiler_objcxxcpp", "g++ -E"); + parser.add_definition("compiler_d", "ldc2"); // FIXME: GDC parser.add_definition("compiler_ar", "gcc-ar"); parser.add_definition("compiler_ld", "ld.bfd"); parser.add_definition("compiler_objcopy", "objcopy"); @@ -342,10 +339,16 @@ fn add_tuning( .iter() .filter_map(|flag| flag.get(tuning::CompilerFlag::Ld, toolchain)), ); + let dflags = fmt_flags( + flags + .iter() + .filter_map(|flag| flag.get(tuning::CompilerFlag::D, toolchain)), + ); parser.add_definition("cflags", cflags); parser.add_definition("cxxflags", cxxflags); parser.add_definition("ldflags", ldflags); + parser.add_definition("dflags", dflags); Ok(()) } From 2a3ee88b3e4a3752f197755d2feee76e28343060 Mon Sep 17 00:00:00 2001 From: Cory Forsstrom Date: Tue, 20 Feb 2024 16:00:07 -0800 Subject: [PATCH 03/26] Add package emitting --- Cargo.lock | 1 + crates/boulder/Cargo.toml | 1 + crates/boulder/src/build.rs | 19 ++- crates/boulder/src/build/root.rs | 2 +- crates/boulder/src/cli/chroot.rs | 2 +- crates/boulder/src/container.rs | 29 ++--- crates/boulder/src/package.rs | 123 ++++++++++++++---- crates/boulder/src/package/analysis.rs | 4 + crates/boulder/src/package/collect.rs | 173 +++++++++++++++++++++++++ crates/boulder/src/package/emit.rs | 151 +++++++++++++++++++++ crates/boulder/src/package/matcher.rs | 42 ------ 
crates/boulder/src/paths.rs | 4 +- crates/boulder/src/util.rs | 16 ++- crates/container/src/lib.rs | 17 ++- crates/moss/src/stone.rs | 2 + crates/stone/src/write.rs | 2 +- 16 files changed, 491 insertions(+), 97 deletions(-) create mode 100644 crates/boulder/src/package/analysis.rs create mode 100644 crates/boulder/src/package/collect.rs create mode 100644 crates/boulder/src/package/emit.rs delete mode 100644 crates/boulder/src/package/matcher.rs diff --git a/Cargo.lock b/Cargo.lock index 6b981f1a0..1f1af0a4b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -213,6 +213,7 @@ dependencies = [ "tokio", "tui", "url", + "xxhash-rust", "yaml", ] diff --git a/crates/boulder/Cargo.toml b/crates/boulder/Cargo.toml index ec722257f..8be27f3db 100644 --- a/crates/boulder/Cargo.toml +++ b/crates/boulder/Cargo.toml @@ -27,3 +27,4 @@ strum.workspace = true thiserror.workspace = true tokio.workspace = true url.workspace = true +xxhash-rust.workspace = true diff --git a/crates/boulder/src/build.rs b/crates/boulder/src/build.rs index 6f8838909..a4c19ed51 100644 --- a/crates/boulder/src/build.rs +++ b/crates/boulder/src/build.rs @@ -29,8 +29,7 @@ mod upstream; use self::job::Job; use crate::{ architecture::BuildTarget, - container::{self, ExecError}, - macros, + container, macros, package::{self, Packager}, profile, recipe, util, Env, Macros, Paths, Recipe, Runtime, }; @@ -255,7 +254,7 @@ impl Builder { Ok(()) })?; - let packager = Packager::new(self.recipe, self.macros, self.targets)?; + let packager = Packager::new(self.paths, self.recipe, self.macros, self.targets)?; Ok(packager) } @@ -424,3 +423,17 @@ pub enum Error { #[error("io")] Io(#[from] io::Error), } + +#[derive(Debug, Error)] +pub enum ExecError { + #[error("failed with status code {0}")] + Code(i32), + #[error("stopped by signal {}", .0.as_str())] + Signal(Signal), + #[error("stopped by unknown signal")] + UnknownSignal, + #[error(transparent)] + Nix(#[from] nix::Error), + #[error(transparent)] + Io(#[from] io::Error), +} diff --git a/crates/boulder/src/build/root.rs b/crates/boulder/src/build/root.rs index 3ec123501..19e25e1e3 100644 --- a/crates/boulder/src/build/root.rs +++ b/crates/boulder/src/build/root.rs @@ -49,7 +49,7 @@ pub fn clean(builder: &Builder) -> Result<(), Error> { } } - Ok(()) + Ok(()) as Result<_, io::Error> })?; Ok(()) diff --git a/crates/boulder/src/cli/chroot.rs b/crates/boulder/src/cli/chroot.rs index c1add3fbb..22060695a 100644 --- a/crates/boulder/src/cli/chroot.rs +++ b/crates/boulder/src/cli/chroot.rs @@ -71,7 +71,7 @@ pub fn handle(command: Command, env: Env) -> Result<(), Error> { child.wait()?; - Ok(()) + Ok(()) as Result<_, io::Error> })?; Ok(()) diff --git a/crates/boulder/src/container.rs b/crates/boulder/src/container.rs index c1ceff11e..e0ecaaae8 100644 --- a/crates/boulder/src/container.rs +++ b/crates/boulder/src/container.rs @@ -2,19 +2,22 @@ // // SPDX-License-Identifier: MPL-2.0 -use std::io; - use container::Container; -use nix::sys::signal::Signal; use thiserror::Error; use crate::Paths; -pub fn exec(paths: &Paths, networking: bool, f: impl FnMut() -> Result<(), ExecError>) -> Result<(), Error> { +pub fn exec(paths: &Paths, networking: bool, f: impl FnMut() -> Result<(), E>) -> Result<(), Error> +where + E: std::error::Error + 'static, +{ run(paths, networking, f) } -fn run(paths: &Paths, networking: bool, f: impl FnMut() -> Result<(), ExecError>) -> Result<(), Error> { +fn run(paths: &Paths, networking: bool, f: impl FnMut() -> Result<(), E>) -> Result<(), Error> +where + E: std::error::Error + 'static, +{ 
let rootfs = paths.rootfs().host;
     let artefacts = paths.artefacts();
     let build = paths.build();
@@ -30,7 +33,7 @@ fn run(paths: &Paths, networking: bool, f: impl FnMut() -> Result<(), ExecError>
         .bind_rw(&build.host, &build.guest)
         .bind_rw(&compiler.host, &compiler.guest)
         .bind_ro(&recipe.host, &recipe.guest)
-        .run::(f)?;
+        .run::(f)?;
 
     Ok(())
 }
@@ -40,17 +43,3 @@ pub enum Error {
     #[error(transparent)]
     Container(#[from] container::Error),
 }
-
-#[derive(Debug, Error)]
-pub enum ExecError {
-    #[error("failed with status code {0}")]
-    Code(i32),
-    #[error("stopped by signal {}", .0.as_str())]
-    Signal(Signal),
-    #[error("stopped by unknown signal")]
-    UnknownSignal,
-    #[error(transparent)]
-    Nix(#[from] nix::Error),
-    #[error(transparent)]
-    Io(#[from] io::Error),
-}
diff --git a/crates/boulder/src/package.rs b/crates/boulder/src/package.rs
index 8ff5752b4..9a8794919 100644
--- a/crates/boulder/src/package.rs
+++ b/crates/boulder/src/package.rs
@@ -1,23 +1,36 @@
+// SPDX-FileCopyrightText: Copyright © 2020-2024 Serpent OS Developers
+//
+// SPDX-License-Identifier: MPL-2.0
-use std::collections::{hash_map, HashMap};
+use std::{
+    collections::{hash_map, HashMap},
+    fs, io,
+};
 
 use itertools::Itertools;
+use moss::stone::write::digest;
 use stone_recipe::{script, Package};
 use thiserror::Error;
 
-use crate::{build, Macros, Recipe};
+use crate::{build, container, util, Macros, Paths, Recipe};
 
-pub use self::matcher::Matcher;
+use self::collect::Collector;
+use self::emit::emit;
 
-pub mod matcher;
+mod analysis;
+mod collect;
+mod emit;
 
 pub struct Packager {
-    packages: HashMap,
-    matcher: Matcher,
-    recipe: Recipe,
+    paths: Paths,
+    recipe: Recipe,
+    packages: HashMap,
+    collector: Collector,
 }
 
 impl Packager {
-    pub fn new(recipe: Recipe, macros: Macros, targets: Vec) -> Result {
+    pub fn new(paths: Paths, recipe: Recipe, macros: Macros, targets: Vec) -> Result {
+        let mut collector = Collector::default();
+
         // Arch names used to parse [`Macros`] for package templates
         //
         // We always use "base" plus whatever build targets we've built
@@ -25,29 +38,59 @@ impl Packager {
             .into_iter()
             .chain(targets.into_iter().map(|target| target.build_target.to_string()));
 
-        // Resolves all package templates from arch macros + recipe file
-        let packages = resolve_packages(arches, &macros, &recipe)?;
-
-        let mut matcher = Matcher::default();
-
-        // Add all package files to the matcher
-        for (name, package) in &packages {
-            for path in &package.paths {
-                matcher.add_rule(matcher::Rule {
-                    pattern: path.path.clone(),
-                    target: name.clone(),
-                });
-            }
-        }
+        // Resolves all package templates from arch macros + recipe file. Also adds
+        // package paths to [`Collector`]
+        let packages = resolve_packages(arches, &macros, &recipe, &mut collector)?;
 
         Ok(Self {
-            matcher,
-            packages,
+            paths,
             recipe,
+            collector,
+            packages,
         })
     }
 
-    pub fn package(self) -> Result<(), Error> {
+    pub fn package(mut self) -> Result<(), Error> {
+        // Executed in guest container since file permissions may be borked
+        // for host if run rootless
+        container::exec(&self.paths, false, || {
+            let root = self.paths.install().guest;
+
+            // Hasher used for calculating file digests
+            let mut hasher = digest::Hasher::new();
+
+            // Collect all paths under install root and group them by
+            // the package template they match against
+            let paths = self
+                .collector
+                .paths(&root, None, &mut hasher)
+                .map_err(Error::CollectPaths)?
+ .into_iter() + .into_group_map(); + + // Combine paths per package with the package definition + let packages_to_emit = paths + .into_iter() + .filter_map(|(name, paths)| { + let definition = self.packages.remove(&name)?; + + Some(emit::Package::new( + name, + self.recipe.parsed.source.clone(), + definition, + paths, + )) + }) + .collect(); + + emit(&self.paths, packages_to_emit).map_err(Error::Emit)?; + + Ok(()) as Result<(), Error> + })?; + + // We've exited container, sync artefacts to host + sync_artefacts(&self.paths).map_err(Error::SyncArtefacts)?; + Ok(()) } } @@ -59,6 +102,7 @@ fn resolve_packages( arches: impl IntoIterator, macros: &Macros, recipe: &Recipe, + collector: &mut Collector, ) -> Result, Error> { let mut parser = script::Parser::new(); parser.add_definition("name", &recipe.parsed.source.name); @@ -96,6 +140,14 @@ fn resolve_packages( }) .collect::>()?; + // Add each path to collector + for path in &package.paths { + collector.add_rule(collect::Rule { + pattern: path.path.clone(), + package: name.clone(), + }); + } + match packages.entry(name.clone()) { hash_map::Entry::Vacant(entry) => { entry.insert(package); @@ -140,8 +192,31 @@ fn resolve_packages( Ok(packages) } +fn sync_artefacts(paths: &Paths) -> Result<(), io::Error> { + for path in util::sync::enumerate_files(&paths.artefacts().host, |_| true)? { + let filename = path.file_name().and_then(|p| p.to_str()).unwrap_or_default(); + + let target = paths.recipe().host.join(filename); + + if target.exists() { + fs::remove_file(&target)?; + } + + util::sync::hardlink_or_copy(&path, &target)?; + } + Ok(()) +} + #[derive(Debug, Error)] pub enum Error { #[error("script")] Script(#[from] script::Error), + #[error("collect install paths")] + CollectPaths(#[source] io::Error), + #[error("sync artefacts")] + SyncArtefacts(#[source] io::Error), + #[error("emit packages")] + Emit(#[from] emit::Error), + #[error("container")] + Container(#[from] container::Error), } diff --git a/crates/boulder/src/package/analysis.rs b/crates/boulder/src/package/analysis.rs new file mode 100644 index 000000000..db09e01b6 --- /dev/null +++ b/crates/boulder/src/package/analysis.rs @@ -0,0 +1,4 @@ +// SPDX-FileCopyrightText: Copyright © 2020-2024 Serpent OS Developers +// +// SPDX-License-Identifier: MPL-2.0 +// TODO diff --git a/crates/boulder/src/package/collect.rs b/crates/boulder/src/package/collect.rs new file mode 100644 index 000000000..ac8c4cb9b --- /dev/null +++ b/crates/boulder/src/package/collect.rs @@ -0,0 +1,173 @@ +// SPDX-FileCopyrightText: Copyright © 2020-2024 Serpent OS Developers +// +// SPDX-License-Identifier: MPL-2.0 +use std::{ + fs::{self, Metadata}, + io, + os::unix::fs::{FileTypeExt, MetadataExt}, + path::{Path, PathBuf}, +}; + +use glob::Pattern; +use moss::stone::payload::{layout, Layout}; +use moss::stone::write::digest; +use nix::libc::{S_IFDIR, S_IRGRP, S_IROTH, S_IRWXU, S_IXGRP, S_IXOTH}; + +#[derive(Debug, Clone, Eq, PartialEq)] +pub struct Rule { + pub pattern: String, + pub package: String, +} + +impl Rule { + pub fn matches(&self, path: &str) -> bool { + self.pattern == path + || path.starts_with(&self.pattern) + || Pattern::new(&self.pattern) + .map(|pattern| pattern.matches(path)) + .unwrap_or_default() + } +} + +#[derive(Debug, Default)] +pub struct Collector { + /// Rules stored in order of + /// ascending priority + rules: Vec, +} + +impl Collector { + pub fn add_rule(&mut self, rule: Rule) { + self.rules.push(rule); + } + + pub fn matching_package(&self, path: &str) -> Option<&str> { + // Rev = check 
highest priority rules first + self.rules + .iter() + .rev() + .find_map(|rule| rule.matches(path).then_some(rule.package.as_str())) + } + + pub fn paths( + &self, + root: &Path, + subdir: Option<(PathBuf, Metadata)>, + hasher: &mut digest::Hasher, + ) -> Result, io::Error> { + let mut paths = vec![]; + + let add_path = + |path: PathBuf, metadata: Metadata, paths: &mut Vec<(String, PathInfo)>, hasher: &mut digest::Hasher| { + let target_path = Path::new("/").join(path.strip_prefix(root).expect("path is ancestor of root")); + + if let Some(package) = self.matching_package(target_path.to_str().unwrap_or_default()) { + paths.push((package.to_string(), PathInfo::new(path, target_path, metadata, hasher)?)) + } + + Ok(()) as Result<(), io::Error> + }; + + let dir = subdir.as_ref().map(|t| t.0.as_path()).unwrap_or(root); + let entries = fs::read_dir(dir)?; + + for result in entries { + let entry = result?; + let metadata = entry.metadata()?; + + let host_path = entry.path(); + + if metadata.is_dir() { + paths.extend(self.paths(root, Some((host_path, metadata)), hasher)?); + } else { + add_path(host_path, metadata, &mut paths, hasher)?; + } + } + + // Include empty or special dir + // + // Regular 755 dir w/ entries can be + // recreated when adding children + if let Some((dir, meta)) = subdir { + const REGULAR_DIR_MODE: u32 = S_IFDIR | S_IROTH | S_IXOTH | S_IRGRP | S_IXGRP | S_IRWXU; + + let is_special = meta.mode() != REGULAR_DIR_MODE; + + if meta.is_dir() && (paths.is_empty() || is_special) { + add_path(dir, meta, &mut paths, hasher)?; + } + } + + Ok(paths) + } +} + +#[derive(Debug)] +pub struct PathInfo { + pub path: PathBuf, + pub layout: Layout, + pub size: u64, +} + +impl PathInfo { + pub fn new( + path: PathBuf, + target_path: PathBuf, + metadata: Metadata, + hasher: &mut digest::Hasher, + ) -> Result { + // Strip /usr prefix + let target = target_path + .strip_prefix("/usr") + .unwrap_or(&target_path) + .to_string_lossy() + .to_string(); + + let file_type = metadata.file_type(); + + let layout = Layout { + uid: metadata.uid(), + gid: metadata.gid(), + mode: metadata.mode(), + tag: 0, + entry: if file_type.is_symlink() { + let source = fs::read_link(&path)?; + + layout::Entry::Symlink(source.to_string_lossy().to_string(), target) + } else if file_type.is_dir() { + layout::Entry::Directory(target) + } else if file_type.is_char_device() { + layout::Entry::CharacterDevice(target) + } else if file_type.is_block_device() { + layout::Entry::BlockDevice(target) + } else if file_type.is_fifo() { + layout::Entry::Fifo(target) + } else if file_type.is_socket() { + layout::Entry::Socket(target) + } else { + hasher.reset(); + + let mut digest_writer = digest::Writer::new(io::sink(), hasher); + let mut file = fs::File::open(&path)?; + + // Copy bytes to null sink so we don't + // explode memory + io::copy(&mut file, &mut digest_writer)?; + + let hash = hasher.digest128(); + + layout::Entry::Regular(hash, target) + }, + }; + + Ok(Self { + path, + layout, + size: metadata.size(), + }) + } + + pub fn is_file(&self) -> bool { + matches!(self.layout.entry, layout::Entry::Regular(_, _)) + } +} diff --git a/crates/boulder/src/package/emit.rs b/crates/boulder/src/package/emit.rs new file mode 100644 index 000000000..9810a834e --- /dev/null +++ b/crates/boulder/src/package/emit.rs @@ -0,0 +1,151 @@ +// SPDX-FileCopyrightText: Copyright © 2020-2024 Serpent OS Developers +// +// SPDX-License-Identifier: MPL-2.0 +use std::{ + fs::{self, File}, + io::{self, Write}, +}; + +use itertools::Itertools; +use 
moss::{package::Meta, stone, Dependency}; +use thiserror::Error; + +use super::collect::PathInfo; +use crate::{architecture, Architecture, Paths}; + +pub struct Package { + pub name: String, + pub build_release: u64, + pub architecture: Architecture, + pub source: stone_recipe::Source, + pub definition: stone_recipe::Package, + pub paths: Vec, +} + +impl Package { + pub fn new( + name: String, + source: stone_recipe::Source, + template: stone_recipe::Package, + paths: Vec, + ) -> Self { + Self { + name, + build_release: 1, + architecture: architecture::host(), + source, + definition: template, + paths, + } + } + + pub fn filename(&self) -> String { + format!( + "{}-{}-{}-{}-{}.stone", + self.name, self.source.version, self.source.release, self.build_release, self.architecture + ) + } +} + +// TODO: Add binary & json manifest +pub fn emit(paths: &Paths, packages: Vec) -> Result<(), Error> { + for package in packages { + emit_package(paths, package)?; + } + + Ok(()) +} + +fn emit_package(paths: &Paths, package: Package) -> Result<(), Error> { + let filename = package.filename(); + + // Output file to artefacts directory + let out_path = paths.artefacts().guest.join(&filename); + if out_path.exists() { + fs::remove_file(&out_path)?; + } + let mut out_file = File::create(out_path)?; + + // Create stone binary writer + let mut writer = stone::Writer::new(&mut out_file, stone::header::v1::FileType::Binary)?; + + // Add metadata + { + let metadata = Meta { + name: package.name.into(), + version_identifier: package.source.version, + source_release: package.source.release, + build_release: package.build_release, + architecture: package.architecture.to_string(), + summary: package.definition.summary.unwrap_or_default(), + description: package.definition.description.unwrap_or_default(), + source_id: package.source.name, + homepage: package.source.homepage, + licenses: package.source.license.into_iter().sorted().collect(), + // TODO: Deps from analyzer + dependencies: package + .definition + .run_deps + .into_iter() + .filter_map(|dep| dep.parse::().ok()) + .sorted_by_key(|dep| dep.to_string()) + .collect(), + // TODO: Providers from analyzer + providers: Default::default(), + uri: None, + hash: None, + download_size: None, + }; + writer.add_payload(metadata.to_stone_payload().as_slice())?; + } + + // Add layouts + { + let layouts = package.paths.iter().map(|p| p.layout.clone()).collect::>(); + writer.add_payload(layouts.as_slice())?; + } + + // Temp file for building content payload + let temp_content_path = format!("/tmp/{}.tmp", &filename); + let mut temp_content = File::options() + .read(true) + .append(true) + .create(true) + .open(&temp_content_path)?; + + // Sort all files by size, largest to smallest + let files = package + .paths + .into_iter() + .filter(|p| p.is_file()) + .sorted_by(|a, b| a.size.cmp(&b.size).reverse()) + .collect::>(); + + // Convert to content writer using pledged size = total size of all files + let pledged_size = files.iter().map(|p| p.size).sum(); + let mut writer = writer.with_content(&mut temp_content, Some(pledged_size))?; + + // Add each file content + for info in files { + let mut file = File::open(info.path)?; + + writer.add_content(&mut file)?; + } + + // Finalize & flush + writer.finalize()?; + out_file.flush()?; + + // Remove temp content file + fs::remove_file(temp_content_path)?; + + Ok(()) +} + +#[derive(Debug, Error)] +pub enum Error { + #[error("stone binary writer")] + StoneBinaryWriter(#[from] stone::write::Error), + #[error("io")] + Io(#[from] 
io::Error), +} diff --git a/crates/boulder/src/package/matcher.rs b/crates/boulder/src/package/matcher.rs deleted file mode 100644 index 1f8acbcad..000000000 --- a/crates/boulder/src/package/matcher.rs +++ /dev/null @@ -1,42 +0,0 @@ -// SPDX-FileCopyrightText: Copyright © 2020-2024 Serpent OS Developers -// -// SPDX-License-Identifier: MPL-2.0 - -use glob::Pattern; - -#[derive(Debug, Clone, Eq, PartialEq)] -pub struct Rule { - pub pattern: String, - pub target: String, -} - -impl Rule { - pub fn matches(&self, path: &str) -> bool { - self.pattern == path - || path.starts_with(&self.pattern) - || Pattern::new(&self.pattern) - .map(|pattern| pattern.matches(path)) - .unwrap_or_default() - } -} - -#[derive(Debug, Default)] -pub struct Matcher { - /// Rules stored in order of - /// ascending priority - rules: Vec, -} - -impl Matcher { - pub fn add_rule(&mut self, rule: Rule) { - self.rules.push(rule); - } - - pub fn matching_target(&self, path: &str) -> Option<&str> { - // Rev = check highest priority rules first - self.rules - .iter() - .rev() - .find_map(|rule| rule.matches(path).then_some(rule.target.as_str())) - } -} diff --git a/crates/boulder/src/paths.rs b/crates/boulder/src/paths.rs index afda51354..a6f29af20 100644 --- a/crates/boulder/src/paths.rs +++ b/crates/boulder/src/paths.rs @@ -92,9 +92,7 @@ impl Paths { pub fn install(&self) -> Mapping { Mapping { - // TODO: Shitty impossible state, this folder - // doesn't exist on host - host: "".into(), + host: self.rootfs().host.join("mason").join("install"), guest: self.guest_root.join("install"), } } diff --git a/crates/boulder/src/util.rs b/crates/boulder/src/util.rs index d17559c70..32120fd60 100644 --- a/crates/boulder/src/util.rs +++ b/crates/boulder/src/util.rs @@ -97,11 +97,13 @@ pub fn is_root() -> bool { pub mod sync { use std::{ - fs::{create_dir_all, remove_dir_all}, + fs::{copy, create_dir_all, remove_dir_all}, io, path::{Path, PathBuf}, }; + use nix::unistd::{linkat, LinkatFlags}; + pub fn ensure_dir_exists(path: &Path) -> Result<(), io::Error> { if !path.exists() { create_dir_all(path)?; @@ -141,4 +143,16 @@ pub mod sync { Ok(paths) } + + pub fn hardlink_or_copy(from: &Path, to: &Path) -> Result<(), io::Error> { + // Attempt hard link + let link_result = linkat(None, from, None, to, LinkatFlags::NoSymlinkFollow); + + // Copy instead + if link_result.is_err() { + copy(from, to)?; + } + + Ok(()) + } } diff --git a/crates/container/src/lib.rs b/crates/container/src/lib.rs index b96cbac77..a9f1b5785 100644 --- a/crates/container/src/lib.rs +++ b/crates/container/src/lib.rs @@ -137,7 +137,7 @@ impl Container { Ok(_) => 0, // Write error back to parent process Err(error) => { - let error = error.to_string(); + let error = format_error(error); let mut pos = 0; while pos < error.len() { @@ -387,6 +387,21 @@ pub fn forward_sigint(pid: Pid) -> Result<(), nix::Error> { Ok(()) } +fn format_error(error: impl std::error::Error) -> String { + let sources = sources(&error); + sources.join(": ") +} + +fn sources(error: &dyn std::error::Error) -> Vec { + let mut sources = vec![error.to_string()]; + let mut source = error.source(); + while let Some(error) = source.take() { + sources.push(error.to_string()); + source = error.source(); + } + sources +} + struct Bind { source: PathBuf, target: PathBuf, diff --git a/crates/moss/src/stone.rs b/crates/moss/src/stone.rs index eed6cd9c8..7822045ac 100644 --- a/crates/moss/src/stone.rs +++ b/crates/moss/src/stone.rs @@ -4,8 +4,10 @@ pub use stone::header; pub use stone::payload; +pub use stone::write; 
pub use self::read::stream_payloads; +pub use self::write::Writer; pub mod read { use std::{fs::File, path::PathBuf}; diff --git a/crates/stone/src/write.rs b/crates/stone/src/write.rs index b96865e00..3509beb9e 100644 --- a/crates/stone/src/write.rs +++ b/crates/stone/src/write.rs @@ -11,7 +11,7 @@ use crate::{ Header, }; -mod digest; +pub mod digest; mod zstd; pub struct Writer { From 0de1ad65bc7b4b48c353f72247c5238d9189f4b6 Mon Sep 17 00:00:00 2001 From: Cory Forsstrom Date: Wed, 21 Feb 2024 07:15:31 -0800 Subject: [PATCH 04/26] Clean artifacts dir before packaging --- crates/boulder/src/package.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/crates/boulder/src/package.rs b/crates/boulder/src/package.rs index 9a8794919..b0ddcd9a3 100644 --- a/crates/boulder/src/package.rs +++ b/crates/boulder/src/package.rs @@ -51,6 +51,9 @@ impl Packager { } pub fn package(mut self) -> Result<(), Error> { + // Remove old artifacts + util::sync::recreate_dir(&self.paths.artefacts().host).map_err(Error::RecreateArtefactsDir)?; + // Executed in guest container since file permissions may be borked // for host if run rootless container::exec(&self.paths, false, || { @@ -213,6 +216,8 @@ pub enum Error { Script(#[from] script::Error), #[error("collect install paths")] CollectPaths(#[source] io::Error), + #[error("recreate artefacts dir")] + RecreateArtefactsDir(#[source] io::Error), #[error("sync artefacts")] SyncArtefacts(#[source] io::Error), #[error("emit packages")] From 8b6dffea281e3d5fe78b3f25a63cefbaa9016de6 Mon Sep 17 00:00:00 2001 From: Cory Forsstrom Date: Wed, 21 Feb 2024 07:44:30 -0800 Subject: [PATCH 05/26] Add manifest output --- Cargo.lock | 1 + Cargo.toml | 1 + crates/boulder/Cargo.toml | 1 + crates/boulder/src/package.rs | 4 +- crates/boulder/src/package/emit.rs | 87 ++++++++++------- crates/boulder/src/package/emit/manifest.rs | 78 +++++++++++++++ .../src/package/emit/manifest/binary.rs | 48 ++++++++++ .../boulder/src/package/emit/manifest/json.rs | 94 +++++++++++++++++++ crates/boulder/src/recipe.rs | 1 + crates/moss/src/dependency.rs | 26 ++++- crates/moss/src/registry/transaction.rs | 2 +- crates/stone/src/payload/layout.rs | 18 ++-- 12 files changed, 316 insertions(+), 45 deletions(-) create mode 100644 crates/boulder/src/package/emit/manifest.rs create mode 100644 crates/boulder/src/package/emit/manifest/binary.rs create mode 100644 crates/boulder/src/package/emit/manifest/json.rs diff --git a/Cargo.lock b/Cargo.lock index 1f1af0a4b..d892fe027 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -205,6 +205,7 @@ dependencies = [ "moss", "nix", "serde", + "serde_json", "serde_yaml", "sha2", "stone_recipe", diff --git a/Cargo.toml b/Cargo.toml index b4187d8b5..f20f8f0dc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -31,6 +31,7 @@ petgraph = "0.6.4" rayon = "1.8" reqwest = { version = "0.11.23", default-features = false, features = ["rustls-tls", "stream"] } serde = { version = "1", features = ["derive"] } +serde_json = "1" serde_yaml = "0.9" sha2 = "0.10.8" sqlx = { version = "0.7.3", features = ["sqlite", "chrono", "runtime-tokio"] } diff --git a/crates/boulder/Cargo.toml b/crates/boulder/Cargo.toml index 8be27f3db..fa5c0fda0 100644 --- a/crates/boulder/Cargo.toml +++ b/crates/boulder/Cargo.toml @@ -21,6 +21,7 @@ hex.workspace = true itertools.workspace = true nix.workspace = true serde.workspace = true +serde_json.workspace = true serde_yaml.workspace = true sha2.workspace = true strum.workspace = true diff --git a/crates/boulder/src/package.rs b/crates/boulder/src/package.rs index 
b0ddcd9a3..670a65fcd 100644 --- a/crates/boulder/src/package.rs +++ b/crates/boulder/src/package.rs @@ -84,9 +84,9 @@ impl Packager { paths, )) }) - .collect(); + .collect::>(); - emit(&self.paths, packages_to_emit).map_err(Error::Emit)?; + emit(&self.paths, &self.recipe, &packages_to_emit).map_err(Error::Emit)?; Ok(()) as Result<(), Error> })?; diff --git a/crates/boulder/src/package/emit.rs b/crates/boulder/src/package/emit.rs index 9810a834e..88407411d 100644 --- a/crates/boulder/src/package/emit.rs +++ b/crates/boulder/src/package/emit.rs @@ -10,9 +10,13 @@ use itertools::Itertools; use moss::{package::Meta, stone, Dependency}; use thiserror::Error; +use self::manifest::Manifest; use super::collect::PathInfo; -use crate::{architecture, Architecture, Paths}; +use crate::{architecture, Architecture, Paths, Recipe}; +mod manifest; + +#[derive(Debug)] pub struct Package { pub name: String, pub build_release: u64, @@ -39,24 +43,65 @@ impl Package { } } + pub fn is_dbginfo(&self) -> bool { + self.name.ends_with("-dbginfo") + } + pub fn filename(&self) -> String { format!( "{}-{}-{}-{}-{}.stone", self.name, self.source.version, self.source.release, self.build_release, self.architecture ) } + + pub fn meta(&self) -> Meta { + Meta { + name: self.name.clone().into(), + version_identifier: self.source.version.clone(), + source_release: self.source.release, + build_release: self.build_release, + architecture: self.architecture.to_string(), + summary: self.definition.summary.clone().unwrap_or_default(), + description: self.definition.description.clone().unwrap_or_default(), + source_id: self.source.name.clone(), + homepage: self.source.homepage.clone(), + licenses: self.source.license.clone().into_iter().sorted().collect(), + // TODO: Deps from analyzer + dependencies: self + .definition + .run_deps + .clone() + .into_iter() + .filter_map(|dep| dep.parse::().ok()) + .sorted_by_key(|dep| dep.to_string()) + .collect(), + // TODO: Providers from analyzer + providers: Default::default(), + uri: None, + hash: None, + download_size: None, + } + } } -// TODO: Add binary & json manifest -pub fn emit(paths: &Paths, packages: Vec) -> Result<(), Error> { +pub fn emit(paths: &Paths, recipe: &Recipe, packages: &[Package]) -> Result<(), Error> { + let mut manfiest = Manifest::new(paths, recipe, architecture::host()); + for package in packages { + if !package.is_dbginfo() { + manfiest.add_package(package); + } + emit_package(paths, package)?; } + manfiest.write_binary()?; + manfiest.write_json()?; + Ok(()) } -fn emit_package(paths: &Paths, package: Package) -> Result<(), Error> { +fn emit_package(paths: &Paths, package: &Package) -> Result<(), Error> { let filename = package.filename(); // Output file to artefacts directory @@ -71,32 +116,8 @@ fn emit_package(paths: &Paths, package: Package) -> Result<(), Error> { // Add metadata { - let metadata = Meta { - name: package.name.into(), - version_identifier: package.source.version, - source_release: package.source.release, - build_release: package.build_release, - architecture: package.architecture.to_string(), - summary: package.definition.summary.unwrap_or_default(), - description: package.definition.description.unwrap_or_default(), - source_id: package.source.name, - homepage: package.source.homepage, - licenses: package.source.license.into_iter().sorted().collect(), - // TODO: Deps from analyzer - dependencies: package - .definition - .run_deps - .into_iter() - .filter_map(|dep| dep.parse::().ok()) - .sorted_by_key(|dep| dep.to_string()) - .collect(), - // TODO: 
Providers from analyzer
+            providers: Default::default(),
+            uri: None,
+            hash: None,
+            download_size: None,
+        }
+    }
 }
 
-// TODO: Add binary & json manifest
-pub fn emit(paths: &Paths, packages: Vec) -> Result<(), Error> {
+pub fn emit(paths: &Paths, recipe: &Recipe, packages: &[Package]) -> Result<(), Error> {
+    let mut manifest = Manifest::new(paths, recipe, architecture::host());
+
     for package in packages {
+        if !package.is_dbginfo() {
+            manifest.add_package(package);
+        }
+
         emit_package(paths, package)?;
     }
 
+    manifest.write_binary()?;
+    manifest.write_json()?;
+
     Ok(())
 }
 
-fn emit_package(paths: &Paths, package: Package) -> Result<(), Error> {
+fn emit_package(paths: &Paths, package: &Package) -> Result<(), Error> {
     let filename = package.filename();
 
     // Output file to artefacts directory
@@ -71,32 +116,8 @@ fn emit_package(paths: &Paths, package: Package) -> Result<(), Error> {
 
     // Add metadata
     {
-        let metadata = Meta {
-            name: package.name.into(),
-            version_identifier: package.source.version,
-            source_release: package.source.release,
-            build_release: package.build_release,
-            architecture: package.architecture.to_string(),
-            summary: package.definition.summary.unwrap_or_default(),
-            description: package.definition.description.unwrap_or_default(),
-            source_id: package.source.name,
-            homepage: package.source.homepage,
-            licenses: package.source.license.into_iter().sorted().collect(),
-            // TODO: Deps from analyzer
-            dependencies: package
-                .definition
-                .run_deps
-                .into_iter()
-                .filter_map(|dep| dep.parse::().ok())
-                .sorted_by_key(|dep| dep.to_string())
-                .collect(),
-            // TODO: Providers from analyzer
-            providers: Default::default(),
-            uri: None,
-            hash: None,
-            download_size: None,
-        };
-        writer.add_payload(metadata.to_stone_payload().as_slice())?;
+        let meta = package.meta();
+        writer.add_payload(meta.to_stone_payload().as_slice())?;
     }
 
     // Add layouts
@@ -116,7 +137,7 @@ fn emit_package(paths: &Paths, package: Package) -> Result<(), Error> {
     // Sort all files by size, largest to smallest
     let files = package
         .paths
-        .into_iter()
+        .iter()
         .filter(|p| p.is_file())
         .sorted_by(|a, b| a.size.cmp(&b.size).reverse())
         .collect::>();
@@ -127,7 +148,7 @@ fn emit_package(paths: &Paths, package: Package) -> Result<(), Error> {
 
     // Add each file content
     for info in files {
-        let mut file = File::open(info.path)?;
+        let mut file = File::open(&info.path)?;
 
         writer.add_content(&mut file)?;
     }
@@ -146,6 +167,8 @@ pub enum Error {
     #[error("stone binary writer")]
     StoneBinaryWriter(#[from] stone::write::Error),
+    #[error("manifest")]
+    Manifest(#[from] manifest::Error),
     #[error("io")]
     Io(#[from] io::Error),
 }
diff --git a/crates/boulder/src/package/emit/manifest.rs b/crates/boulder/src/package/emit/manifest.rs
new file mode 100644
index 000000000..e271debf8
--- /dev/null
+++ b/crates/boulder/src/package/emit/manifest.rs
@@ -0,0 +1,78 @@
+// SPDX-FileCopyrightText: Copyright © 2020-2024 Serpent OS Developers
+//
+// SPDX-License-Identifier: MPL-2.0
+
+use std::{collections::BTreeSet, io, path::PathBuf};
+
+use moss::stone;
+use thiserror::Error;
+
+use crate::{Architecture, Paths, Recipe};
+
+use super::Package;
+
+mod binary;
+mod json;
+
+#[derive(Debug)]
+pub struct Manifest<'a> {
+    recipe: &'a Recipe,
+    arch: Architecture,
+    output_dir: PathBuf,
+    build_deps: BTreeSet,
+    packages: Vec<&'a Package>,
+}
+
+impl<'a> Manifest<'a> {
+    pub fn new(paths: &Paths, recipe: &'a Recipe, arch: Architecture) -> Self {
+        let output_dir = paths.artefacts().guest;
+
+        let build_deps = recipe
+            .parsed
+            .build
+            .build_deps
+            .iter()
+            .chain(&recipe.parsed.build.check_deps)
+            .cloned()
+            .collect();
+
+        Self {
+            recipe,
+            output_dir,
+            arch,
+            build_deps,
+            packages: vec![],
+        }
+    }
+
+    pub fn add_package(&mut self, package: &'a Package) {
+        self.packages.push(package);
+    }
+
+    pub fn write_binary(&self) -> Result<(), Error> {
+        binary::write(
+            &self.output_dir.join(format!("manifest.{}.bin", self.arch)),
+            &self.packages,
+            &self.build_deps,
+        )
+    }
+
+    pub fn write_json(&self) -> Result<(), Error> {
+        json::write(
+            &self.output_dir.join(format!("manifest.{}.jsonc", self.arch)),
+            self.recipe,
+            &self.packages,
+            &self.build_deps,
+        )
+    }
+}
+
+#[derive(Debug, Error)]
+pub enum Error {
+    #[error("stone binary writer")]
+    StoneWriter(#[from] stone::write::Error),
+    #[error("encode json")]
+    Json(#[from] serde_json::Error),
+    #[error("io")]
+    Io(#[from] io::Error),
+}
diff --git a/crates/boulder/src/package/emit/manifest/binary.rs b/crates/boulder/src/package/emit/manifest/binary.rs
new file mode 100644
index 000000000..c4ec81293
--- /dev/null
+++ b/crates/boulder/src/package/emit/manifest/binary.rs
@@ -0,0 +1,48 @@
+// SPDX-FileCopyrightText: Copyright © 2020-2024 Serpent OS Developers
+//
+// SPDX-License-Identifier: MPL-2.0
+
+use std::{collections::BTreeSet, fs::File, path::Path};
+
+use moss::{
+    dependency,
+    stone::{
+        self,
+        header::v1::FileType,
+        payload::{self, meta},
+    },
+    Dependency,
+};
+
+use super::Error;
+use crate::package::emit::Package;
+
+pub fn write(path: &Path, packages: &[&Package], build_deps: 
&BTreeSet) -> Result<(), Error> { + let mut output = File::create(path)?; + + let mut writer = stone::Writer::new(&mut output, FileType::BuildManifest)?; + + // Add each package + for package in packages { + let mut payload = package.meta().to_stone_payload(); + + // Add build deps + for name in build_deps { + let dep = name.parse::().unwrap_or_else(|_| Dependency { + kind: dependency::Kind::PackageName, + name: name.to_string(), + }); + + payload.push(payload::Meta { + tag: meta::Tag::BuildDepends, + kind: meta::Kind::Dependency(dep.kind.into(), dep.name), + }); + } + + writer.add_payload(payload.as_slice())?; + } + + writer.finalize()?; + + Ok(()) +} diff --git a/crates/boulder/src/package/emit/manifest/json.rs b/crates/boulder/src/package/emit/manifest/json.rs new file mode 100644 index 000000000..4c147de98 --- /dev/null +++ b/crates/boulder/src/package/emit/manifest/json.rs @@ -0,0 +1,94 @@ +// SPDX-FileCopyrightText: Copyright © 2020-2024 Serpent OS Developers +// +// SPDX-License-Identifier: MPL-2.0 + +use std::{ + collections::{BTreeMap, BTreeSet}, + fs::File, + io::Write, + path::Path, +}; + +use itertools::Itertools; +use serde::Serialize; + +use super::Error; +use crate::{package::emit, Recipe}; + +pub fn write( + path: &Path, + recipe: &Recipe, + packages: &[&emit::Package], + build_deps: &BTreeSet, +) -> Result<(), Error> { + let packages = packages + .iter() + .map(|package| { + let name = package.name.to_string(); + + let build_depends = build_deps.iter().cloned().collect(); + + let files = package + .paths + .iter() + .map(|p| format!("/usr/{}", p.layout.entry.target())) + .sorted() + .collect(); + + let package = Package { + build_depends, + // TODO + depends: vec![], + files, + name: name.clone(), + // TODO + provides: vec![], + }; + + (name, package) + }) + .collect(); + + let content = Content { + manifest_version: "0.2".to_string(), + packages, + source_name: recipe.parsed.source.name.clone(), + source_release: recipe.parsed.source.release.to_string(), + source_version: recipe.parsed.source.version.clone(), + }; + + let mut file = File::create(path)?; + + writeln!( + &mut file, + "/** Human readable report. 
This is not consumed by boulder */" + )?; + + let mut serializer = + serde_json::Serializer::with_formatter(&mut file, serde_json::ser::PrettyFormatter::with_indent(&[b'\t'])); + content.serialize(&mut serializer)?; + + writeln!(&mut file)?; + + Ok(()) +} + +#[derive(Serialize)] +#[serde(rename_all = "kebab-case")] +struct Content { + manifest_version: String, + packages: BTreeMap, + source_name: String, + source_release: String, + source_version: String, +} + +#[derive(Serialize)] +#[serde(rename_all = "kebab-case")] +struct Package { + build_depends: Vec, + depends: Vec, + files: Vec, + name: String, + provides: Vec, +} diff --git a/crates/boulder/src/recipe.rs b/crates/boulder/src/recipe.rs index 7384394eb..feb8b4401 100644 --- a/crates/boulder/src/recipe.rs +++ b/crates/boulder/src/recipe.rs @@ -10,6 +10,7 @@ use crate::architecture::{self, BuildTarget}; pub type Parsed = stone_recipe::Recipe; +#[derive(Debug)] pub struct Recipe { pub path: PathBuf, pub source: String, diff --git a/crates/moss/src/dependency.rs b/crates/moss/src/dependency.rs index 7a86ad8f8..73b27cb9e 100644 --- a/crates/moss/src/dependency.rs +++ b/crates/moss/src/dependency.rs @@ -7,7 +7,7 @@ use std::{fmt, str::FromStr}; use stone::payload; use thiserror::Error; -#[derive(Debug, Clone, PartialEq, Eq, Hash)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub enum Kind { /// Name based dependency PackageName, @@ -118,6 +118,18 @@ pub struct Dependency { pub name: String, } +impl PartialOrd for Dependency { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for Dependency { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + self.to_string().cmp(&other.to_string()) + } +} + /// Pretty-printing of dependencies (e.g.: `binary(whoami)`) impl fmt::Display for Dependency { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { @@ -154,6 +166,18 @@ impl Provider { } } +impl PartialOrd for Provider { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for Provider { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + self.to_string().cmp(&other.to_string()) + } +} + impl fmt::Display for Provider { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}({})", self.kind, self.name) diff --git a/crates/moss/src/registry/transaction.rs b/crates/moss/src/registry/transaction.rs index 95578fddc..47b966e7a 100644 --- a/crates/moss/src/registry/transaction.rs +++ b/crates/moss/src/registry/transaction.rs @@ -104,7 +104,7 @@ impl<'a> Transaction<'a> { let package = matches.first().ok_or(Error::NoCandidate(check_id.clone().into()))?; for dependency in package.meta.dependencies.iter() { let provider = Provider { - kind: dependency.kind.clone(), + kind: dependency.kind, name: dependency.name.clone(), }; diff --git a/crates/stone/src/payload/layout.rs b/crates/stone/src/payload/layout.rs index e6da4526e..b5473b4d9 100644 --- a/crates/stone/src/payload/layout.rs +++ b/crates/stone/src/payload/layout.rs @@ -60,15 +60,15 @@ impl Entry { } } - fn target(&self) -> Vec { + pub fn target(&self) -> &str { match self { - Entry::Regular(_, target) => target.as_bytes().to_vec(), - Entry::Symlink(_, target) => target.as_bytes().to_vec(), - Entry::Directory(target) => target.as_bytes().to_vec(), - Entry::CharacterDevice(target) => target.as_bytes().to_vec(), - Entry::BlockDevice(target) => target.as_bytes().to_vec(), - Entry::Fifo(target) => target.as_bytes().to_vec(), - Entry::Socket(target) => target.as_bytes().to_vec(), + 
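+            // Borrow the stored target here; callers now convert to bytes at encode time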
Entry::Regular(_, target) => target, + Entry::Symlink(_, target) => target, + Entry::Directory(target) => target, + Entry::CharacterDevice(target) => target, + Entry::BlockDevice(target) => target, + Entry::Fifo(target) => target, + Entry::Socket(target) => target, } } @@ -163,7 +163,7 @@ impl Record for Layout { writer.write_u8(self.entry.file_type())?; writer.write_array([0; 11])?; writer.write_all(&source)?; - writer.write_all(&target)?; + writer.write_all(target.as_bytes())?; Ok(()) } From 59dcd9d720ff073e9bc3d6546ea16ec5a660b837 Mon Sep 17 00:00:00 2001 From: Cory Forsstrom Date: Wed, 21 Feb 2024 18:23:03 -0800 Subject: [PATCH 06/26] Add analysis handlers ELF is still incomplete --- Cargo.lock | 8 + Cargo.toml | 1 + crates/boulder/Cargo.toml | 2 + crates/boulder/src/build.rs | 2 + crates/boulder/src/package.rs | 54 ++--- crates/boulder/src/package/analysis.rs | 124 +++++++++++- .../boulder/src/package/analysis/handler.rs | 136 +++++++++++++ .../src/package/analysis/handler/elf.rs | 101 ++++++++++ crates/boulder/src/package/collect.rs | 190 ++++++++++++------ crates/boulder/src/package/emit.rs | 50 +++-- crates/boulder/src/package/emit/manifest.rs | 2 +- .../src/package/emit/manifest/binary.rs | 16 +- .../boulder/src/package/emit/manifest/json.rs | 18 +- crates/boulder/src/paths.rs | 2 +- crates/moss/src/dependency.rs | 13 ++ crates/moss/src/package/meta.rs | 6 +- 16 files changed, 595 insertions(+), 130 deletions(-) create mode 100644 crates/boulder/src/package/analysis/handler.rs create mode 100644 crates/boulder/src/package/analysis/handler/elf.rs diff --git a/Cargo.lock b/Cargo.lock index d892fe027..941ce32dc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -198,12 +198,14 @@ dependencies = [ "config", "container", "dirs", + "elf", "futures", "glob", "hex", "itertools 0.12.0", "moss", "nix", + "rayon", "serde", "serde_json", "serde_yaml", @@ -615,6 +617,12 @@ dependencies = [ "serde", ] +[[package]] +name = "elf" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4445909572dbd556c457c849c4ca58623d84b27c8fff1e74b0b4227d8b90d17b" + [[package]] name = "encode_unicode" version = "0.3.6" diff --git a/Cargo.toml b/Cargo.toml index f20f8f0dc..6647c2859 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,6 +18,7 @@ clap = { version = "4.4.11", features = ["derive", "string"] } crossterm = "0.27.0" dialoguer = "0.11.0" dirs = "5.0" +elf = "0.7.4" indicatif = "0.17.7" itertools = "0.12.0" futures = "0.3.30" diff --git a/crates/boulder/Cargo.toml b/crates/boulder/Cargo.toml index fa5c0fda0..59dfcda4b 100644 --- a/crates/boulder/Cargo.toml +++ b/crates/boulder/Cargo.toml @@ -15,11 +15,13 @@ yaml = { path = "../yaml" } clap.workspace = true dirs.workspace = true +elf.workspace = true glob.workspace = true futures.workspace = true hex.workspace = true itertools.workspace = true nix.workspace = true +rayon.workspace = true serde.workspace = true serde_json.workspace = true serde_yaml.workspace = true diff --git a/crates/boulder/src/build.rs b/crates/boulder/src/build.rs index a4c19ed51..eced8d69d 100644 --- a/crates/boulder/src/build.rs +++ b/crates/boulder/src/build.rs @@ -251,6 +251,8 @@ impl Builder { } } + println!(); + Ok(()) })?; diff --git a/crates/boulder/src/package.rs b/crates/boulder/src/package.rs index 670a65fcd..a63313937 100644 --- a/crates/boulder/src/package.rs +++ b/crates/boulder/src/package.rs @@ -29,7 +29,7 @@ pub struct Packager { impl Packager { pub fn new(paths: Paths, recipe: Recipe, macros: Macros, targets: Vec) -> Result { - let mut 
collector = Collector::default();
+        let mut collector = Collector::new(paths.install().guest);
 
         // Arch names used to parse [`Macros`] for package templates
         //
@@ -50,43 +50,45 @@
         })
     }
 
-    pub fn package(mut self) -> Result<(), Error> {
+    pub fn package(self) -> Result<(), Error> {
         // Remove old artifacts
         util::sync::recreate_dir(&self.paths.artefacts().host).map_err(Error::RecreateArtefactsDir)?;
 
         // Executed in guest container since file permissions may be borked
         // for host if run rootless
         container::exec(&self.paths, false, || {
-            let root = self.paths.install().guest;
-
             // Hasher used for calculating file digests
             let mut hasher = digest::Hasher::new();
 
-            // Collect all paths under install root and group them by
-            // the package template they match against
+            // Collect all paths under install root
            let paths = self
                 .collector
-                .paths(&root, None, &mut hasher)
-                .map_err(Error::CollectPaths)?
-                .into_iter()
-                .into_group_map();
-
-            // Combine paths per package with the package definition
-            let packages_to_emit = paths
-                .into_iter()
-                .filter_map(|(name, paths)| {
-                    let definition = self.packages.remove(&name)?;
-
-                    Some(emit::Package::new(
-                        name,
-                        self.recipe.parsed.source.clone(),
-                        definition,
-                        paths,
-                    ))
+                .enumerate_paths(None, &mut hasher)
+                .map_err(Error::CollectPaths)?;
+
+            // Process all paths with the analysis chain
+            // This will determine which files get included
+            // and what deps / provides they produce
+            let mut analysis = analysis::Chain::new();
+            analysis.process(paths).map_err(Error::Analysis)?;
+
+            // Combine the package definition with the analysis results
+            // for that package. We will use this to emit the package stones & manifests.
+            //
+            // If no bucket exists, that means no paths matched this package so we can
+            // safely filter it out
+            let packages = self
+                .packages
+                .iter()
+                .filter_map(|(name, package)| {
+                    let bucket = analysis.buckets.remove(name)?;
+
+                    Some(emit::Package::new(name, &self.recipe.parsed.source, package, bucket))
                 })
                 .collect::<Vec<_>>();
 
-            emit(&self.paths, &self.recipe, &packages_to_emit).map_err(Error::Emit)?;
+            // Emit package stones and manifest files to artefact directory
+            emit(&self.paths, &self.recipe, &packages).map_err(Error::Emit)?;
 
             Ok(()) as Result<(), Error>
         })?;
@@ -215,11 +217,13 @@ pub enum Error {
     #[error("script")]
     Script(#[from] script::Error),
     #[error("collect install paths")]
-    CollectPaths(#[source] io::Error),
+    CollectPaths(#[source] collect::Error),
     #[error("recreate artefacts dir")]
     RecreateArtefactsDir(#[source] io::Error),
     #[error("sync artefacts")]
     SyncArtefacts(#[source] io::Error),
+    #[error("analyzing paths")]
+    Analysis(#[source] analysis::BoxError),
     #[error("emit packages")]
     Emit(#[from] emit::Error),
     #[error("container")]
diff --git a/crates/boulder/src/package/analysis.rs b/crates/boulder/src/package/analysis.rs
index db09e01b6..1e2859ad9 100644
--- a/crates/boulder/src/package/analysis.rs
+++ b/crates/boulder/src/package/analysis.rs
@@ -1,4 +1,126 @@
 // SPDX-FileCopyrightText: Copyright © 2020-2024 Serpent OS Developers
 //
 // SPDX-License-Identifier: MPL-2.0
-// TODO
+
+use std::collections::{BTreeSet, HashMap, VecDeque};
+
+use moss::{Dependency, Provider};
+use tui::Stylize;
+
+use super::collect::PathInfo;
+
+mod handler;
+
+pub type BoxError = Box<dyn std::error::Error>;
+
+pub struct Chain {
+    handlers: Vec<Box<dyn Handler>>,
+    pub buckets: HashMap<String, Bucket>,
+}
+
+impl Chain {
+    pub fn new() -> Self {
+        Self {
+            handlers: vec![
+                Box::new(handler::ignore_blocked),
+                Box::new(handler::binary),
+                Box::new(handler::elf),
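+                // Order matters: handlers run in sequence and the first
+                // Include/Ignore decision wins, so the include_any
+                // catch-all below must stay last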
+                Box::new(handler::pkg_config),
+                Box::new(handler::cmake),
+                // Catch-all if not excluded
+                Box::new(handler::include_any),
+            ],
+            buckets: Default::default(),
+        }
+    }
+
+    pub fn process(&mut self, paths: impl IntoIterator<Item = PathInfo>) -> Result<(), BoxError> {
+        let mut queue = paths.into_iter().collect::<VecDeque<_>>();
+
+        'paths: while let Some(mut path) = queue.pop_front() {
+            let bucket = self.buckets.entry(path.package.clone()).or_default();
+
+            'handlers: for handler in &self.handlers {
+                // Only give handlers ability to update
+                // certain bucket fields
+                let mut bucket_mut = BucketMut {
+                    providers: &mut bucket.providers,
+                    dependencies: &mut bucket.dependencies,
+                };
+
+                let response = handler.handle(&mut bucket_mut, &mut path)?;
+
+                response
+                    .generated_paths
+                    .into_iter()
+                    .for_each(|path| queue.push_back(path));
+
+                match response.decision {
+                    Decision::NextHandler => continue 'handlers,
+                    Decision::IgnoreFile { reason } => {
+                        // TODO: Proper logging so we can log from various places
+                        // and have consistent output
+                        eprintln!(
+                            "[analysis] {} - {reason}, ignoring {}",
+                            "WARN".yellow(),
+                            path.target_path.display()
+                        );
+                        continue 'paths;
+                    }
+                    Decision::IncludeFile => {
+                        bucket.paths.push(path);
+                        continue 'paths;
+                    }
+                }
+            }
+        }
+
+        Ok(())
+    }
+}
+
+#[derive(Debug, Default)]
+pub struct Bucket {
+    pub providers: BTreeSet<Provider>,
+    pub dependencies: BTreeSet<Dependency>,
+    pub paths: Vec<PathInfo>,
+}
+
+#[derive(Debug)]
+pub struct BucketMut<'a> {
+    pub providers: &'a mut BTreeSet<Provider>,
+    pub dependencies: &'a mut BTreeSet<Dependency>,
+}
+
+pub struct Response {
+    pub decision: Decision,
+    pub generated_paths: Vec<PathInfo>,
+}
+
+pub enum Decision {
+    NextHandler,
+    IgnoreFile { reason: String },
+    IncludeFile,
+}
+
+impl From<Decision> for Response {
+    fn from(decision: Decision) -> Self {
+        Self {
+            decision,
+            generated_paths: vec![],
+        }
+    }
+}
+
+pub trait Handler {
+    fn handle(&self, bucket: &mut BucketMut<'_>, path: &mut PathInfo) -> Result<Response, BoxError>;
+}
+
+impl<T> Handler for T
+where
+    T: Fn(&mut BucketMut<'_>, &mut PathInfo) -> Result<Response, BoxError>,
+{
+    fn handle(&self, bucket: &mut BucketMut<'_>, path: &mut PathInfo) -> Result<Response, BoxError> {
+        (self)(bucket, path)
+    }
+}
diff --git a/crates/boulder/src/package/analysis/handler.rs b/crates/boulder/src/package/analysis/handler.rs
new file mode 100644
index 000000000..b6d11c098
--- /dev/null
+++ b/crates/boulder/src/package/analysis/handler.rs
@@ -0,0 +1,136 @@
+use std::{path::PathBuf, process::Command};
+
+use moss::{dependency, Dependency, Provider};
+
+use crate::package::collect::PathInfo;
+
+pub use self::elf::elf;
+use super::{BoxError, BucketMut, Decision, Response};
+
+mod elf;
+
+pub fn include_any(_bucket: &mut BucketMut, _info: &mut PathInfo) -> Result<Response, BoxError> {
+    Ok(Decision::IncludeFile.into())
+}
+
+pub fn ignore_blocked(_bucket: &mut BucketMut, info: &mut PathInfo) -> Result<Response, BoxError> {
+    // non-/usr = bad
+    if !info.target_path.starts_with("/usr") {
+        return Ok(Decision::IgnoreFile {
+            reason: "non /usr/ file".into(),
+        }
+        .into());
+    }
+
+    // libtool files break the world
+    if info.file_name().ends_with(".la") && info.target_path.starts_with("/usr/lib") {
+        return Ok(Decision::IgnoreFile {
+            reason: "libtool file".into(),
+        }
+        .into());
+    }
+
+    Ok(Decision::NextHandler.into())
+}
+
+pub fn binary(bucket: &mut BucketMut, info: &mut PathInfo) -> Result<Response, BoxError> {
+    if info.target_path.starts_with("/usr/bin") {
+        let provider = Provider {
+            kind: dependency::Kind::Binary,
+            name: info.file_name().to_string(),
+        };
+        bucket.providers.insert(provider);
+    } else if info.target_path.starts_with("/usr/sbin") {
+        let provider = Provider {
+            kind: 
dependency::Kind::SystemBinary, + name: info.file_name().to_string(), + }; + bucket.providers.insert(provider); + } + + Ok(Decision::NextHandler.into()) +} + +pub fn pkg_config(bucket: &mut BucketMut, info: &mut PathInfo) -> Result { + let file_name = info.file_name(); + + if !info.has_component("pkgconfig") || !file_name.ends_with(".pc") { + return Ok(Decision::NextHandler.into()); + } + + let provider_name = file_name.strip_suffix(".pc").expect("extension exists"); + let emul32 = info.has_component("lib32"); + + let provider = Provider { + kind: if emul32 { + dependency::Kind::PkgConfig32 + } else { + dependency::Kind::PkgConfig + }, + name: provider_name.to_string(), + }; + + bucket.providers.insert(provider); + + let output = Command::new("/usr/bin/pkg-config") + .args(["--print-requires", "--print-requires-private", "--silence-errors"]) + .arg(&info.path) + .envs([ + ("LC_ALL", "C"), + ( + "PKG_CONFIG_PATH", + if emul32 { + "/usr/lib32/pkgconfig:/usr/lib/pkgconfig:/usr/share/pkgconfig" + } else { + "/usr/lib/pkgconfig:/usr/share/pkgconfig" + }, + ), + ]) + .output()?; + let stdout = String::from_utf8(output.stdout)?; + let deps = stdout.lines().filter_map(|line| line.split_whitespace().next()); + + for dep in deps { + let emul32_path = PathBuf::from(format!("/usr/lib32/pkgconfig/{dep}.pc")); + let local_path = info + .path + .parent() + .map(|p| p.join(format!("{dep}.pc"))) + .unwrap_or_default(); + + let kind = if emul32 && (local_path.exists() || emul32_path.exists()) { + dependency::Kind::PkgConfig32 + } else { + dependency::Kind::PkgConfig + }; + + bucket.dependencies.insert(Dependency { + kind, + name: dep.to_string(), + }); + } + + Ok(Decision::NextHandler.into()) +} + +pub fn cmake(bucket: &mut BucketMut, info: &mut PathInfo) -> Result { + let file_name = info.file_name(); + + if (!file_name.ends_with("Config.cmake") && !file_name.ends_with("-config.cmake")) + || file_name.ends_with("-Config.cmake") + { + return Ok(Decision::NextHandler.into()); + } + + let provider_name = file_name + .strip_suffix("Config.cmake") + .or_else(|| file_name.strip_suffix("-config.cmake")) + .expect("extension exists"); + + bucket.providers.insert(Provider { + kind: dependency::Kind::CMake, + name: provider_name.to_string(), + }); + + Ok(Decision::NextHandler.into()) +} diff --git a/crates/boulder/src/package/analysis/handler/elf.rs b/crates/boulder/src/package/analysis/handler/elf.rs new file mode 100644 index 000000000..765929401 --- /dev/null +++ b/crates/boulder/src/package/analysis/handler/elf.rs @@ -0,0 +1,101 @@ +use std::{fs::File, path::Path}; + +use elf::{ + abi::{DT_NEEDED, DT_SONAME}, + endian::AnyEndian, + to_str, +}; +use moss::{dependency, Dependency, Provider}; + +use crate::package::{ + analysis::{BoxError, BucketMut, Decision, Response}, + collect::PathInfo, +}; + +pub fn elf(bucket: &mut BucketMut, info: &mut PathInfo) -> Result { + let file_name = info.file_name(); + + if file_name.ends_with(".debug") && info.has_component("debug") { + return Ok(Decision::NextHandler.into()); + } + if !info.is_file() { + return Ok(Decision::NextHandler.into()); + } + + let Ok(mut elf) = parse(&info.path) else { + return Ok(Decision::NextHandler.into()); + }; + + let machine_isa = to_str::e_machine_to_str(elf.ehdr.e_machine) + .and_then(|s| s.strip_prefix("EM_")) + .unwrap_or_default() + .to_lowercase(); + + parse_dynamic_section(&mut elf, bucket, &machine_isa, file_name); + + Ok(Decision::IncludeFile.into()) +} + +fn parse(path: &Path) -> Result, BoxError> { + let file = File::open(path)?; + 
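+    // open_stream parses the ELF and section headers eagerly, so a non-ELF
+    // file fails here and the caller falls through to the next handler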
Ok(elf::ElfStream::open_stream(file)?) +} + +fn parse_dynamic_section( + elf: &mut elf::ElfStream, + bucket: &mut BucketMut, + machine_isa: &str, + file_name: &str, +) { + let mut dt_needed_offsets = vec![]; + let mut soname_offset = None; + + // Get all dynamic entry offsets into string table + if let Ok(Some(table)) = elf.dynamic() { + for entry in table.iter() { + match entry.d_tag { + DT_NEEDED => { + dt_needed_offsets.push(entry.d_val() as usize); + } + DT_SONAME => { + soname_offset = Some(entry.d_val() as usize); + } + _ => {} + } + } + } + + // Resolve offsets against string table and add the applicable + // depends and provides + if let Ok(Some((_, strtab))) = elf.dynamic_symbol_table() { + // needed = dependency + for offset in dt_needed_offsets { + if let Ok(name) = strtab.get(offset) { + bucket.dependencies.insert(Dependency { + kind: dependency::Kind::SharedLibary, + name: format!("{name}({machine_isa})"), + }); + } + } + + // soname exposed, let's share it + if file_name.contains(".so") { + let mut name = ""; + + if let Some(offset) = soname_offset { + if let Ok(val) = strtab.get(offset) { + name = val; + } + } + + if name.is_empty() { + name = file_name; + } + + bucket.providers.insert(Provider { + kind: dependency::Kind::SharedLibary, + name: format!("{name}({machine_isa})"), + }); + } + } +} diff --git a/crates/boulder/src/package/collect.rs b/crates/boulder/src/package/collect.rs index ac8c4cb9b..4d0cdd03a 100644 --- a/crates/boulder/src/package/collect.rs +++ b/crates/boulder/src/package/collect.rs @@ -2,6 +2,7 @@ // // SPDX-License-Identifier: MPL-2.0 use std::{ + ffi::OsStr, fs::{self, Metadata}, io, os::unix::fs::{FileTypeExt, MetadataExt}, @@ -12,6 +13,7 @@ use glob::Pattern; use moss::stone::payload::{layout, Layout}; use moss::stone::write::digest; use nix::libc::{S_IFDIR, S_IRGRP, S_IROTH, S_IRWXU, S_IXGRP, S_IXOTH}; +use thiserror::Error; #[derive(Debug, Clone, Eq, PartialEq)] pub struct Rule { @@ -29,19 +31,27 @@ impl Rule { } } -#[derive(Debug, Default)] +#[derive(Debug)] pub struct Collector { /// Rules stored in order of /// ascending priority rules: Vec, + root: PathBuf, } impl Collector { + pub fn new(root: impl Into) -> Self { + Self { + rules: vec![], + root: root.into(), + } + } + pub fn add_rule(&mut self, rule: Rule) { self.rules.push(rule); } - pub fn matching_package(&self, path: &str) -> Option<&str> { + fn matching_package(&self, path: &str) -> Option<&str> { // Rev = check highest priority rules first self.rules .iter() @@ -49,26 +59,36 @@ impl Collector { .find_map(|rule| rule.matches(path).then_some(rule.package.as_str())) } - pub fn paths( + /// Produce a [`PathInfo`] from the provided [`Path`] + pub fn path(&self, path: &Path, hasher: &mut digest::Hasher) -> Result { + let metadata = fs::metadata(path)?; + self.path_with_metadata(path.to_path_buf(), &metadata, hasher) + } + + fn path_with_metadata( &self, - root: &Path, - subdir: Option<(PathBuf, Metadata)>, + path: PathBuf, + metadata: &Metadata, hasher: &mut digest::Hasher, - ) -> Result, io::Error> { - let mut paths = vec![]; + ) -> Result { + let target_path = Path::new("/").join(path.strip_prefix(&self.root).expect("path is ancestor of root")); - let add_path = - |path: PathBuf, metadata: Metadata, paths: &mut Vec<(String, PathInfo)>, hasher: &mut digest::Hasher| { - let target_path = Path::new("/").join(path.strip_prefix(root).expect("path is ancestor of root")); + let package = self + .matching_package(target_path.to_str().unwrap_or_default()) + .ok_or(Error::NoMatchingRule)?; - if let 
Some(package) = self.matching_package(target_path.to_str().unwrap_or_default()) { - paths.push((package.to_string(), PathInfo::new(path, target_path, metadata, hasher)?)) - } + Ok(PathInfo::new(path, target_path, metadata, hasher, package.to_string())?) + } - Ok(()) as Result<(), io::Error> - }; + /// Enumerates all paths from the filesystem starting at root or subdir of root, if provided + pub fn enumerate_paths( + &self, + subdir: Option<(PathBuf, Metadata)>, + hasher: &mut digest::Hasher, + ) -> Result, Error> { + let mut paths = vec![]; - let dir = subdir.as_ref().map(|t| t.0.as_path()).unwrap_or(root); + let dir = subdir.as_ref().map(|t| t.0.as_path()).unwrap_or(&self.root); let entries = fs::read_dir(dir)?; for result in entries { @@ -78,9 +98,9 @@ impl Collector { let host_path = entry.path(); if metadata.is_dir() { - paths.extend(self.paths(root, Some((host_path, metadata)), hasher)?); + paths.extend(self.enumerate_paths(Some((host_path, metadata)), hasher)?); } else { - add_path(host_path, metadata, &mut paths, hasher)?; + paths.push(self.path_with_metadata(host_path, &metadata, hasher)?); } } @@ -94,7 +114,7 @@ impl Collector { let is_special = meta.mode() != REGULAR_DIR_MODE; if meta.is_dir() && (paths.is_empty() || is_special) { - add_path(dir, meta, &mut paths, hasher)?; + paths.push(self.path_with_metadata(dir, &meta, hasher)?); } } @@ -105,69 +125,111 @@ impl Collector { #[derive(Debug)] pub struct PathInfo { pub path: PathBuf, + pub target_path: PathBuf, pub layout: Layout, pub size: u64, + pub package: String, } impl PathInfo { pub fn new( path: PathBuf, target_path: PathBuf, - metadata: Metadata, + metadata: &Metadata, hasher: &mut digest::Hasher, - ) -> Result { - // Strip /usr prefix - let target = target_path - .strip_prefix("/usr") - .unwrap_or(&target_path) - .to_string_lossy() - .to_string(); - - let file_type = metadata.file_type(); - - let layout = Layout { - uid: metadata.uid(), - gid: metadata.gid(), - mode: metadata.mode(), - tag: 0, - entry: if file_type.is_symlink() { - let source = fs::read_link(&path)?; - - layout::Entry::Symlink(source.to_string_lossy().to_string(), target) - } else if file_type.is_dir() { - layout::Entry::Directory(target) - } else if file_type.is_char_device() { - layout::Entry::CharacterDevice(target) - } else if file_type.is_block_device() { - layout::Entry::BlockDevice(target) - } else if file_type.is_fifo() { - layout::Entry::Fifo(target) - } else if file_type.is_socket() { - layout::Entry::Socket(target) - } else { - hasher.reset(); - - let mut digest_writer = digest::Writer::new(io::sink(), hasher); - let mut file = fs::File::open(&path)?; - - // Copy bytes to null sink so we don't - // explode memory - io::copy(&mut file, &mut digest_writer)?; - - let hash = hasher.digest128(); - - layout::Entry::Regular(hash, target) - }, - }; + package: String, + ) -> Result { + let layout = layout_from_metadata(&path, &target_path, metadata, hasher)?; Ok(Self { path, + target_path, layout, size: metadata.size(), + package, }) } + pub fn restat(&mut self, hasher: &mut digest::Hasher) -> Result<(), Error> { + let metadata = fs::metadata(&self.path)?; + self.layout = layout_from_metadata(&self.path, &self.target_path, &metadata, hasher)?; + self.size = metadata.size(); + Ok(()) + } + pub fn is_file(&self) -> bool { matches!(self.layout.entry, layout::Entry::Regular(_, _)) } + + pub fn file_name(&self) -> &str { + self.target_path + .file_name() + .and_then(|p| p.to_str()) + .unwrap_or_default() + } + + pub fn has_component(&self, component: &str) 
-> bool { + self.target_path + .components() + .any(|c| c.as_os_str() == OsStr::new(component)) + } +} + +fn layout_from_metadata( + path: &Path, + target_path: &Path, + metadata: &Metadata, + hasher: &mut digest::Hasher, +) -> Result { + // Strip /usr + let target = target_path + .strip_prefix("/usr") + .unwrap_or(target_path) + .to_string_lossy() + .to_string(); + + let file_type = metadata.file_type(); + + Ok(Layout { + uid: metadata.uid(), + gid: metadata.gid(), + mode: metadata.mode(), + tag: 0, + entry: if file_type.is_symlink() { + let source = fs::read_link(path)?; + + layout::Entry::Symlink(source.to_string_lossy().to_string(), target) + } else if file_type.is_dir() { + layout::Entry::Directory(target) + } else if file_type.is_char_device() { + layout::Entry::CharacterDevice(target) + } else if file_type.is_block_device() { + layout::Entry::BlockDevice(target) + } else if file_type.is_fifo() { + layout::Entry::Fifo(target) + } else if file_type.is_socket() { + layout::Entry::Socket(target) + } else { + hasher.reset(); + + let mut digest_writer = digest::Writer::new(io::sink(), hasher); + let mut file = fs::File::open(path)?; + + // Copy bytes to null sink so we don't + // explode memory + io::copy(&mut file, &mut digest_writer)?; + + let hash = hasher.digest128(); + + layout::Entry::Regular(hash, target) + }, + }) +} + +#[derive(Debug, Error)] +pub enum Error { + #[error("no matching path rule")] + NoMatchingRule, + #[error("io")] + Io(#[from] io::Error), } diff --git a/crates/boulder/src/package/emit.rs b/crates/boulder/src/package/emit.rs index 88407411d..4799b35b2 100644 --- a/crates/boulder/src/package/emit.rs +++ b/crates/boulder/src/package/emit.rs @@ -11,27 +11,27 @@ use moss::{package::Meta, stone, Dependency}; use thiserror::Error; use self::manifest::Manifest; -use super::collect::PathInfo; +use super::analysis; use crate::{architecture, Architecture, Paths, Recipe}; mod manifest; #[derive(Debug)] -pub struct Package { - pub name: String, +pub struct Package<'a> { + pub name: &'a str, pub build_release: u64, pub architecture: Architecture, - pub source: stone_recipe::Source, - pub definition: stone_recipe::Package, - pub paths: Vec, + pub source: &'a stone_recipe::Source, + pub definition: &'a stone_recipe::Package, + pub analysis: analysis::Bucket, } -impl Package { +impl<'a> Package<'a> { pub fn new( - name: String, - source: stone_recipe::Source, - template: stone_recipe::Package, - paths: Vec, + name: &'a str, + source: &'a stone_recipe::Source, + template: &'a stone_recipe::Package, + analysis: analysis::Bucket, ) -> Self { Self { name, @@ -39,7 +39,7 @@ impl Package { architecture: architecture::host(), source, definition: template, - paths, + analysis, } } @@ -56,7 +56,7 @@ impl Package { pub fn meta(&self) -> Meta { Meta { - name: self.name.clone().into(), + name: self.name.to_string().into(), version_identifier: self.source.version.clone(), source_release: self.source.release, build_release: self.build_release, @@ -66,17 +66,19 @@ impl Package { source_id: self.source.name.clone(), homepage: self.source.homepage.clone(), licenses: self.source.license.clone().into_iter().sorted().collect(), - // TODO: Deps from analyzer dependencies: self - .definition - .run_deps + .analysis + .dependencies .clone() .into_iter() - .filter_map(|dep| dep.parse::().ok()) - .sorted_by_key(|dep| dep.to_string()) + .chain( + self.definition + .run_deps + .iter() + .filter_map(|name| Dependency::from_name(name).ok()), + ) .collect(), - // TODO: Providers from analyzer - providers: 
Default::default(), + providers: self.analysis.providers.clone(), uri: None, hash: None, download_size: None, @@ -122,7 +124,12 @@ fn emit_package(paths: &Paths, package: &Package) -> Result<(), Error> { // Add layouts { - let layouts = package.paths.iter().map(|p| p.layout.clone()).collect::>(); + let layouts = package + .analysis + .paths + .iter() + .map(|p| p.layout.clone()) + .collect::>(); writer.add_payload(layouts.as_slice())?; } @@ -136,6 +143,7 @@ fn emit_package(paths: &Paths, package: &Package) -> Result<(), Error> { // Sort all files by size, largest to smallest let files = package + .analysis .paths .iter() .filter(|p| p.is_file()) diff --git a/crates/boulder/src/package/emit/manifest.rs b/crates/boulder/src/package/emit/manifest.rs index e271debf8..9850098ff 100644 --- a/crates/boulder/src/package/emit/manifest.rs +++ b/crates/boulder/src/package/emit/manifest.rs @@ -20,7 +20,7 @@ pub struct Manifest<'a> { arch: Architecture, output_dir: PathBuf, build_deps: BTreeSet, - packages: Vec<&'a Package>, + packages: Vec<&'a Package<'a>>, } impl<'a> Manifest<'a> { diff --git a/crates/boulder/src/package/emit/manifest/binary.rs b/crates/boulder/src/package/emit/manifest/binary.rs index c4ec81293..8931f4354 100644 --- a/crates/boulder/src/package/emit/manifest/binary.rs +++ b/crates/boulder/src/package/emit/manifest/binary.rs @@ -5,7 +5,6 @@ use std::{collections::BTreeSet, fs::File, path::Path}; use moss::{ - dependency, stone::{ self, header::v1::FileType, @@ -28,15 +27,12 @@ pub fn write(path: &Path, packages: &[&Package], build_deps: &BTreeSet) // Add build deps for name in build_deps { - let dep = name.parse::().unwrap_or_else(|_| Dependency { - kind: dependency::Kind::PackageName, - name: name.to_string(), - }); - - payload.push(payload::Meta { - tag: meta::Tag::BuildDepends, - kind: meta::Kind::Dependency(dep.kind.into(), dep.name), - }); + if let Ok(dep) = Dependency::from_name(name) { + payload.push(payload::Meta { + tag: meta::Tag::BuildDepends, + kind: meta::Kind::Dependency(dep.kind.into(), dep.name), + }); + } } writer.add_payload(payload.as_slice())?; diff --git a/crates/boulder/src/package/emit/manifest/json.rs b/crates/boulder/src/package/emit/manifest/json.rs index 4c147de98..972a161d0 100644 --- a/crates/boulder/src/package/emit/manifest/json.rs +++ b/crates/boulder/src/package/emit/manifest/json.rs @@ -27,8 +27,20 @@ pub fn write( let name = package.name.to_string(); let build_depends = build_deps.iter().cloned().collect(); + let mut depends = package + .analysis + .dependencies + .iter() + .map(ToString::to_string) + .chain(package.definition.run_deps.clone()) + .collect::>(); + depends.sort(); + depends.dedup(); + + let provides = package.analysis.providers.iter().map(ToString::to_string).collect(); let files = package + .analysis .paths .iter() .map(|p| format!("/usr/{}", p.layout.entry.target())) @@ -37,12 +49,10 @@ pub fn write( let package = Package { build_depends, - // TODO - depends: vec![], + depends, files, name: name.clone(), - // TODO - provides: vec![], + provides, }; (name, package) diff --git a/crates/boulder/src/paths.rs b/crates/boulder/src/paths.rs index a6f29af20..9b98e0265 100644 --- a/crates/boulder/src/paths.rs +++ b/crates/boulder/src/paths.rs @@ -18,7 +18,7 @@ impl Id { } } -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct Paths { id: Id, host_root: PathBuf, diff --git a/crates/moss/src/dependency.rs b/crates/moss/src/dependency.rs index 73b27cb9e..7d80762cc 100644 --- a/crates/moss/src/dependency.rs +++ b/crates/moss/src/dependency.rs 
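
    A minimal sketch of the intended behaviour of the from_name helper added
    in the hunk below (illustrative usage only, not part of the diff; the Kind
    variants and the "kind(name)" FromStr syntax come from this file):

        use moss::{dependency::Kind, Dependency};

        fn main() {
            // typed syntax routes through FromStr, e.g. "binary(whoami)"
            let typed = Dependency::from_name("binary(whoami)").unwrap();
            assert_eq!(typed.kind, Kind::Binary);

            // a bare name falls back to a plain package-name dependency
            let named = Dependency::from_name("zlib").unwrap();
            assert_eq!(named.kind, Kind::PackageName);
            assert_eq!(named.name, "zlib");
        }
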
@@ -118,6 +118,19 @@ pub struct Dependency { pub name: String, } +impl Dependency { + pub fn from_name(name: &str) -> Result { + if name.contains('(') { + Dependency::from_str(name) + } else { + Ok(Dependency { + kind: Kind::PackageName, + name: name.to_owned(), + }) + } + } +} + impl PartialOrd for Dependency { fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) diff --git a/crates/moss/src/package/meta.rs b/crates/moss/src/package/meta.rs index bcb847e6e..3fdd2a574 100644 --- a/crates/moss/src/package/meta.rs +++ b/crates/moss/src/package/meta.rs @@ -2,7 +2,7 @@ // // SPDX-License-Identifier: MPL-2.0 -use std::{collections::HashSet, fmt}; +use std::{collections::BTreeSet, fmt}; use stone::payload; use thiserror::Error; @@ -71,9 +71,9 @@ pub struct Meta { /// Licenses this is available under pub licenses: Vec, /// All dependencies - pub dependencies: HashSet, + pub dependencies: BTreeSet, /// All providers, including name() - pub providers: HashSet, + pub providers: BTreeSet, /// If relevant: uri to fetch from pub uri: Option, /// If relevant: hash for the download From 9269b1eae5918a370f6fbb1b9c35dfeebf9c9309 Mon Sep 17 00:00:00 2001 From: Cory Forsstrom Date: Mon, 26 Feb 2024 07:56:54 -0800 Subject: [PATCH 07/26] Add elf splitting & stripping --- crates/boulder/src/package.rs | 2 +- crates/boulder/src/package/analysis.rs | 44 +++-- .../src/package/analysis/handler/elf.rs | 162 ++++++++++++++++-- crates/boulder/src/package/collect.rs | 2 +- 4 files changed, 185 insertions(+), 25 deletions(-) diff --git a/crates/boulder/src/package.rs b/crates/boulder/src/package.rs index a63313937..3be650243 100644 --- a/crates/boulder/src/package.rs +++ b/crates/boulder/src/package.rs @@ -69,7 +69,7 @@ impl Packager { // Process all paths with the analysis chain // This will determine which files get included // and what deps / provides they produce - let mut analysis = analysis::Chain::new(); + let mut analysis = analysis::Chain::new(&self.paths, &self.recipe, &self.collector, &mut hasher); analysis.process(paths).map_err(Error::Analysis)?; // Combine the package definition with the analysis results diff --git a/crates/boulder/src/package/analysis.rs b/crates/boulder/src/package/analysis.rs index 1e2859ad9..c43456a9a 100644 --- a/crates/boulder/src/package/analysis.rs +++ b/crates/boulder/src/package/analysis.rs @@ -2,24 +2,32 @@ // // SPDX-License-Identifier: MPL-2.0 -use std::collections::{BTreeSet, HashMap, VecDeque}; +use std::{ + collections::{BTreeSet, HashMap, VecDeque}, + path::PathBuf, +}; -use moss::{Dependency, Provider}; +use moss::{stone::write::digest, Dependency, Provider}; use tui::Stylize; -use super::collect::PathInfo; +use super::collect::{Collector, PathInfo}; +use crate::{Paths, Recipe}; mod handler; pub type BoxError = Box; -pub struct Chain { +pub struct Chain<'a> { handlers: Vec>, + recipe: &'a Recipe, + paths: &'a Paths, + collector: &'a Collector, + hasher: &'a mut digest::Hasher, pub buckets: HashMap, } -impl Chain { - pub fn new() -> Self { +impl<'a> Chain<'a> { + pub fn new(paths: &'a Paths, recipe: &'a Recipe, collector: &'a Collector, hasher: &'a mut digest::Hasher) -> Self { Self { handlers: vec![ Box::new(handler::ignore_blocked), @@ -30,6 +38,10 @@ impl Chain { // Catch-all if not excluded Box::new(handler::include_any), ], + paths, + recipe, + collector, + hasher, buckets: Default::default(), } } @@ -46,14 +58,20 @@ impl Chain { let mut bucket_mut = BucketMut { providers: &mut bucket.providers, dependencies: &mut bucket.dependencies, + hasher: 
self.hasher, + recipe: self.recipe, + paths: self.paths, }; let response = handler.handle(&mut bucket_mut, &mut path)?; - response - .generated_paths - .into_iter() - .for_each(|path| queue.push_back(path)); + response.generated_paths.into_iter().try_for_each(|path| { + let info = self.collector.path(&path, self.hasher)?; + + queue.push_back(info); + + Ok(()) as Result<(), BoxError> + })?; match response.decision { Decision::NextHandler => continue 'handlers, @@ -86,15 +104,17 @@ pub struct Bucket { pub paths: Vec, } -#[derive(Debug)] pub struct BucketMut<'a> { pub providers: &'a mut BTreeSet, pub dependencies: &'a mut BTreeSet, + pub hasher: &'a mut digest::Hasher, + pub recipe: &'a Recipe, + pub paths: &'a Paths, } pub struct Response { pub decision: Decision, - pub generated_paths: Vec, + pub generated_paths: Vec, } pub enum Decision { diff --git a/crates/boulder/src/package/analysis/handler/elf.rs b/crates/boulder/src/package/analysis/handler/elf.rs index 765929401..0586cadf2 100644 --- a/crates/boulder/src/package/analysis/handler/elf.rs +++ b/crates/boulder/src/package/analysis/handler/elf.rs @@ -1,15 +1,25 @@ -use std::{fs::File, path::Path}; +use std::{ + fs::File, + path::{Path, PathBuf}, + process::Command, +}; use elf::{ abi::{DT_NEEDED, DT_SONAME}, endian::AnyEndian, + file::Class, + note::Note, to_str, }; use moss::{dependency, Dependency, Provider}; - -use crate::package::{ - analysis::{BoxError, BucketMut, Decision, Response}, - collect::PathInfo, +use stone_recipe::tuning::Toolchain; + +use crate::{ + package::{ + analysis::{BoxError, BucketMut, Decision, Response}, + collect::PathInfo, + }, + util, }; pub fn elf(bucket: &mut BucketMut, info: &mut PathInfo) -> Result { @@ -22,7 +32,7 @@ pub fn elf(bucket: &mut BucketMut, info: &mut PathInfo) -> Result Result { + // Add new split file to be analyzed + generated_paths.push(path); + } + Ok(None) => {} + // TODO: Error logging + Err(err) => { + eprintln!("error splitting debug info from {}: {err}", info.path.display()); + } + } + + if let Err(err) = strip(bucket, info) { + // TODO: Error logging + eprintln!("error stripping {}: {err}", info.path.display()); + } + + // Restat original file after split & strip + info.restat(bucket.hasher)?; + } + + Ok(Response { + decision: Decision::IncludeFile, + generated_paths, + }) } -fn parse(path: &Path) -> Result, BoxError> { +fn parse_elf(path: &Path) -> Result, BoxError> { let file = File::open(path)?; Ok(elf::ElfStream::open_stream(file)?) 
} @@ -47,7 +87,7 @@ fn parse_dynamic_section( machine_isa: &str, file_name: &str, ) { - let mut dt_needed_offsets = vec![]; + let mut needed_offsets = vec![]; let mut soname_offset = None; // Get all dynamic entry offsets into string table @@ -55,7 +95,7 @@ fn parse_dynamic_section( for entry in table.iter() { match entry.d_tag { DT_NEEDED => { - dt_needed_offsets.push(entry.d_val() as usize); + needed_offsets.push(entry.d_val() as usize); } DT_SONAME => { soname_offset = Some(entry.d_val() as usize); @@ -69,7 +109,7 @@ fn parse_dynamic_section( // depends and provides if let Ok(Some((_, strtab))) = elf.dynamic_symbol_table() { // needed = dependency - for offset in dt_needed_offsets { + for offset in needed_offsets { if let Ok(name) = strtab.get(offset) { bucket.dependencies.insert(Dependency { kind: dependency::Kind::SharedLibary, @@ -99,3 +139,103 @@ fn parse_dynamic_section( } } } + +fn parse_build_id(elf: &mut elf::ElfStream) -> Option { + let section = *elf.section_header_by_name(".note.gnu.build-id").ok()??; + let notes = elf.section_data_as_notes(§ion).ok()?; + + for note in notes { + if let Note::GnuBuildId(build_id) = note { + let build_id = hex::encode(build_id.0); + return Some(build_id); + } + } + + None +} + +fn split_debug( + bucket: &BucketMut, + info: &PathInfo, + bit_size: Class, + build_id: &str, +) -> Result, BoxError> { + let use_llvm = matches!(bucket.recipe.parsed.options.toolchain, Toolchain::Llvm); + let objcopy = if use_llvm { + "/usr/bin/llvm-objcopy" + } else { + "/usr/bin/objcopy" + }; + + let debug_dir = if matches!(bit_size, Class::ELF64) { + Path::new("usr/lib/debug/.build-id") + } else { + Path::new("usr/lib32/debug/.build-id") + }; + let debug_info_relative_dir = debug_dir.join(&build_id[..2]); + let debug_info_dir = bucket.paths.install().guest.join(debug_info_relative_dir); + let debug_info_path = debug_info_dir.join(format!("{}.debug", &build_id[2..])); + + // Is it possible we already split this? 
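+    // The output is keyed by build-id (usr/lib/debug/.build-id/xx/rest.debug),
+    // so two staged binaries sharing a build-id (e.g. hard links) are split once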
+ if debug_info_path.exists() { + return Ok(None); + } + + util::sync::ensure_dir_exists(&debug_info_dir)?; + + let output = Command::new(objcopy) + .arg("--only-keep-debug") + .arg(&info.path) + .arg(&debug_info_path) + .output()?; + + if !output.status.success() { + return Err(String::from_utf8(output.stderr).unwrap_or_default().into()); + } + + let output = Command::new(objcopy) + .arg("--add-gnu-debuglink") + .arg(&debug_info_path) + .arg(&info.path) + .output()?; + + if !output.status.success() { + return Err(String::from_utf8(output.stderr).unwrap_or_default().into()); + } + + Ok(Some(debug_info_path)) +} + +fn strip(bucket: &BucketMut, info: &PathInfo) -> Result<(), BoxError> { + if !bucket.recipe.parsed.options.strip { + return Ok(()); + } + + let use_llvm = matches!(bucket.recipe.parsed.options.toolchain, Toolchain::Llvm); + let strip = if use_llvm { + "/usr/bin/llvm-strip" + } else { + "/usr/bin/strip" + }; + let is_executable = info + .path + .parent() + .map(|parent| parent.ends_with("bin") || parent.ends_with("sbin")) + .unwrap_or_default(); + + let mut command = Command::new(strip); + + if is_executable { + command.arg(&info.path); + } else { + command.args(["-g", "--strip-unneeded"]).arg(&info.path); + } + + let output = command.output()?; + + if !output.status.success() { + return Err(String::from_utf8(output.stderr).unwrap_or_default().into()); + } + + Ok(()) +} diff --git a/crates/boulder/src/package/collect.rs b/crates/boulder/src/package/collect.rs index 4d0cdd03a..08c60115a 100644 --- a/crates/boulder/src/package/collect.rs +++ b/crates/boulder/src/package/collect.rs @@ -77,7 +77,7 @@ impl Collector { .matching_package(target_path.to_str().unwrap_or_default()) .ok_or(Error::NoMatchingRule)?; - Ok(PathInfo::new(path, target_path, metadata, hasher, package.to_string())?) 
+ PathInfo::new(path, target_path, metadata, hasher, package.to_string()) } /// Enumerates all paths from the filesystem starting at root or subdir of root, if provided From 487c5c363f6331f864856d975c327a30857e5f0c Mon Sep 17 00:00:00 2001 From: Cory Forsstrom Date: Mon, 26 Feb 2024 08:31:30 -0800 Subject: [PATCH 08/26] Add interpreter parsing --- .../src/package/analysis/handler/elf.rs | 67 ++++++++++++++++--- 1 file changed, 59 insertions(+), 8 deletions(-) diff --git a/crates/boulder/src/package/analysis/handler/elf.rs b/crates/boulder/src/package/analysis/handler/elf.rs index 0586cadf2..fa1343710 100644 --- a/crates/boulder/src/package/analysis/handler/elf.rs +++ b/crates/boulder/src/package/analysis/handler/elf.rs @@ -1,4 +1,5 @@ use std::{ + ffi::CStr, fs::File, path::{Path, PathBuf}, process::Command, @@ -42,7 +43,8 @@ pub fn elf(bucket: &mut BucketMut, info: &mut PathInfo) -> Result Result { + Ok(Some(debug_path)) => { // Add new split file to be analyzed - generated_paths.push(path); + generated_paths.push(debug_path); } Ok(None) => {} // TODO: Error logging @@ -85,6 +87,8 @@ fn parse_dynamic_section( elf: &mut elf::ElfStream, bucket: &mut BucketMut, machine_isa: &str, + bit_size: Class, + info: &PathInfo, file_name: &str, ) { let mut needed_offsets = vec![]; @@ -120,26 +124,73 @@ fn parse_dynamic_section( // soname exposed, let's share it if file_name.contains(".so") { - let mut name = ""; + let mut soname = ""; if let Some(offset) = soname_offset { if let Ok(val) = strtab.get(offset) { - name = val; + soname = val; } } - if name.is_empty() { - name = file_name; + if soname.is_empty() { + soname = file_name; } bucket.providers.insert(Provider { kind: dependency::Kind::SharedLibary, - name: format!("{name}({machine_isa})"), + name: format!("{soname}({machine_isa})"), }); + + // Do we possibly have an Interpeter? This is a .dynamic library .. 
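+            // e.g. a glibc interpreter soname such as ld-linux-x86-64.so.2 staged
+            // under /usr/bin; mirror the historical loader paths as providers below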
+ if soname.starts_with("ld-") && info.target_path.starts_with("/usr/bin") { + let interp_paths = if matches!(bit_size, Class::ELF64) { + [ + format!("/usr/lib64/{soname}({machine_isa})"), + format!("/lib64/{soname}({machine_isa})"), + format!("/lib/{soname}({machine_isa})"), + format!("{}({machine_isa})", info.target_path.display()), + ] + } else { + [ + format!("/usr/lib/{soname}({machine_isa})"), + format!("/lib32/{soname}({machine_isa})"), + format!("/lib/{soname}({machine_isa})"), + format!("{}({machine_isa})", info.target_path.display()), + ] + }; + + for path in interp_paths { + bucket.providers.insert(Provider { + kind: dependency::Kind::Interpreter, + name: path.clone(), + }); + bucket.providers.insert(Provider { + kind: dependency::Kind::SharedLibary, + name: path, + }); + } + } } } } +fn parse_interp_section(elf: &mut elf::ElfStream, bucket: &mut BucketMut, machine_isa: &str) { + let Some(section) = elf.section_header_by_name(".interp").ok().flatten().copied() else { + return; + }; + + let Ok((data, _)) = elf.section_data(§ion) else { + return; + }; + + if let Some(content) = CStr::from_bytes_until_nul(data).ok().and_then(|s| s.to_str().ok()) { + bucket.dependencies.insert(Dependency { + kind: dependency::Kind::Interpreter, + name: format!("{content}({machine_isa})"), + }); + } +} + fn parse_build_id(elf: &mut elf::ElfStream) -> Option { let section = *elf.section_header_by_name(".note.gnu.build-id").ok()??; let notes = elf.section_data_as_notes(§ion).ok()?; From 4c069fe94b83803bdb5dd31ff42c14ac36c460f0 Mon Sep 17 00:00:00 2001 From: Cory Forsstrom Date: Mon, 26 Feb 2024 08:56:40 -0800 Subject: [PATCH 09/26] Don't emit depends on items a package provides --- crates/boulder/src/package/analysis.rs | 17 +++++++++++++++-- crates/boulder/src/package/emit.rs | 7 +++---- .../boulder/src/package/emit/manifest/json.rs | 5 ++--- 3 files changed, 20 insertions(+), 9 deletions(-) diff --git a/crates/boulder/src/package/analysis.rs b/crates/boulder/src/package/analysis.rs index c43456a9a..672869857 100644 --- a/crates/boulder/src/package/analysis.rs +++ b/crates/boulder/src/package/analysis.rs @@ -99,11 +99,24 @@ impl<'a> Chain<'a> { #[derive(Debug, Default)] pub struct Bucket { - pub providers: BTreeSet, - pub dependencies: BTreeSet, + providers: BTreeSet, + dependencies: BTreeSet, pub paths: Vec, } +impl Bucket { + pub fn providers(&self) -> impl Iterator { + self.providers.iter() + } + + pub fn dependencies(&self) -> impl Iterator { + // We shouldn't self depend on things we provide + self.dependencies + .iter() + .filter(|d| !self.providers.iter().any(|p| p.kind == d.kind && p.name == d.name)) + } +} + pub struct BucketMut<'a> { pub providers: &'a mut BTreeSet, pub dependencies: &'a mut BTreeSet, diff --git a/crates/boulder/src/package/emit.rs b/crates/boulder/src/package/emit.rs index 4799b35b2..eb069f94c 100644 --- a/crates/boulder/src/package/emit.rs +++ b/crates/boulder/src/package/emit.rs @@ -68,9 +68,8 @@ impl<'a> Package<'a> { licenses: self.source.license.clone().into_iter().sorted().collect(), dependencies: self .analysis - .dependencies - .clone() - .into_iter() + .dependencies() + .cloned() .chain( self.definition .run_deps @@ -78,7 +77,7 @@ impl<'a> Package<'a> { .filter_map(|name| Dependency::from_name(name).ok()), ) .collect(), - providers: self.analysis.providers.clone(), + providers: self.analysis.providers().cloned().collect(), uri: None, hash: None, download_size: None, diff --git a/crates/boulder/src/package/emit/manifest/json.rs 
b/crates/boulder/src/package/emit/manifest/json.rs index 972a161d0..0552cead0 100644 --- a/crates/boulder/src/package/emit/manifest/json.rs +++ b/crates/boulder/src/package/emit/manifest/json.rs @@ -29,15 +29,14 @@ pub fn write( let build_depends = build_deps.iter().cloned().collect(); let mut depends = package .analysis - .dependencies - .iter() + .dependencies() .map(ToString::to_string) .chain(package.definition.run_deps.clone()) .collect::>(); depends.sort(); depends.dedup(); - let provides = package.analysis.providers.iter().map(ToString::to_string).collect(); + let provides = package.analysis.providers().map(ToString::to_string).collect(); let files = package .analysis From a316dc2e94b4f03ca68aea0c89a1726cc13d537c Mon Sep 17 00:00:00 2001 From: Cory Forsstrom Date: Mon, 26 Feb 2024 12:23:10 -0800 Subject: [PATCH 10/26] Add tui output --- crates/boulder/src/package/analysis.rs | 34 +++++++++--- crates/boulder/src/package/emit.rs | 56 ++++++++++++++------ crates/moss/src/cli/extract.rs | 38 +------------- crates/tui/src/lib.rs | 72 ++++++++++++++++++++++++++ 4 files changed, 141 insertions(+), 59 deletions(-) diff --git a/crates/boulder/src/package/analysis.rs b/crates/boulder/src/package/analysis.rs index 672869857..33267611a 100644 --- a/crates/boulder/src/package/analysis.rs +++ b/crates/boulder/src/package/analysis.rs @@ -8,7 +8,7 @@ use std::{ }; use moss::{stone::write::digest, Dependency, Provider}; -use tui::Stylize; +use tui::{ProgressBar, ProgressStyle, Stylize}; use super::collect::{Collector, PathInfo}; use crate::{Paths, Recipe}; @@ -47,11 +47,24 @@ impl<'a> Chain<'a> { } pub fn process(&mut self, paths: impl IntoIterator) -> Result<(), BoxError> { + println!("Analyzing artefacts\n"); + let mut queue = paths.into_iter().collect::>(); + let pb = ProgressBar::new(queue.len() as u64) + .with_message("Analyzing") + .with_style( + ProgressStyle::with_template("\n|{bar:20.red/blue}| {pos}/{len} {wide_msg}") + .unwrap() + .progress_chars("■≡=- "), + ); + pb.tick(); + 'paths: while let Some(mut path) = queue.pop_front() { let bucket = self.buckets.entry(path.package.clone()).or_default(); + pb.set_message(format!("Analyzing {}", path.target_path.display())); + 'handlers: for handler in &self.handlers { // Only give handlers ability to update // certain bucket fields @@ -76,16 +89,18 @@ impl<'a> Chain<'a> { match response.decision { Decision::NextHandler => continue 'handlers, Decision::IgnoreFile { reason } => { - // TODO: Proper logging so we can log from various places - // and have consistent output - eprintln!( - "[analysis] {} - {reason}, ignoring {}", - "WARN".yellow(), - path.target_path.display() - ); + pb.println(format!( + "{} {}{}", + "Ignored ".yellow(), + path.target_path.display(), + format!(" ({reason})").dim() + )); + pb.inc(1); continue 'paths; } Decision::IncludeFile => { + pb.println(format!("{} {}", "Included".green(), path.target_path.display())); + pb.inc(1); bucket.paths.push(path); continue 'paths; } @@ -93,6 +108,9 @@ impl<'a> Chain<'a> { } } + pb.finish_and_clear(); + println!(); + Ok(()) } } diff --git a/crates/boulder/src/package/emit.rs b/crates/boulder/src/package/emit.rs index eb069f94c..6632f9f01 100644 --- a/crates/boulder/src/package/emit.rs +++ b/crates/boulder/src/package/emit.rs @@ -4,11 +4,13 @@ use std::{ fs::{self, File}, io::{self, Write}, + time::Duration, }; use itertools::Itertools; use moss::{package::Meta, stone, Dependency}; use thiserror::Error; +use tui::{ProgressBar, ProgressReader, ProgressStyle, Stylize}; use 
self::manifest::Manifest; use super::analysis; @@ -88,6 +90,8 @@ impl<'a> Package<'a> { pub fn emit(paths: &Paths, recipe: &Recipe, packages: &[Package]) -> Result<(), Error> { let mut manfiest = Manifest::new(paths, recipe, architecture::host()); + println!("Emitting packages\n"); + for package in packages { if !package.is_dbginfo() { manfiest.add_package(package); @@ -105,6 +109,25 @@ pub fn emit(paths: &Paths, recipe: &Recipe, packages: &[Package]) -> Result<(), fn emit_package(paths: &Paths, package: &Package) -> Result<(), Error> { let filename = package.filename(); + // Sort all files by size, largest to smallest + let sorted_files = package + .analysis + .paths + .iter() + .filter(|p| p.is_file()) + .sorted_by(|a, b| a.size.cmp(&b.size).reverse()) + .collect::>(); + let total_file_size = sorted_files.iter().map(|p| p.size).sum(); + + let pb = ProgressBar::new(total_file_size) + .with_message(format!("Generating {filename}")) + .with_style( + ProgressStyle::with_template(" {spinner} |{percent:>3}%| {wide_msg} {binary_bytes_per_sec:>.dim} ") + .unwrap() + .tick_chars("--=≡■≡=--"), + ); + pb.enable_steady_tick(Duration::from_millis(150)); + // Output file to artefacts directory let out_path = paths.artefacts().guest.join(&filename); if out_path.exists() { @@ -140,24 +163,24 @@ fn emit_package(paths: &Paths, package: &Package) -> Result<(), Error> { .create(true) .open(&temp_content_path)?; - // Sort all files by size, largest to smallest - let files = package - .analysis - .paths - .iter() - .filter(|p| p.is_file()) - .sorted_by(|a, b| a.size.cmp(&b.size).reverse()) - .collect::>(); - // Convert to content writer using pledged size = total size of all files - let pledged_size = files.iter().map(|p| p.size).sum(); - let mut writer = writer.with_content(&mut temp_content, Some(pledged_size))?; + let mut writer = writer.with_content(&mut temp_content, Some(total_file_size))?; - // Add each file content - for info in files { - let mut file = File::open(&info.path)?; + let mut total_read = 0; - writer.add_content(&mut file)?; + // Add each file content + for file in sorted_files { + let mut file = File::open(&file.path)?; + let mut progress_reader = ProgressReader { + reader: &mut file, + total: total_file_size, + read: total_read, + progress: pb.clone(), + }; + + writer.add_content(&mut progress_reader)?; + + total_read = progress_reader.read; } // Finalize & flush @@ -167,6 +190,9 @@ fn emit_package(paths: &Paths, package: &Package) -> Result<(), Error> { // Remove temp content file fs::remove_file(temp_content_path)?; + pb.println(format!("{} {filename}", "Emitted".green())); + pb.finish_and_clear(); + Ok(()) } diff --git a/crates/moss/src/cli/extract.rs b/crates/moss/src/cli/extract.rs index 3974e840c..6e9dd13cf 100644 --- a/crates/moss/src/cli/extract.rs +++ b/crates/moss/src/cli/extract.rs @@ -4,7 +4,7 @@ use std::{ fs::{create_dir_all, hard_link, remove_dir_all, remove_file, File}, - io::{copy, Read, Seek, SeekFrom, Write}, + io::{copy, Read, Seek, SeekFrom}, os::unix::fs::symlink, path::PathBuf, }; @@ -15,7 +15,7 @@ use rayon::prelude::{IntoParallelRefIterator, ParallelIterator}; use stone::{payload::layout, read::PayloadKind}; use thiserror::{self, Error}; use tokio::task; -use tui::{ProgressBar, ProgressStyle}; +use tui::{ProgressBar, ProgressStyle, ProgressWriter}; pub fn command() -> Command { Command::new("extract") @@ -163,37 +163,3 @@ pub enum Error { #[error("stone format")] Format(#[from] stone::read::Error), } - -struct ProgressWriter { - writer: W, - total: u64, - 
written: u64, - progress: ProgressBar, -} - -impl ProgressWriter { - pub fn new(writer: W, total: u64, progress: ProgressBar) -> Self { - Self { - writer, - total, - written: 0, - progress, - } - } -} - -impl Write for ProgressWriter { - fn write(&mut self, buf: &[u8]) -> std::io::Result { - let bytes = self.writer.write(buf)?; - - self.written += bytes as u64; - self.progress - .set_position((self.written as f64 / self.total as f64 * 1000.0) as u64); - - Ok(bytes) - } - - fn flush(&mut self) -> std::io::Result<()> { - self.writer.flush() - } -} diff --git a/crates/tui/src/lib.rs b/crates/tui/src/lib.rs index d18ee9366..d5f761d9e 100644 --- a/crates/tui/src/lib.rs +++ b/crates/tui/src/lib.rs @@ -2,6 +2,8 @@ // // SPDX-License-Identifier: MPL-2.0 +use std::io::{Read, Write}; + pub use self::reexport::*; pub mod pretty; @@ -28,6 +30,76 @@ pub fn term_size() -> TermSize { } } +/// Wraps a [`Write`] and updates the provided [`ProgressBar`] with progress +/// of total bytes written +pub struct ProgressWriter { + pub writer: W, + pub total: u64, + pub written: u64, + pub progress: ProgressBar, +} + +impl ProgressWriter { + pub fn new(writer: W, total: u64, progress: ProgressBar) -> Self { + Self { + writer, + total, + written: 0, + progress, + } + } +} + +impl Write for ProgressWriter { + fn write(&mut self, buf: &[u8]) -> std::io::Result { + let bytes = self.writer.write(buf)?; + + self.written += bytes as u64; + self.progress.set_position( + (self.written as f64 / self.total as f64 * self.progress.length().unwrap_or_default() as f64) as u64, + ); + + Ok(bytes) + } + + fn flush(&mut self) -> std::io::Result<()> { + self.writer.flush() + } +} + +/// Wraps a [`Read`] and updates the provided [`ProgressBar`] with progress +/// of total bytes read +pub struct ProgressReader { + pub reader: R, + pub total: u64, + pub read: u64, + pub progress: ProgressBar, +} + +impl ProgressReader { + pub fn new(reader: R, total: u64, progress: ProgressBar) -> Self { + Self { + reader, + total, + read: 0, + progress, + } + } +} + +impl Read for ProgressReader { + fn read(&mut self, buf: &mut [u8]) -> std::io::Result { + let read = self.reader.read(buf)?; + + self.read += read as u64; + self.progress.set_position( + (self.read as f64 / self.total as f64 * self.progress.length().unwrap_or_default() as f64) as u64, + ); + + Ok(read) + } +} + /// Provide a standard approach to ratatui based TUI in moss mod reexport { pub use crossterm::style::Stylize; From 70bd159bf9edcf4124bc4fd44868ae0245a85f4d Mon Sep 17 00:00:00 2001 From: Cory Forsstrom Date: Mon, 26 Feb 2024 13:22:26 -0800 Subject: [PATCH 11/26] Rename step to phase --- crates/boulder/src/build.rs | 40 +++++++------- crates/boulder/src/build/job.rs | 16 +++--- .../src/build/job/{step.rs => phase.rs} | 54 +++++++++---------- crates/boulder/src/cli/chroot.rs | 6 +-- 4 files changed, 58 insertions(+), 58 deletions(-) rename crates/boulder/src/build/job/{step.rs => phase.rs} (90%) diff --git a/crates/boulder/src/build.rs b/crates/boulder/src/build.rs index eced8d69d..5fdb36902 100644 --- a/crates/boulder/src/build.rs +++ b/crates/boulder/src/build.rs @@ -93,7 +93,7 @@ impl Builder { pub fn extra_deps(&self) -> impl Iterator { self.targets.iter().flat_map(|target| { target.jobs.iter().flat_map(|job| { - job.steps + job.phases .values() .flat_map(|script| script.dependencies.iter().map(String::as_str)) }) @@ -157,7 +157,7 @@ impl Builder { println!("{}", format!("│pgo-{stage}").dim()); } - for (i, (step, script)) in job.steps.iter().enumerate() { + for (i, (phase, 
script)) in job.phases.iter().enumerate() { let pipes = if job.pgo_stage.is_some() { "││".dim() } else { @@ -167,7 +167,7 @@ if i > 0 { println!("{pipes}"); } - println!("{pipes}{}", step.styled(format!("{step}"))); + println!("{pipes}{}", phase.styled(format!("{phase}"))); let build_dir = &job.build_dir; let work_dir = &job.work_dir; @@ -176,7 +176,7 @@ for command in &script.commands { match command { script::Command::Break(breakpoint) => { - let line_num = breakpoint_line(breakpoint, &self.recipe, job.target, *step) + let line_num = breakpoint_line(breakpoint, &self.recipe, job.target, *phase) .map(|line_num| format!(" at line {line_num}")) .unwrap_or_default(); @@ -217,7 +217,7 @@ let script_path = "/tmp/script"; std::fs::write(script_path, content).unwrap(); - let result = logged(*step, is_pgo, "/bin/sh", |command| { + let result = logged(*phase, is_pgo, "/bin/sh", |command| { command .arg(script_path) .env_clear() @@ -263,7 +263,7 @@ } fn logged( - step: job::Step, + phase: job::Phase, is_pgo: bool, command: &str, f: impl FnOnce(&mut process::Command) -> &mut process::Command, @@ -278,8 +278,8 @@ .spawn()?; // Log stdout and stderr - let stdout_log = log(step, is_pgo, child.stdout.take().unwrap()); - let stderr_log = log(step, is_pgo, child.stderr.take().unwrap()); + let stdout_log = log(phase, is_pgo, child.stdout.take().unwrap()); + let stderr_log = log(phase, is_pgo, child.stderr.take().unwrap()); // Forward SIGINT to this process ::container::forward_sigint(Pid::from_raw(child.id() as i32))?; @@ -292,7 +292,7 @@ Ok(result) } -fn log<R>(step: job::Step, is_pgo: bool, pipe: R) -> thread::JoinHandle<()> +fn log<R>(phase: job::Phase, is_pgo: bool, pipe: R) -> thread::JoinHandle<()> where R: io::Read + Send + 'static, { @@ -300,7 +300,7 @@ where thread::spawn(move || { let pgo = is_pgo.then_some("│").unwrap_or_default().dim(); - let kind = step.styled(format!("{}│", step.abbrev())); + let kind = phase.styled(format!("{}│", phase.abbrev())); let tag = format!("{}{pgo}{kind}", "│".dim()); let mut lines = io::BufReader::new(pipe).lines(); @@ -339,7 +339,7 @@ fn breakpoint_line( breakpoint: &Breakpoint, recipe: &Recipe, build_target: BuildTarget, - step: job::Step, + phase: job::Phase, ) -> Option<usize> { let profile = recipe.build_target_profile_key(build_target); @@ -373,18 +373,18 @@ } }); - let step = match step { - // Internal step, no breakpoint will occur - job::Step::Prepare => return None, - job::Step::Setup => "setup", - job::Step::Build => "build", - job::Step::Install => "install", - job::Step::Check => "check", - job::Step::Workload => "workload", + let phase = match phase { + // Internal phase, no breakpoint will occur + job::Phase::Prepare => return None, + job::Phase::Setup => "setup", + job::Phase::Build => "build", + job::Phase::Install => "install", + job::Phase::Check => "check", + job::Phase::Workload => "workload", }; lines.find_map(|(mut line_num, line)| { - if has_key(line, step) { + if has_key(line, phase) { // 0 based to 1 based line_num += 1; diff --git a/crates/boulder/src/build/job.rs b/crates/boulder/src/build/job.rs index 47e112ca7..5b7d55177 100644 --- a/crates/boulder/src/build/job.rs +++ b/crates/boulder/src/build/job.rs @@ -11,17 +11,17 @@ use std::{ use stone_recipe::{script, tuning, Script, Upstream}; use thiserror::Error; -pub use self::step::Step; +pub use self::phase::Phase; use crate::build::pgo; use crate::{architecture::BuildTarget, util, Macros, Paths,
Recipe}; -mod step; +mod phase; #[derive(Debug)] pub struct Job { pub target: BuildTarget, pub pgo_stage: Option<pgo::Stage>, - pub steps: BTreeMap<Step, Script>, + pub phases: BTreeMap<Phase, Script>, pub work_dir: PathBuf, pub build_dir: PathBuf, } @@ -38,20 +38,20 @@ impl Job { let build_dir = paths.build().guest.join(target.to_string()); let work_dir = work_dir(&build_dir, &recipe.parsed.upstreams); - let steps = step::list(pgo_stage) + let phases = phase::list(pgo_stage) .into_iter() - .filter_map(|step| { - let result = step + .filter_map(|phase| { + let result = phase .script(target, pgo_stage, recipe, paths, macros, ccache) .transpose()?; - Some(result.map(|script| (step, script))) + Some(result.map(|script| (phase, script))) }) .collect::<Result<_, _>>()?; Ok(Self { target, pgo_stage, - steps, + phases, work_dir, build_dir, }) diff --git a/crates/boulder/src/build/job/step.rs b/crates/boulder/src/build/job/phase.rs similarity index 90% rename from crates/boulder/src/build/job/step.rs rename to crates/boulder/src/build/job/phase.rs index 423e07c6c..011f610b7 100644 --- a/crates/boulder/src/build/job/step.rs +++ b/crates/boulder/src/build/job/phase.rs @@ -17,16 +17,16 @@ use super::{work_dir, Error}; use crate::build::pgo; use crate::{architecture::BuildTarget, util, Macros, Paths, Recipe}; -pub fn list(pgo_stage: Option<pgo::Stage>) -> Vec<Step> { +pub fn list(pgo_stage: Option<pgo::Stage>) -> Vec<Phase> { if matches!(pgo_stage, Some(pgo::Stage::One | pgo::Stage::Two)) { - Step::WORKLOAD.to_vec() + Phase::WORKLOAD.to_vec() } else { - Step::NORMAL.to_vec() + Phase::NORMAL.to_vec() } } #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord, strum::Display)] -pub enum Step { +pub enum Phase { Prepare, Setup, Build, @@ -35,18 +35,18 @@ -impl Step { - const NORMAL: &'static [Self] = &[Step::Prepare, Step::Setup, Step::Build, Step::Install, Step::Check]; - const WORKLOAD: &'static [Self] = &[Step::Prepare, Step::Setup, Step::Build, Step::Workload]; +impl Phase { + const NORMAL: &'static [Self] = &[Phase::Prepare, Phase::Setup, Phase::Build, Phase::Install, Phase::Check]; + const WORKLOAD: &'static [Self] = &[Phase::Prepare, Phase::Setup, Phase::Build, Phase::Workload]; pub fn abbrev(&self) -> &str { match self { - Step::Prepare => "P", - Step::Setup => "S", - Step::Build => "B", - Step::Install => "I", - Step::Check => "C", - Step::Workload => "W", + Phase::Prepare => "P", + Phase::Setup => "S", + Phase::Build => "B", + Phase::Install => "I", + Phase::Check => "C", + Phase::Workload => "W", } } @@ -55,12 +55,12 @@ // Taste the rainbow // TODO: Ikey plz make pretty match self { - Step::Prepare => s.grey(), - Step::Setup => s.cyan(), - Step::Build => s.blue(), - Step::Check => s.yellow(), - Step::Install => s.green(), - Step::Workload => s.magenta(), + Phase::Prepare => s.grey(), + Phase::Setup => s.cyan(), + Phase::Build => s.blue(), + Phase::Check => s.yellow(), + Phase::Install => s.green(), + Phase::Workload => s.magenta(), } .dim() .to_string() @@ -78,12 +78,12 @@ let build = recipe.build_target_definition(target); let Some(content) = (match self { - Step::Prepare => Some(prepare_script(&recipe.parsed.upstreams)), - Step::Setup => build.setup.clone(), - Step::Build => build.build.clone(), - Step::Check => build.check.clone(), - Step::Install => build.install.clone(), - Step::Workload => match build.workload.clone() { + Phase::Prepare => Some(prepare_script(&recipe.parsed.upstreams)), + Phase::Setup => build.setup.clone(), + Phase::Build => build.build.clone(), + Phase::Check => build.check.clone(), +
Phase::Install => build.install.clone(), + Phase::Workload => match build.workload.clone() { Some(mut content) => { if matches!(recipe.parsed.options.toolchain, Toolchain::Llvm) { if matches!(pgo_stage, Some(pgo::Stage::One)) { @@ -108,7 +108,7 @@ let mut env = build .environment .as_deref() - .filter(|env| *env != "(null)" && !env.is_empty() && !matches!(self, Step::Prepare)) + .filter(|env| *env != "(null)" && !env.is_empty() && !matches!(self, Phase::Prepare)) .unwrap_or_default() .to_string(); env = format!("%scriptBase\n{env}\n"); @@ -117,7 +117,7 @@ let build_target = target.to_string(); let build_dir = paths.build().guest.join(&build_target); - let work_dir = if matches!(self, Step::Prepare) { + let work_dir = if matches!(self, Phase::Prepare) { build_dir.clone() } else { work_dir(&build_dir, &recipe.parsed.upstreams) diff --git a/crates/boulder/src/cli/chroot.rs b/crates/boulder/src/cli/chroot.rs index 22060695a..a8c0bdf0c 100644 --- a/crates/boulder/src/cli/chroot.rs +++ b/crates/boulder/src/cli/chroot.rs @@ -40,10 +40,10 @@ pub fn handle(command: Command, env: Env) -> Result<(), Error> { // to the container environment with all actions // and definitions // - // The step doesn't matter, but we use `prepare` + // The phase doesn't matter, but we use `prepare` // since it uses hardcoded content that's always // available to create a script from - let script = build::job::Step::Prepare + let script = build::job::Phase::Prepare .script( BuildTarget::Native(architecture::host()), None, @@ -53,7 +53,7 @@ pub fn handle(command: Command, env: Env) -> Result<(), Error> { false, ) .map_err(Error::BuildScript)? - .expect("script always available for prepare step"); + .expect("script always available for prepare phase"); let profile = &build::format_profile(&script); let home = &paths.build().guest; From 023e618428501397138f0cb4e5a424b16aafe71e Mon Sep 17 00:00:00 2001 From: Cory Forsstrom Date: Mon, 26 Feb 2024 13:26:33 -0800 Subject: [PATCH 12/26] Fix manifest misspelling --- crates/boulder/src/package/emit.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/crates/boulder/src/package/emit.rs b/crates/boulder/src/package/emit.rs index 6632f9f01..c50813b33 100644 --- a/crates/boulder/src/package/emit.rs +++ b/crates/boulder/src/package/emit.rs @@ -88,20 +88,20 @@ impl<'a> Package<'a> { } pub fn emit(paths: &Paths, recipe: &Recipe, packages: &[Package]) -> Result<(), Error> { - let mut manfiest = Manifest::new(paths, recipe, architecture::host()); + let mut manifest = Manifest::new(paths, recipe, architecture::host()); println!("Emitting packages\n"); for package in packages { if !package.is_dbginfo() { - manfiest.add_package(package); + manifest.add_package(package); } emit_package(paths, package)?; } - manfiest.write_binary()?; - manfiest.write_json()?; + manifest.write_binary()?; + manifest.write_json()?; Ok(()) } From 3a82906b2dfdf1ec60333222777db22e90f55989 Mon Sep 17 00:00:00 2001 From: Fabio Forni Date: Wed, 28 Feb 2024 14:54:56 +0100 Subject: [PATCH 13/26] Move apps to the top level directory --- Cargo.toml | 5 ++-- {crates/boulder => boulder}/Cargo.toml | 10 ++++---- .../data/macros/actions/autotools.yml | 0 .../data/macros/actions/cmake.yml | 0 .../data/macros/actions/meson.yml | 0 .../data/macros/actions/misc.yml | 0 .../data/macros/actions/pgo.yml | 0 .../data/macros/actions/python.yml | 0 .../data/macros/arch/aarch64.yml | 0 .../data/macros/arch/base.yml | 0 .../data/macros/arch/emul32/x86_64.yml | 0 .../data/macros/arch/x86.yml |
0 .../data/macros/arch/x86_64-stage1.yml | 0 .../data/macros/arch/x86_64.yml | 0 .../data/profile.d/default-x86_64.yaml | 0 .../boulder => boulder}/src/architecture.rs | 0 {crates/boulder => boulder}/src/build.rs | 0 {crates/boulder => boulder}/src/build/job.rs | 0 .../src/build/job/phase.rs | 0 {crates/boulder => boulder}/src/build/pgo.rs | 0 {crates/boulder => boulder}/src/build/root.rs | 0 .../boulder => boulder}/src/build/upstream.rs | 0 {crates/boulder => boulder}/src/cli.rs | 0 {crates/boulder => boulder}/src/cli/build.rs | 0 {crates/boulder => boulder}/src/cli/chroot.rs | 0 .../boulder => boulder}/src/cli/profile.rs | 0 {crates/boulder => boulder}/src/cli/recipe.rs | 0 {crates/boulder => boulder}/src/container.rs | 0 {crates/boulder => boulder}/src/env.rs | 0 {crates/boulder => boulder}/src/lib.rs | 0 {crates/boulder => boulder}/src/macros.rs | 0 {crates/boulder => boulder}/src/main.rs | 0 {crates/boulder => boulder}/src/package.rs | 0 .../src/package/analysis.rs | 0 .../src/package/analysis/handler.rs | 0 .../src/package/analysis/handler/elf.rs | 0 .../src/package/collect.rs | 0 .../boulder => boulder}/src/package/emit.rs | 0 .../src/package/emit/manifest.rs | 0 .../src/package/emit/manifest/binary.rs | 0 .../src/package/emit/manifest/json.rs | 0 {crates/boulder => boulder}/src/paths.rs | 0 {crates/boulder => boulder}/src/profile.rs | 0 {crates/boulder => boulder}/src/recipe.rs | 0 {crates/boulder => boulder}/src/runtime.rs | 0 {crates/boulder => boulder}/src/util.rs | 0 {crates/moss => moss}/Cargo.toml | 16 ++++++------- {crates/moss => moss}/build.rs | 0 {crates/moss => moss}/src/cli/extract.rs | 0 {crates/moss => moss}/src/cli/index.rs | 0 {crates/moss => moss}/src/cli/info.rs | 0 {crates/moss => moss}/src/cli/inspect.rs | 0 {crates/moss => moss}/src/cli/install.rs | 0 {crates/moss => moss}/src/cli/list.rs | 0 {crates/moss => moss}/src/cli/mod.rs | 0 {crates/moss => moss}/src/cli/remove.rs | 0 {crates/moss => moss}/src/cli/repo.rs | 0 {crates/moss => moss}/src/cli/state.rs | 0 {crates/moss => moss}/src/cli/sync.rs | 0 {crates/moss => moss}/src/cli/version.rs | 0 {crates/moss => moss}/src/client/cache.rs | 0 {crates/moss => moss}/src/client/install.rs | 0 {crates/moss => moss}/src/client/mod.rs | 0 {crates/moss => moss}/src/client/postblit.rs | 0 {crates/moss => moss}/src/client/prune.rs | 0 {crates/moss => moss}/src/config.rs | 0 .../layout/migrations/20230919225204_init.sql | 0 {crates/moss => moss}/src/db/layout/mod.rs | 6 ++--- .../meta/migrations/20230912204438_init.sql | 0 {crates/moss => moss}/src/db/meta/mod.rs | 24 +++++++++---------- {crates/moss => moss}/src/db/mod.rs | 0 .../state/migrations/20230912172712_init.sql | 0 {crates/moss => moss}/src/db/state/mod.rs | 0 {crates/moss => moss}/src/dependency.rs | 0 {crates/moss => moss}/src/environment.rs | 0 {crates/moss => moss}/src/installation.rs | 0 {crates/moss => moss}/src/lib.rs | 0 {crates/moss => moss}/src/main.rs | 0 {crates/moss => moss}/src/package/meta.rs | 0 {crates/moss => moss}/src/package/mod.rs | 0 {crates/moss => moss}/src/package/render.rs | 0 {crates/moss => moss}/src/registry/job.rs | 0 {crates/moss => moss}/src/registry/mod.rs | 0 .../src/registry/plugin/active.rs | 0 .../src/registry/plugin/cobble.rs | 0 .../moss => moss}/src/registry/plugin/mod.rs | 0 .../src/registry/plugin/repository.rs | 0 .../moss => moss}/src/registry/transaction.rs | 0 .../moss => moss}/src/repository/manager.rs | 0 {crates/moss => moss}/src/repository/mod.rs | 0 {crates/moss => moss}/src/request.rs | 0 {crates/moss => 
moss}/src/state.rs | 0 {crates/moss => moss}/src/stone.rs | 0 93 files changed, 31 insertions(+), 30 deletions(-) rename {crates/boulder => boulder}/Cargo.toml (75%) rename {crates/boulder => boulder}/data/macros/actions/autotools.yml (100%) rename {crates/boulder => boulder}/data/macros/actions/cmake.yml (100%) rename {crates/boulder => boulder}/data/macros/actions/meson.yml (100%) rename {crates/boulder => boulder}/data/macros/actions/misc.yml (100%) rename {crates/boulder => boulder}/data/macros/actions/pgo.yml (100%) rename {crates/boulder => boulder}/data/macros/actions/python.yml (100%) rename {crates/boulder => boulder}/data/macros/arch/aarch64.yml (100%) rename {crates/boulder => boulder}/data/macros/arch/base.yml (100%) rename {crates/boulder => boulder}/data/macros/arch/emul32/x86_64.yml (100%) rename {crates/boulder => boulder}/data/macros/arch/x86.yml (100%) rename {crates/boulder => boulder}/data/macros/arch/x86_64-stage1.yml (100%) rename {crates/boulder => boulder}/data/macros/arch/x86_64.yml (100%) rename {crates/boulder => boulder}/data/profile.d/default-x86_64.yaml (100%) rename {crates/boulder => boulder}/src/architecture.rs (100%) rename {crates/boulder => boulder}/src/build.rs (100%) rename {crates/boulder => boulder}/src/build/job.rs (100%) rename {crates/boulder => boulder}/src/build/job/phase.rs (100%) rename {crates/boulder => boulder}/src/build/pgo.rs (100%) rename {crates/boulder => boulder}/src/build/root.rs (100%) rename {crates/boulder => boulder}/src/build/upstream.rs (100%) rename {crates/boulder => boulder}/src/cli.rs (100%) rename {crates/boulder => boulder}/src/cli/build.rs (100%) rename {crates/boulder => boulder}/src/cli/chroot.rs (100%) rename {crates/boulder => boulder}/src/cli/profile.rs (100%) rename {crates/boulder => boulder}/src/cli/recipe.rs (100%) rename {crates/boulder => boulder}/src/container.rs (100%) rename {crates/boulder => boulder}/src/env.rs (100%) rename {crates/boulder => boulder}/src/lib.rs (100%) rename {crates/boulder => boulder}/src/macros.rs (100%) rename {crates/boulder => boulder}/src/main.rs (100%) rename {crates/boulder => boulder}/src/package.rs (100%) rename {crates/boulder => boulder}/src/package/analysis.rs (100%) rename {crates/boulder => boulder}/src/package/analysis/handler.rs (100%) rename {crates/boulder => boulder}/src/package/analysis/handler/elf.rs (100%) rename {crates/boulder => boulder}/src/package/collect.rs (100%) rename {crates/boulder => boulder}/src/package/emit.rs (100%) rename {crates/boulder => boulder}/src/package/emit/manifest.rs (100%) rename {crates/boulder => boulder}/src/package/emit/manifest/binary.rs (100%) rename {crates/boulder => boulder}/src/package/emit/manifest/json.rs (100%) rename {crates/boulder => boulder}/src/paths.rs (100%) rename {crates/boulder => boulder}/src/profile.rs (100%) rename {crates/boulder => boulder}/src/recipe.rs (100%) rename {crates/boulder => boulder}/src/runtime.rs (100%) rename {crates/boulder => boulder}/src/util.rs (100%) rename {crates/moss => moss}/Cargo.toml (64%) rename {crates/moss => moss}/build.rs (100%) rename {crates/moss => moss}/src/cli/extract.rs (100%) rename {crates/moss => moss}/src/cli/index.rs (100%) rename {crates/moss => moss}/src/cli/info.rs (100%) rename {crates/moss => moss}/src/cli/inspect.rs (100%) rename {crates/moss => moss}/src/cli/install.rs (100%) rename {crates/moss => moss}/src/cli/list.rs (100%) rename {crates/moss => moss}/src/cli/mod.rs (100%) rename {crates/moss => moss}/src/cli/remove.rs (100%) rename {crates/moss => 
moss}/src/cli/repo.rs (100%) rename {crates/moss => moss}/src/cli/state.rs (100%) rename {crates/moss => moss}/src/cli/sync.rs (100%) rename {crates/moss => moss}/src/cli/version.rs (100%) rename {crates/moss => moss}/src/client/cache.rs (100%) rename {crates/moss => moss}/src/client/install.rs (100%) rename {crates/moss => moss}/src/client/mod.rs (100%) rename {crates/moss => moss}/src/client/postblit.rs (100%) rename {crates/moss => moss}/src/client/prune.rs (100%) rename {crates/moss => moss}/src/config.rs (100%) rename {crates/moss => moss}/src/db/layout/migrations/20230919225204_init.sql (100%) rename {crates/moss => moss}/src/db/layout/mod.rs (98%) rename {crates/moss => moss}/src/db/meta/migrations/20230912204438_init.sql (100%) rename {crates/moss => moss}/src/db/meta/mod.rs (97%) rename {crates/moss => moss}/src/db/mod.rs (100%) rename {crates/moss => moss}/src/db/state/migrations/20230912172712_init.sql (100%) rename {crates/moss => moss}/src/db/state/mod.rs (100%) rename {crates/moss => moss}/src/dependency.rs (100%) rename {crates/moss => moss}/src/environment.rs (100%) rename {crates/moss => moss}/src/installation.rs (100%) rename {crates/moss => moss}/src/lib.rs (100%) rename {crates/moss => moss}/src/main.rs (100%) rename {crates/moss => moss}/src/package/meta.rs (100%) rename {crates/moss => moss}/src/package/mod.rs (100%) rename {crates/moss => moss}/src/package/render.rs (100%) rename {crates/moss => moss}/src/registry/job.rs (100%) rename {crates/moss => moss}/src/registry/mod.rs (100%) rename {crates/moss => moss}/src/registry/plugin/active.rs (100%) rename {crates/moss => moss}/src/registry/plugin/cobble.rs (100%) rename {crates/moss => moss}/src/registry/plugin/mod.rs (100%) rename {crates/moss => moss}/src/registry/plugin/repository.rs (100%) rename {crates/moss => moss}/src/registry/transaction.rs (100%) rename {crates/moss => moss}/src/repository/manager.rs (100%) rename {crates/moss => moss}/src/repository/mod.rs (100%) rename {crates/moss => moss}/src/request.rs (100%) rename {crates/moss => moss}/src/state.rs (100%) rename {crates/moss => moss}/src/stone.rs (100%) diff --git a/Cargo.toml b/Cargo.toml index 6647c2859..b56573bf5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,9 +1,11 @@ [workspace] members = [ + "boulder", + "moss", "crates/*", ] default-members = [ - "crates/moss" + "moss" ] resolver = "2" @@ -56,4 +58,3 @@ opt-level = 3 # allow packaging system to do it strip = "none" debug = true - diff --git a/crates/boulder/Cargo.toml b/boulder/Cargo.toml similarity index 75% rename from crates/boulder/Cargo.toml rename to boulder/Cargo.toml index 59dfcda4b..5fd7616c6 100644 --- a/crates/boulder/Cargo.toml +++ b/boulder/Cargo.toml @@ -6,12 +6,12 @@ edition.workspace = true # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -config = { path = "../config" } -container = { path = "../container" } +config = { path = "../crates/config" } +container = { path = "../crates/container" } moss = { path = "../moss" } -stone_recipe = { path = "../stone_recipe" } -tui = { path = "../tui" } -yaml = { path = "../yaml" } +stone_recipe = { path = "../crates/stone_recipe" } +tui = { path = "../crates/tui" } +yaml = { path = "../crates/yaml" } clap.workspace = true dirs.workspace = true diff --git a/crates/boulder/data/macros/actions/autotools.yml b/boulder/data/macros/actions/autotools.yml similarity index 100% rename from crates/boulder/data/macros/actions/autotools.yml rename to 
boulder/data/macros/actions/autotools.yml diff --git a/crates/boulder/data/macros/actions/cmake.yml b/boulder/data/macros/actions/cmake.yml similarity index 100% rename from crates/boulder/data/macros/actions/cmake.yml rename to boulder/data/macros/actions/cmake.yml diff --git a/crates/boulder/data/macros/actions/meson.yml b/boulder/data/macros/actions/meson.yml similarity index 100% rename from crates/boulder/data/macros/actions/meson.yml rename to boulder/data/macros/actions/meson.yml diff --git a/crates/boulder/data/macros/actions/misc.yml b/boulder/data/macros/actions/misc.yml similarity index 100% rename from crates/boulder/data/macros/actions/misc.yml rename to boulder/data/macros/actions/misc.yml diff --git a/crates/boulder/data/macros/actions/pgo.yml b/boulder/data/macros/actions/pgo.yml similarity index 100% rename from crates/boulder/data/macros/actions/pgo.yml rename to boulder/data/macros/actions/pgo.yml diff --git a/crates/boulder/data/macros/actions/python.yml b/boulder/data/macros/actions/python.yml similarity index 100% rename from crates/boulder/data/macros/actions/python.yml rename to boulder/data/macros/actions/python.yml diff --git a/crates/boulder/data/macros/arch/aarch64.yml b/boulder/data/macros/arch/aarch64.yml similarity index 100% rename from crates/boulder/data/macros/arch/aarch64.yml rename to boulder/data/macros/arch/aarch64.yml diff --git a/crates/boulder/data/macros/arch/base.yml b/boulder/data/macros/arch/base.yml similarity index 100% rename from crates/boulder/data/macros/arch/base.yml rename to boulder/data/macros/arch/base.yml diff --git a/crates/boulder/data/macros/arch/emul32/x86_64.yml b/boulder/data/macros/arch/emul32/x86_64.yml similarity index 100% rename from crates/boulder/data/macros/arch/emul32/x86_64.yml rename to boulder/data/macros/arch/emul32/x86_64.yml diff --git a/crates/boulder/data/macros/arch/x86.yml b/boulder/data/macros/arch/x86.yml similarity index 100% rename from crates/boulder/data/macros/arch/x86.yml rename to boulder/data/macros/arch/x86.yml diff --git a/crates/boulder/data/macros/arch/x86_64-stage1.yml b/boulder/data/macros/arch/x86_64-stage1.yml similarity index 100% rename from crates/boulder/data/macros/arch/x86_64-stage1.yml rename to boulder/data/macros/arch/x86_64-stage1.yml diff --git a/crates/boulder/data/macros/arch/x86_64.yml b/boulder/data/macros/arch/x86_64.yml similarity index 100% rename from crates/boulder/data/macros/arch/x86_64.yml rename to boulder/data/macros/arch/x86_64.yml diff --git a/crates/boulder/data/profile.d/default-x86_64.yaml b/boulder/data/profile.d/default-x86_64.yaml similarity index 100% rename from crates/boulder/data/profile.d/default-x86_64.yaml rename to boulder/data/profile.d/default-x86_64.yaml diff --git a/crates/boulder/src/architecture.rs b/boulder/src/architecture.rs similarity index 100% rename from crates/boulder/src/architecture.rs rename to boulder/src/architecture.rs diff --git a/crates/boulder/src/build.rs b/boulder/src/build.rs similarity index 100% rename from crates/boulder/src/build.rs rename to boulder/src/build.rs diff --git a/crates/boulder/src/build/job.rs b/boulder/src/build/job.rs similarity index 100% rename from crates/boulder/src/build/job.rs rename to boulder/src/build/job.rs diff --git a/crates/boulder/src/build/job/phase.rs b/boulder/src/build/job/phase.rs similarity index 100% rename from crates/boulder/src/build/job/phase.rs rename to boulder/src/build/job/phase.rs diff --git a/crates/boulder/src/build/pgo.rs b/boulder/src/build/pgo.rs similarity index 100% 
rename from crates/boulder/src/build/pgo.rs rename to boulder/src/build/pgo.rs diff --git a/crates/boulder/src/build/root.rs b/boulder/src/build/root.rs similarity index 100% rename from crates/boulder/src/build/root.rs rename to boulder/src/build/root.rs diff --git a/crates/boulder/src/build/upstream.rs b/boulder/src/build/upstream.rs similarity index 100% rename from crates/boulder/src/build/upstream.rs rename to boulder/src/build/upstream.rs diff --git a/crates/boulder/src/cli.rs b/boulder/src/cli.rs similarity index 100% rename from crates/boulder/src/cli.rs rename to boulder/src/cli.rs diff --git a/crates/boulder/src/cli/build.rs b/boulder/src/cli/build.rs similarity index 100% rename from crates/boulder/src/cli/build.rs rename to boulder/src/cli/build.rs diff --git a/crates/boulder/src/cli/chroot.rs b/boulder/src/cli/chroot.rs similarity index 100% rename from crates/boulder/src/cli/chroot.rs rename to boulder/src/cli/chroot.rs diff --git a/crates/boulder/src/cli/profile.rs b/boulder/src/cli/profile.rs similarity index 100% rename from crates/boulder/src/cli/profile.rs rename to boulder/src/cli/profile.rs diff --git a/crates/boulder/src/cli/recipe.rs b/boulder/src/cli/recipe.rs similarity index 100% rename from crates/boulder/src/cli/recipe.rs rename to boulder/src/cli/recipe.rs diff --git a/crates/boulder/src/container.rs b/boulder/src/container.rs similarity index 100% rename from crates/boulder/src/container.rs rename to boulder/src/container.rs diff --git a/crates/boulder/src/env.rs b/boulder/src/env.rs similarity index 100% rename from crates/boulder/src/env.rs rename to boulder/src/env.rs diff --git a/crates/boulder/src/lib.rs b/boulder/src/lib.rs similarity index 100% rename from crates/boulder/src/lib.rs rename to boulder/src/lib.rs diff --git a/crates/boulder/src/macros.rs b/boulder/src/macros.rs similarity index 100% rename from crates/boulder/src/macros.rs rename to boulder/src/macros.rs diff --git a/crates/boulder/src/main.rs b/boulder/src/main.rs similarity index 100% rename from crates/boulder/src/main.rs rename to boulder/src/main.rs diff --git a/crates/boulder/src/package.rs b/boulder/src/package.rs similarity index 100% rename from crates/boulder/src/package.rs rename to boulder/src/package.rs diff --git a/crates/boulder/src/package/analysis.rs b/boulder/src/package/analysis.rs similarity index 100% rename from crates/boulder/src/package/analysis.rs rename to boulder/src/package/analysis.rs diff --git a/crates/boulder/src/package/analysis/handler.rs b/boulder/src/package/analysis/handler.rs similarity index 100% rename from crates/boulder/src/package/analysis/handler.rs rename to boulder/src/package/analysis/handler.rs diff --git a/crates/boulder/src/package/analysis/handler/elf.rs b/boulder/src/package/analysis/handler/elf.rs similarity index 100% rename from crates/boulder/src/package/analysis/handler/elf.rs rename to boulder/src/package/analysis/handler/elf.rs diff --git a/crates/boulder/src/package/collect.rs b/boulder/src/package/collect.rs similarity index 100% rename from crates/boulder/src/package/collect.rs rename to boulder/src/package/collect.rs diff --git a/crates/boulder/src/package/emit.rs b/boulder/src/package/emit.rs similarity index 100% rename from crates/boulder/src/package/emit.rs rename to boulder/src/package/emit.rs diff --git a/crates/boulder/src/package/emit/manifest.rs b/boulder/src/package/emit/manifest.rs similarity index 100% rename from crates/boulder/src/package/emit/manifest.rs rename to boulder/src/package/emit/manifest.rs diff --git 
a/crates/boulder/src/package/emit/manifest/binary.rs b/boulder/src/package/emit/manifest/binary.rs similarity index 100% rename from crates/boulder/src/package/emit/manifest/binary.rs rename to boulder/src/package/emit/manifest/binary.rs diff --git a/crates/boulder/src/package/emit/manifest/json.rs b/boulder/src/package/emit/manifest/json.rs similarity index 100% rename from crates/boulder/src/package/emit/manifest/json.rs rename to boulder/src/package/emit/manifest/json.rs diff --git a/crates/boulder/src/paths.rs b/boulder/src/paths.rs similarity index 100% rename from crates/boulder/src/paths.rs rename to boulder/src/paths.rs diff --git a/crates/boulder/src/profile.rs b/boulder/src/profile.rs similarity index 100% rename from crates/boulder/src/profile.rs rename to boulder/src/profile.rs diff --git a/crates/boulder/src/recipe.rs b/boulder/src/recipe.rs similarity index 100% rename from crates/boulder/src/recipe.rs rename to boulder/src/recipe.rs diff --git a/crates/boulder/src/runtime.rs b/boulder/src/runtime.rs similarity index 100% rename from crates/boulder/src/runtime.rs rename to boulder/src/runtime.rs diff --git a/crates/boulder/src/util.rs b/boulder/src/util.rs similarity index 100% rename from crates/boulder/src/util.rs rename to boulder/src/util.rs diff --git a/crates/moss/Cargo.toml b/moss/Cargo.toml similarity index 64% rename from crates/moss/Cargo.toml rename to moss/Cargo.toml index d80df8dba..781720580 100644 --- a/crates/moss/Cargo.toml +++ b/moss/Cargo.toml @@ -4,11 +4,11 @@ version = "0.1.0" edition.workspace = true [dependencies] -config = { path = "../config" } -dag = { path = "../dag" } -stone = { path = "../stone" } -tui = { path = "../tui" } -vfs = { path = "../vfs" } +config = { path = "../crates/config" } +dag = { path = "../crates/dag" } +stone = { path = "../crates/stone" } +tui = { path = "../crates/tui" } +vfs = { path = "../crates/vfs" } bitflags.workspace = true bytes.workspace = true @@ -29,8 +29,8 @@ sqlx.workspace = true tokio.workspace = true tokio-stream.workspace = true tokio-util.workspace = true -thiserror.workspace = true +thiserror.workspace = true url.workspace = true xxhash-rust.workspace = true -triggers = { version = "0.1.0", path = "../triggers" } -container = { version = "0.1.0", path = "../container" } +triggers = { version = "0.1.0", path = "../crates/triggers" } +container = { version = "0.1.0", path = "../crates/container" } diff --git a/crates/moss/build.rs b/moss/build.rs similarity index 100% rename from crates/moss/build.rs rename to moss/build.rs diff --git a/crates/moss/src/cli/extract.rs b/moss/src/cli/extract.rs similarity index 100% rename from crates/moss/src/cli/extract.rs rename to moss/src/cli/extract.rs diff --git a/crates/moss/src/cli/index.rs b/moss/src/cli/index.rs similarity index 100% rename from crates/moss/src/cli/index.rs rename to moss/src/cli/index.rs diff --git a/crates/moss/src/cli/info.rs b/moss/src/cli/info.rs similarity index 100% rename from crates/moss/src/cli/info.rs rename to moss/src/cli/info.rs diff --git a/crates/moss/src/cli/inspect.rs b/moss/src/cli/inspect.rs similarity index 100% rename from crates/moss/src/cli/inspect.rs rename to moss/src/cli/inspect.rs diff --git a/crates/moss/src/cli/install.rs b/moss/src/cli/install.rs similarity index 100% rename from crates/moss/src/cli/install.rs rename to moss/src/cli/install.rs diff --git a/crates/moss/src/cli/list.rs b/moss/src/cli/list.rs similarity index 100% rename from crates/moss/src/cli/list.rs rename to moss/src/cli/list.rs diff --git 
a/crates/moss/src/cli/mod.rs b/moss/src/cli/mod.rs similarity index 100% rename from crates/moss/src/cli/mod.rs rename to moss/src/cli/mod.rs diff --git a/crates/moss/src/cli/remove.rs b/moss/src/cli/remove.rs similarity index 100% rename from crates/moss/src/cli/remove.rs rename to moss/src/cli/remove.rs diff --git a/crates/moss/src/cli/repo.rs b/moss/src/cli/repo.rs similarity index 100% rename from crates/moss/src/cli/repo.rs rename to moss/src/cli/repo.rs diff --git a/crates/moss/src/cli/state.rs b/moss/src/cli/state.rs similarity index 100% rename from crates/moss/src/cli/state.rs rename to moss/src/cli/state.rs diff --git a/crates/moss/src/cli/sync.rs b/moss/src/cli/sync.rs similarity index 100% rename from crates/moss/src/cli/sync.rs rename to moss/src/cli/sync.rs diff --git a/crates/moss/src/cli/version.rs b/moss/src/cli/version.rs similarity index 100% rename from crates/moss/src/cli/version.rs rename to moss/src/cli/version.rs diff --git a/crates/moss/src/client/cache.rs b/moss/src/client/cache.rs similarity index 100% rename from crates/moss/src/client/cache.rs rename to moss/src/client/cache.rs diff --git a/crates/moss/src/client/install.rs b/moss/src/client/install.rs similarity index 100% rename from crates/moss/src/client/install.rs rename to moss/src/client/install.rs diff --git a/crates/moss/src/client/mod.rs b/moss/src/client/mod.rs similarity index 100% rename from crates/moss/src/client/mod.rs rename to moss/src/client/mod.rs diff --git a/crates/moss/src/client/postblit.rs b/moss/src/client/postblit.rs similarity index 100% rename from crates/moss/src/client/postblit.rs rename to moss/src/client/postblit.rs diff --git a/crates/moss/src/client/prune.rs b/moss/src/client/prune.rs similarity index 100% rename from crates/moss/src/client/prune.rs rename to moss/src/client/prune.rs diff --git a/crates/moss/src/config.rs b/moss/src/config.rs similarity index 100% rename from crates/moss/src/config.rs rename to moss/src/config.rs diff --git a/crates/moss/src/db/layout/migrations/20230919225204_init.sql b/moss/src/db/layout/migrations/20230919225204_init.sql similarity index 100% rename from crates/moss/src/db/layout/migrations/20230919225204_init.sql rename to moss/src/db/layout/migrations/20230919225204_init.sql diff --git a/crates/moss/src/db/layout/mod.rs b/moss/src/db/layout/mod.rs similarity index 98% rename from crates/moss/src/db/layout/mod.rs rename to moss/src/db/layout/mod.rs index 1cf681c62..e8ad0a17e 100644 --- a/crates/moss/src/db/layout/mod.rs +++ b/moss/src/db/layout/mod.rs @@ -116,7 +116,7 @@ impl Database { sqlx::QueryBuilder::new( " - INSERT INTO layout + INSERT INTO layout ( package_id, uid, @@ -166,7 +166,7 @@ impl Database { let mut query = sqlx::QueryBuilder::new( " DELETE FROM layout - WHERE package_id IN ( + WHERE package_id IN ( ", ); @@ -307,7 +307,7 @@ mod test { .await .unwrap(); - let bash_completion = include_bytes!("../../../../../test/bash-completion-2.11-1-1-x86_64.stone"); + let bash_completion = include_bytes!("../../../../test/bash-completion-2.11-1-1-x86_64.stone"); let mut stone = stone::read_bytes(bash_completion).unwrap(); diff --git a/crates/moss/src/db/meta/migrations/20230912204438_init.sql b/moss/src/db/meta/migrations/20230912204438_init.sql similarity index 100% rename from crates/moss/src/db/meta/migrations/20230912204438_init.sql rename to moss/src/db/meta/migrations/20230912204438_init.sql diff --git a/crates/moss/src/db/meta/mod.rs b/moss/src/db/meta/mod.rs similarity index 97% rename from crates/moss/src/db/meta/mod.rs rename 
to moss/src/db/meta/mod.rs index 77f10fed2..299c4ed97 100644 --- a/crates/moss/src/db/meta/mod.rs +++ b/moss/src/db/meta/mod.rs @@ -38,7 +38,7 @@ impl Filter { query .push( " - where provider = + where provider = ", ) .push_bind(p.encode()); @@ -46,8 +46,8 @@ impl Filter { query .push( " - where package in - (select distinct package from meta_providers where provider = + where package in + (select distinct package from meta_providers where provider = ", ) .push_bind(p.encode()) @@ -59,7 +59,7 @@ impl Filter { query .push( " - where dependency = + where dependency = ", ) .push_bind(d.encode()); @@ -67,8 +67,8 @@ impl Filter { query .push( " - where package in - (select distinct package from meta_dependencies where dependency = + where package in + (select distinct package from meta_dependencies where dependency = ", ) .push_bind(d.encode()) @@ -80,7 +80,7 @@ impl Filter { query .push( " - where name = + where name = ", ) .push_bind(n.encode().to_string()); @@ -88,8 +88,8 @@ impl Filter { query .push( " - where package in - (select distinct package from meta where name = + where package in + (select distinct package from meta where name = ", ) .push_bind(n.encode().to_string()) @@ -244,7 +244,7 @@ impl Database { let entry_query = sqlx::query_as::<_, encoding::Entry>( " - SELECT package, + SELECT package, name, version_identifier, source_release, @@ -356,7 +356,7 @@ impl Database { homepage, uri, hash, - download_size + download_size ) ", ) @@ -575,7 +575,7 @@ mod test { .await .unwrap(); - let bash_completion = include_bytes!("../../../../../test/bash-completion-2.11-1-1-x86_64.stone"); + let bash_completion = include_bytes!("../../../../test/bash-completion-2.11-1-1-x86_64.stone"); let mut stone = stone::read_bytes(bash_completion).unwrap(); diff --git a/crates/moss/src/db/mod.rs b/moss/src/db/mod.rs similarity index 100% rename from crates/moss/src/db/mod.rs rename to moss/src/db/mod.rs diff --git a/crates/moss/src/db/state/migrations/20230912172712_init.sql b/moss/src/db/state/migrations/20230912172712_init.sql similarity index 100% rename from crates/moss/src/db/state/migrations/20230912172712_init.sql rename to moss/src/db/state/migrations/20230912172712_init.sql diff --git a/crates/moss/src/db/state/mod.rs b/moss/src/db/state/mod.rs similarity index 100% rename from crates/moss/src/db/state/mod.rs rename to moss/src/db/state/mod.rs diff --git a/crates/moss/src/dependency.rs b/moss/src/dependency.rs similarity index 100% rename from crates/moss/src/dependency.rs rename to moss/src/dependency.rs diff --git a/crates/moss/src/environment.rs b/moss/src/environment.rs similarity index 100% rename from crates/moss/src/environment.rs rename to moss/src/environment.rs diff --git a/crates/moss/src/installation.rs b/moss/src/installation.rs similarity index 100% rename from crates/moss/src/installation.rs rename to moss/src/installation.rs diff --git a/crates/moss/src/lib.rs b/moss/src/lib.rs similarity index 100% rename from crates/moss/src/lib.rs rename to moss/src/lib.rs diff --git a/crates/moss/src/main.rs b/moss/src/main.rs similarity index 100% rename from crates/moss/src/main.rs rename to moss/src/main.rs diff --git a/crates/moss/src/package/meta.rs b/moss/src/package/meta.rs similarity index 100% rename from crates/moss/src/package/meta.rs rename to moss/src/package/meta.rs diff --git a/crates/moss/src/package/mod.rs b/moss/src/package/mod.rs similarity index 100% rename from crates/moss/src/package/mod.rs rename to moss/src/package/mod.rs diff --git a/crates/moss/src/package/render.rs 
b/moss/src/package/render.rs similarity index 100% rename from crates/moss/src/package/render.rs rename to moss/src/package/render.rs diff --git a/crates/moss/src/registry/job.rs b/moss/src/registry/job.rs similarity index 100% rename from crates/moss/src/registry/job.rs rename to moss/src/registry/job.rs diff --git a/crates/moss/src/registry/mod.rs b/moss/src/registry/mod.rs similarity index 100% rename from crates/moss/src/registry/mod.rs rename to moss/src/registry/mod.rs diff --git a/crates/moss/src/registry/plugin/active.rs b/moss/src/registry/plugin/active.rs similarity index 100% rename from crates/moss/src/registry/plugin/active.rs rename to moss/src/registry/plugin/active.rs diff --git a/crates/moss/src/registry/plugin/cobble.rs b/moss/src/registry/plugin/cobble.rs similarity index 100% rename from crates/moss/src/registry/plugin/cobble.rs rename to moss/src/registry/plugin/cobble.rs diff --git a/crates/moss/src/registry/plugin/mod.rs b/moss/src/registry/plugin/mod.rs similarity index 100% rename from crates/moss/src/registry/plugin/mod.rs rename to moss/src/registry/plugin/mod.rs diff --git a/crates/moss/src/registry/plugin/repository.rs b/moss/src/registry/plugin/repository.rs similarity index 100% rename from crates/moss/src/registry/plugin/repository.rs rename to moss/src/registry/plugin/repository.rs diff --git a/crates/moss/src/registry/transaction.rs b/moss/src/registry/transaction.rs similarity index 100% rename from crates/moss/src/registry/transaction.rs rename to moss/src/registry/transaction.rs diff --git a/crates/moss/src/repository/manager.rs b/moss/src/repository/manager.rs similarity index 100% rename from crates/moss/src/repository/manager.rs rename to moss/src/repository/manager.rs diff --git a/crates/moss/src/repository/mod.rs b/moss/src/repository/mod.rs similarity index 100% rename from crates/moss/src/repository/mod.rs rename to moss/src/repository/mod.rs diff --git a/crates/moss/src/request.rs b/moss/src/request.rs similarity index 100% rename from crates/moss/src/request.rs rename to moss/src/request.rs diff --git a/crates/moss/src/state.rs b/moss/src/state.rs similarity index 100% rename from crates/moss/src/state.rs rename to moss/src/state.rs diff --git a/crates/moss/src/stone.rs b/moss/src/stone.rs similarity index 100% rename from crates/moss/src/stone.rs rename to moss/src/stone.rs From 51eeebc357393e025fa20577b3baa7da49f51632 Mon Sep 17 00:00:00 2001 From: Cory Forsstrom Date: Tue, 27 Feb 2024 10:06:32 -0800 Subject: [PATCH 14/26] Simplify DB conversions --- Cargo.lock | 36 ++++++++++ Cargo.toml | 1 + moss/Cargo.toml | 2 + moss/src/client/cache.rs | 2 +- moss/src/db/layout/mod.rs | 17 +++-- moss/src/db/meta/mod.rs | 82 ++++++++++----------- moss/src/db/mod.rs | 145 -------------------------------------- moss/src/db/state/mod.rs | 43 +++++------ moss/src/dependency.rs | 16 +++++ moss/src/package/meta.rs | 37 ++-------- moss/src/package/mod.rs | 22 +----- moss/src/state.rs | 26 ++----- 12 files changed, 141 insertions(+), 288 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 941ce32dc..b458c9835 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -387,6 +387,12 @@ dependencies = [ "thiserror", ] +[[package]] +name = "convert_case" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" + [[package]] name = "core-foundation" version = "0.9.4" @@ -556,6 +562,19 @@ dependencies = [ "zeroize", ] +[[package]] +name = "derive_more" +version = "0.99.17" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" +dependencies = [ + "convert_case", + "proc-macro2", + "quote", + "rustc_version", + "syn 1.0.109", +] + [[package]] name = "dialoguer" version = "0.11.0" @@ -1280,6 +1299,7 @@ dependencies = [ "config", "container", "dag", + "derive_more", "futures", "hex", "itertools 0.12.0", @@ -1293,6 +1313,7 @@ dependencies = [ "sha2", "sqlx", "stone", + "strum", "thiserror", "tokio", "tokio-stream", @@ -1748,6 +1769,15 @@ version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" +[[package]] +name = "rustc_version" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" +dependencies = [ + "semver", +] + [[package]] name = "rustix" version = "0.38.30" @@ -1829,6 +1859,12 @@ dependencies = [ "untrusted", ] +[[package]] +name = "semver" +version = "1.0.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92d43fe69e652f3df9bdc2b85b2854a0825b86e4fb76bc44d945137d053639ca" + [[package]] name = "serde" version = "1.0.196" diff --git a/Cargo.toml b/Cargo.toml index b56573bf5..dcf0e3c22 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,6 +18,7 @@ bytes = "1.5.0" chrono = "0.4.30" clap = { version = "4.4.11", features = ["derive", "string"] } crossterm = "0.27.0" +derive_more = "0.99" dialoguer = "0.11.0" dirs = "5.0" elf = "0.7.4" diff --git a/moss/Cargo.toml b/moss/Cargo.toml index 781720580..e9043761d 100644 --- a/moss/Cargo.toml +++ b/moss/Cargo.toml @@ -14,6 +14,7 @@ bitflags.workspace = true bytes.workspace = true chrono.workspace = true clap.workspace = true +derive_more.workspace = true itertools.workspace = true futures.workspace = true hex.workspace = true @@ -25,6 +26,7 @@ reqwest.workspace = true serde.workspace = true serde_yaml.workspace = true sha2.workspace = true +strum.workspace = true sqlx.workspace = true tokio.workspace = true tokio-stream.workspace = true diff --git a/moss/src/client/cache.rs b/moss/src/client/cache.rs index fb87f7523..4fa4a4660 100644 --- a/moss/src/client/cache.rs +++ b/moss/src/client/cache.rs @@ -174,7 +174,7 @@ impl Download { task::spawn_blocking(move || { let content_dir = self.installation.cache_path("content"); - let content_path = content_dir.join(self.id.as_ref()); + let content_path = content_dir.join(self.id); create_dir_all(&content_dir)?; diff --git a/moss/src/db/layout/mod.rs b/moss/src/db/layout/mod.rs index e8ad0a17e..7decce993 100644 --- a/moss/src/db/layout/mod.rs +++ b/moss/src/db/layout/mod.rs @@ -6,15 +6,13 @@ use std::collections::HashSet; use sqlx::sqlite::SqliteConnectOptions; use sqlx::{Pool, Sqlite}; -use stone::payload::{self}; +use stone::payload; use thiserror::Error; use tokio::sync::Mutex; use crate::package; use crate::Installation; -use super::Encoding; - #[derive(Debug)] pub struct Database { pool: Mutex>, @@ -76,7 +74,7 @@ impl Database { let entry = encoding::decode_entry(entry_type, entry_value1, entry_value2)?; Some(( - package_id.0, + package_id, payload::Layout { uid, gid, @@ -140,7 +138,7 @@ impl Database { let (entry_type, entry_value1, entry_value2) = encoding::encode_entry(entry); - b.push_bind(id.encode().to_owned()) + b.push_bind(id.to_string()) .push_bind(uid) .push_bind(gid) .push_bind(mode) @@ -172,7 +170,7 @@ impl Database { let mut separated 
= query.separated(", "); packages.into_iter().for_each(|pkg| { - separated.push_bind(pkg.encode()); + separated.push_bind(pkg.to_string()); }); separated.push_unseparated(");"); @@ -196,7 +194,7 @@ entry_value2 FROM layout WHERE package_id = ?", ) - .bind(package.encode()); + .bind(package.to_string()); let layouts = query.fetch_all(&*pool).await?; @@ -240,11 +238,12 @@ mod encoding { use sqlx::FromRow; use stone::payload; - use crate::{db::Decoder, package}; + use crate::package; #[derive(FromRow)] pub struct Layout { - pub package_id: Decoder<package::Id>, + #[sqlx(try_from = "String")] + pub package_id: package::Id, pub uid: u32, pub gid: u32, pub mode: u32, diff --git a/moss/src/db/meta/mod.rs b/moss/src/db/meta/mod.rs index 299c4ed97..0319b6fc4 100644 --- a/moss/src/db/meta/mod.rs +++ b/moss/src/db/meta/mod.rs @@ -11,7 +11,6 @@ use sqlx::{Executor, QueryBuilder}; use thiserror::Error; use tokio::sync::Mutex; -use crate::db::Encoding; use crate::package::{self, Meta}; use crate::{Dependency, Provider}; @@ -41,7 +40,7 @@ impl Filter { where provider = ", ) - .push_bind(p.encode()); + .push_bind(p.to_string()); } else { query .push( @@ -50,7 +49,7 @@ impl Filter { (select distinct package from meta_providers where provider = ", ) - .push_bind(p.encode()) + .push_bind(p.to_string()) .push(")"); } } @@ -62,7 +61,7 @@ where dependency = ", ) - .push_bind(d.encode()); + .push_bind(d.to_string()); } else { query .push( @@ -71,7 +70,7 @@ (select distinct package from meta_dependencies where dependency = ", ) - .push_bind(d.encode()) + .push_bind(d.to_string()) .push(")"); } } @@ -83,7 +82,7 @@ where name = ", ) - .push_bind(n.encode().to_string()); + .push_bind(n.to_string()); } else { query .push( @@ -92,7 +91,7 @@ (select distinct package from meta where name = ", ) - .push_bind(n.encode().to_string()) + .push_bind(n.to_string()) .push(")"); } } @@ -204,9 +203,9 @@ impl Database { .into_iter() .map(|entry| { ( - entry.id.0.clone(), + entry.id.clone(), Meta { - name: entry.name.0, + name: entry.name, version_identifier: entry.version_identifier, source_release: entry.source_release as u64, build_release: entry.build_release as u64, @@ -217,18 +216,18 @@ impl Database { homepage: entry.homepage, licenses: licenses .iter() - .filter(|l| l.id.0 == entry.id.0) + .filter(|l| l.id == entry.id) .map(|l| l.license.clone()) .collect(), dependencies: dependencies .iter() - .filter(|l| l.id.0 == entry.id.0) - .map(|d| d.dependency.0.clone()) + .filter(|l| l.id == entry.id) + .map(|d| d.dependency.clone()) .collect(), providers: providers .iter() - .filter(|l| l.id.0 == entry.id.0) - .map(|p| p.provider.0.clone()) + .filter(|l| l.id == entry.id) + .map(|p| p.provider.clone()) .collect(), uri: entry.uri, hash: entry.hash, @@ -261,7 +260,7 @@ impl Database { WHERE package = ?; ", ) - .bind(package.encode()); + .bind(package.to_string()); let licenses_query = sqlx::query_as::<_, encoding::License>( " @@ -270,7 +269,7 @@ WHERE package = ?; ", ) - .bind(package.encode()); + .bind(package.to_string()); let dependencies_query = sqlx::query_as::<_, encoding::Dependency>( " @@ -279,7 +278,7 @@ WHERE package = ?; ", ) - .bind(package.encode()); + .bind(package.to_string()); let providers_query = sqlx::query_as::<_, encoding::Provider>( " @@ -288,7 +287,7 @@ WHERE package = ?; ", ) - .bind(package.encode()); + .bind(package.to_string()); let entry = entry_query.fetch_one(&*pool).await?; let licenses =
licenses_query.fetch_all(&*pool).await?; @@ -296,7 +295,7 @@ impl Database { let providers = providers_query.fetch_all(&*pool).await?; Ok(Meta { - name: entry.name.0, + name: entry.name, version_identifier: entry.version_identifier, source_release: entry.source_release as u64, build_release: entry.build_release as u64, @@ -306,8 +305,8 @@ impl Database { source_id: entry.source_id, homepage: entry.homepage, licenses: licenses.into_iter().map(|l| l.license).collect(), - dependencies: dependencies.into_iter().map(|d| d.dependency.0).collect(), - providers: providers.into_iter().map(|p| p.provider.0).collect(), + dependencies: dependencies.into_iter().map(|d| d.dependency).collect(), + providers: providers.into_iter().map(|p| p.provider).collect(), uri: entry.uri, hash: entry.hash, download_size: entry.download_size.map(|i| i as u64), @@ -377,8 +376,8 @@ impl Database { .. } = meta; - b.push_bind(id.encode()) - .push_bind(name.encode()) + b.push_bind(id.to_string()) + .push_bind(name.to_string()) .push_bind(version_identifier) .push_bind(*source_release as i64) .push_bind(*build_release as i64) @@ -407,7 +406,7 @@ impl Database { ", ) .push_values(licenses, |mut b, (id, license)| { - b.push_bind(id.encode()).push_bind(license); + b.push_bind(id.to_string()).push_bind(license); }) .build() .execute(transaction.acquire().await?) @@ -426,7 +425,7 @@ impl Database { ", ) .push_values(dependencies, |mut b, (id, dependency)| { - b.push_bind(id.encode()).push_bind(dependency.encode()); + b.push_bind(id.to_string()).push_bind(dependency.to_string()); }) .build() .execute(transaction.acquire().await?) @@ -445,7 +444,7 @@ impl Database { ", ) .push_values(providers, |mut b, (id, provider)| { - b.push_bind(id.encode()).push_bind(provider.encode()); + b.push_bind(id.to_string()).push_bind(provider.to_string()); }) .build() .execute(transaction.acquire().await?) 
@@ -480,7 +479,7 @@ async fn batch_remove_impl<'a>( let mut separated = query_builder.separated(", "); packages.into_iter().for_each(|package| { - separated.push_bind(package.encode()); + separated.push_bind(package.to_string()); }); separated.push_unseparated(");"); @@ -511,14 +510,14 @@ impl From for Error { mod encoding { use sqlx::FromRow; - use crate::db::Decoder; use crate::package; #[derive(FromRow)] pub struct Entry { - #[sqlx(rename = "package")] - pub id: Decoder<package::Id>, - pub name: Decoder<package::Name>, + #[sqlx(rename = "package", try_from = "String")] + pub id: package::Id, + #[sqlx(try_from = "String")] + pub name: package::Name, pub version_identifier: String, pub source_release: i64, pub build_release: i64, @@ -534,28 +533,31 @@ #[derive(FromRow)] pub struct License { - #[sqlx(rename = "package")] - pub id: Decoder<package::Id>, + #[sqlx(rename = "package", try_from = "String")] + pub id: package::Id, pub license: String, } #[derive(FromRow)] pub struct Dependency { - #[sqlx(rename = "package")] - pub id: Decoder<package::Id>, - pub dependency: Decoder<crate::Dependency>, + #[sqlx(rename = "package", try_from = "String")] + pub id: package::Id, + #[sqlx(try_from = "&'a str")] + pub dependency: crate::Dependency, } #[derive(FromRow)] pub struct Provider { - #[sqlx(rename = "package")] - pub id: Decoder<package::Id>, - pub provider: Decoder<crate::Provider>, + #[sqlx(rename = "package", try_from = "String")] + pub id: package::Id, + #[sqlx(try_from = "&'a str")] + pub provider: crate::Provider, } #[derive(FromRow)] pub struct ProviderPackage { - pub package: Decoder<package::Id>, + #[sqlx(try_from = "String")] + pub package: package::Id, } } diff --git a/moss/src/db/mod.rs b/moss/src/db/mod.rs index b53d30120..75ef4538f 100644 --- a/moss/src/db/mod.rs +++ b/moss/src/db/mod.rs @@ -2,151 +2,6 @@ // // SPDX-License-Identifier: MPL-2.0 -pub use self::encoding::{Decoder, Encoding}; - pub mod layout; pub mod meta; pub mod state; - -mod encoding { - //! Decode from sql types to rust types - use std::convert::Infallible; - - use sqlx::{Sqlite, Type}; - use thiserror::Error; - - use crate::{dependency, package, state, Dependency, Provider}; - - /// Decode from a database type using [`Encoding::decode`] - #[derive(Debug, Clone, Copy)] - pub struct Decoder<T>(pub T); - - /// A trait to define an encoding between a sql type and rust type - pub trait Encoding<'a>: Sized { - type Encoded: ToOwned; - type Error; - - fn decode(encoded: Self::Encoded) -> Result<Self, Self::Error>; - fn encode(&'a self) -> Self::Encoded; - } - - impl<'r, T, U, E> sqlx::Decode<'r, Sqlite> for Decoder<T> - where - T: Encoding<'r, Encoded = U, Error = E>, - U: sqlx::Decode<'r, Sqlite> + ToOwned, - E: std::error::Error + Send + Sync + 'static, - { - fn decode( - value: <Sqlite as sqlx::database::HasValueRef<'r>>::ValueRef, - ) -> Result<Self, sqlx::error::BoxDynError> { - Ok(T::decode(U::decode(value)?).map(Decoder)?)
- } - } - - impl<T, U, E> Type<Sqlite> for Decoder<T> - where - T: Encoding<'static, Encoded = U, Error = E>, - U: ToOwned + Type<Sqlite>, - { - fn type_info() -> <Sqlite as sqlx::Database>::TypeInfo { - U::type_info() - } - - fn compatible(ty: &<Sqlite as sqlx::Database>::TypeInfo) -> bool { - U::compatible(ty) - } - } - - /** Encoding on external types */ - - /// Encoding of package identity (String) - impl<'a> Encoding<'a> for package::Id { - type Encoded = &'a str; - type Error = Infallible; - - fn decode(encoded: &'a str) -> Result<Self, Self::Error> { - Ok(package::Id::from(encoded.to_owned())) - } - - fn encode(&'a self) -> &'a str { - self.as_ref() - } - } - - /// Encoding of package name (String) - impl<'a> Encoding<'a> for package::Name { - type Encoded = &'a str; - type Error = Infallible; - - fn decode(encoded: &'a str) -> Result<Self, Self::Error> { - Ok(package::Name::from(encoded.to_owned())) - } - - fn encode(&'a self) -> &'a str { - self.as_ref() - } - } - - /// Encoding of Dependency type - impl<'a> Encoding<'a> for Dependency { - type Encoded = String; - type Error = dependency::ParseError; - - fn decode(encoded: String) -> Result<Self, Self::Error> { - encoded.parse() - } - - fn encode(&self) -> String { - self.to_string() - } - } - - /// Encoding of Provider type - impl<'a> Encoding<'a> for Provider { - type Encoded = String; - type Error = dependency::ParseError; - - fn decode(encoded: String) -> Result<Self, Self::Error> { - encoded.parse() - } - - fn encode(&self) -> String { - self.to_string() - } - } - - impl<'a> Encoding<'a> for state::Id { - type Encoded = i64; - type Error = Infallible; - - fn decode(value: i64) -> Result<Self, Self::Error> { - Ok(Self::from(value)) - } - - fn encode(&self) -> i64 { - (*self).into() - } - } - - impl<'a> Encoding<'a> for state::Kind { - type Encoded = &'a str; - type Error = DecodeStateKindError; - - fn decode(value: &'a str) -> Result<Self, Self::Error> { - match value { - "transaction" => Ok(Self::Transaction), - _ => Err(DecodeStateKindError(value.to_string())), - } - } - - fn encode(&self) -> Self::Encoded { - match self { - state::Kind::Transaction => "transaction", - } - } - } - - #[derive(Debug, Error)] - #[error("Invalid state type: {0}")] - pub struct DecodeStateKindError(String); -} diff --git a/moss/src/db/state/mod.rs b/moss/src/db/state/mod.rs index 02dd2fbf9..d01e94e22 100644 --- a/moss/src/db/state/mod.rs +++ b/moss/src/db/state/mod.rs @@ -8,7 +8,6 @@ use sqlx::{Acquire, Executor, Pool, Sqlite}; use thiserror::Error; use tokio::sync::Mutex; -use crate::db::Encoding; use crate::state::{self, Id, Selection}; use crate::{Installation, State}; @@ -50,7 +49,7 @@ impl Database { .fetch_all(&*pool) .await?; - Ok(states.into_iter().map(|state| (state.id.0, state.created)).collect()) + Ok(states.into_iter().map(|state| (state.id, state.created)).collect()) } pub async fn get(&self, id: &Id) -> Result<State, Error> { @@ -63,7 +62,7 @@ WHERE id = ?; ", ) - .bind(id.encode()); + .bind(i64::from(*id)); let selections_query = sqlx::query_as::<_, encoding::Selection>( " SELECT package_id, @@ -73,7 +72,7 @@ WHERE state_id = ?; ", ) - .bind(id.encode()); + .bind(i64::from(*id)); let state = state_query.fetch_one(&*pool).await?; let selections_rows = selections_query.fetch_all(&*pool).await?; @@ -81,19 +80,19 @@ let selections = selections_rows .into_iter() .map(|row| Selection { - package: row.package_id.0, + package: row.package_id, explicit: row.explicit, reason: row.reason, }) .collect(); Ok(State { - id: state.id.0, + id: state.id, summary: state.summary, description: state.description, selections, created: state.created, - kind: state.kind.0, + kind: state.kind, }) } @@ -113,7
+112,7 @@ impl Database { RETURNING id; ", ) - .bind(state::Kind::Transaction.encode()) + .bind(state::Kind::Transaction.to_string()) .bind(summary) .bind(description) .fetch_one(transaction.acquire().await?) @@ -128,8 +127,8 @@ impl Database { ", ) .push_values(selections, |mut b, selection| { - b.push_bind(id.0.encode()) - .push_bind(selection.package.encode()) + b.push_bind(i64::from(id)) + .push_bind(selection.package.to_string()) .push_bind(selection.explicit) .push_bind(selection.reason.as_ref()); }) @@ -141,7 +140,7 @@ impl Database { transaction.commit().await?; drop(pool); - let state = self.get(&id.0).await?; + let state = self.get(&id).await?; Ok(state) } @@ -162,7 +161,7 @@ impl Database { let mut separated = query.separated(", "); states.into_iter().for_each(|id| { - separated.push_bind(id.encode()); + separated.push_bind(i64::from(*id)); }); separated.push_unseparated(");"); @@ -184,20 +183,22 @@ mod encoding { use chrono::{DateTime, Utc}; use sqlx::FromRow; - use super::{state, Id}; - use crate::{db::Decoder, package}; + use crate::package; + use crate::state::{self, Id}; #[derive(FromRow)] pub struct Created { - pub id: Decoder, + #[sqlx(try_from = "i64")] + pub id: Id, pub created: DateTime, } #[derive(FromRow)] pub struct State { - pub id: Decoder, - #[sqlx(rename = "type")] - pub kind: Decoder, + #[sqlx(try_from = "i64")] + pub id: Id, + #[sqlx(rename = "type", try_from = "&'a str")] + pub kind: state::Kind, pub created: DateTime, pub summary: Option, pub description: Option, @@ -205,12 +206,14 @@ mod encoding { #[derive(FromRow)] pub struct StateId { - pub id: Decoder, + #[sqlx(try_from = "i64")] + pub id: Id, } #[derive(FromRow)] pub struct Selection { - pub package_id: Decoder, + #[sqlx(try_from = "String")] + pub package_id: package::Id, pub explicit: bool, pub reason: Option, } diff --git a/moss/src/dependency.rs b/moss/src/dependency.rs index 7d80762cc..e58f9b047 100644 --- a/moss/src/dependency.rs +++ b/moss/src/dependency.rs @@ -160,6 +160,14 @@ impl FromStr for Dependency { } } +impl<'a> TryFrom<&'a str> for Dependency { + type Error = ParseError; + + fn try_from(value: &'a str) -> Result { + Self::from_str(value) + } +} + #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct Provider { pub kind: Kind, @@ -207,6 +215,14 @@ impl FromStr for Provider { } } +impl<'a> TryFrom<&'a str> for Provider { + type Error = ParseError; + + fn try_from(value: &'a str) -> Result { + Self::from_str(value) + } +} + fn parse(s: &str) -> Result<(Kind, String), ParseError> { let (kind, rest) = s.split_once('(').ok_or(ParseError(s.to_string()))?; diff --git a/moss/src/package/meta.rs b/moss/src/package/meta.rs index 3fdd2a574..8d7413f5b 100644 --- a/moss/src/package/meta.rs +++ b/moss/src/package/meta.rs @@ -2,51 +2,22 @@ // // SPDX-License-Identifier: MPL-2.0 -use std::{collections::BTreeSet, fmt}; +use std::collections::BTreeSet; +use derive_more::{AsRef, Display, From, Into}; use stone::payload; use thiserror::Error; use crate::{dependency, Dependency, Provider}; /// A package identifier constructed from metadata fields -#[derive(Debug, Clone, PartialEq, Eq, Hash)] +#[derive(Debug, Clone, PartialEq, Eq, Hash, Display)] pub struct Id(pub(super) String); -impl fmt::Display for Id { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.0.fmt(f) - } -} - /// The name of a [`Package`] -#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)] +#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, AsRef, From, Into, Display)] pub struct Name(String); -impl From for Name { 
- fn from(name: String) -> Self { - Self(name) - } -} - -impl From for String { - fn from(name: Name) -> Self { - name.0 - } -} - -impl AsRef for Name { - fn as_ref(&self) -> &str { - &self.0 - } -} - -impl fmt::Display for Name { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.0.fmt(f) - } -} - /// The metadata of a [`Package`] #[derive(Debug, Clone, PartialEq, Eq)] pub struct Meta { diff --git a/moss/src/package/mod.rs b/moss/src/package/mod.rs index e918d7f9b..0b7b0ea65 100644 --- a/moss/src/package/mod.rs +++ b/moss/src/package/mod.rs @@ -3,6 +3,7 @@ // SPDX-License-Identifier: MPL-2.0 use bitflags::bitflags; +use derive_more::{AsRef, Display, From, Into}; use itertools::Itertools; pub use self::meta::{Meta, MissingMetaFieldError, Name}; @@ -11,27 +12,10 @@ pub mod meta; pub mod render; /// Unique ID of a [`Package`] -#[derive(Debug, Default, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[derive(Debug, Default, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, From, Into, AsRef, Display)] +#[as_ref(forward)] pub struct Id(String); -impl From for Id { - fn from(id: String) -> Self { - Self(id) - } -} - -impl From for String { - fn from(id: Id) -> Self { - id.0 - } -} - -impl AsRef for Id { - fn as_ref(&self) -> &str { - self.0.as_str() - } -} - impl From for meta::Id { fn from(id: Id) -> Self { meta::Id(id.0) diff --git a/moss/src/state.rs b/moss/src/state.rs index fe9ddbe86..bc0aab526 100644 --- a/moss/src/state.rs +++ b/moss/src/state.rs @@ -2,15 +2,16 @@ // // SPDX-License-Identifier: MPL-2.0 -use std::{fmt, io::Write}; +use std::io::Write; use chrono::{DateTime, Utc}; +use derive_more::{Display, From, Into}; use tui::{pretty, Stylize}; use crate::package; /// Unique identifier for [`State`] -#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)] +#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, From, Into, Display)] pub struct Id(i64); impl Id { @@ -19,27 +20,10 @@ impl Id { } } -impl From for Id { - fn from(id: i64) -> Self { - Id(id) - } -} - -impl From for i64 { - fn from(id: Id) -> Self { - id.0 - } -} - -impl fmt::Display for Id { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.0.fmt(f) - } -} - /// State types -#[derive(Debug, Clone, Copy, PartialEq, Eq)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, strum::Display, strum::EnumString)] #[repr(u8)] +#[strum(serialize_all = "kebab-case")] pub enum Kind { /// Automatically constructed state Transaction, From 2a730ff25cf0d360d8d2c6fdc89f61f8944b74a4 Mon Sep 17 00:00:00 2001 From: Cory Forsstrom Date: Tue, 27 Feb 2024 10:34:32 -0800 Subject: [PATCH 15/26] Use strum / derive_more everywhere --- Cargo.lock | 3 ++ boulder/Cargo.toml | 1 + boulder/src/architecture.rs | 15 ++----- boulder/src/profile.rs | 11 ++---- crates/container/Cargo.toml | 1 + crates/container/src/idmap.rs | 16 ++------ crates/stone/Cargo.toml | 1 + crates/stone/src/payload/meta.rs | 28 +++---------- moss/src/dependency.rs | 67 ++++++-------------------------- moss/src/installation.rs | 14 ++----- moss/src/repository/mod.rs | 31 ++------------- 11 files changed, 42 insertions(+), 146 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b458c9835..4560b5fb3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -197,6 +197,7 @@ dependencies = [ "clap", "config", "container", + "derive_more", "dirs", "elf", "futures", @@ -384,6 +385,7 @@ name = "container" version = "0.1.0" dependencies = [ "nix", + "strum", "thiserror", ] @@ -2256,6 +2258,7 @@ name = "stone" version = "0.1.0" dependencies = [ "criterion", + "strum", "thiserror", 
"xxhash-rust", "zstd", diff --git a/boulder/Cargo.toml b/boulder/Cargo.toml index 5fd7616c6..155b8ff6e 100644 --- a/boulder/Cargo.toml +++ b/boulder/Cargo.toml @@ -14,6 +14,7 @@ tui = { path = "../crates/tui" } yaml = { path = "../crates/yaml" } clap.workspace = true +derive_more.workspace = true dirs.workspace = true elf.workspace = true glob.workspace = true diff --git a/boulder/src/architecture.rs b/boulder/src/architecture.rs index 08e7754e4..9b604b9cc 100644 --- a/boulder/src/architecture.rs +++ b/boulder/src/architecture.rs @@ -2,7 +2,7 @@ // // SPDX-License-Identifier: MPL-2.0 -use std::fmt; +use derive_more::Display; pub const fn host() -> Architecture { #[cfg(target_arch = "x86_64")] @@ -37,9 +37,11 @@ impl Architecture { } } -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Display)] pub enum BuildTarget { + #[display(fmt = "{_0}")] Native(Architecture), + #[display(fmt = "emul32/{_0}")] Emul32(Architecture), } @@ -55,12 +57,3 @@ impl BuildTarget { } } } - -impl fmt::Display for BuildTarget { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - BuildTarget::Native(arch) => write!(f, "{arch}"), - BuildTarget::Emul32(arch) => write!(f, "emul32/{arch}"), - } - } -} diff --git a/boulder/src/profile.rs b/boulder/src/profile.rs index e1085c5d8..31601693b 100644 --- a/boulder/src/profile.rs +++ b/boulder/src/profile.rs @@ -2,9 +2,10 @@ // // SPDX-License-Identifier: MPL-2.0 -use std::{collections::HashMap, fmt}; +use std::collections::HashMap; use config::Config; +use derive_more::Display; use moss::repository; pub use moss::{repository::Priority, Repository}; use serde::{Deserialize, Serialize}; @@ -13,7 +14,7 @@ use thiserror::Error; use crate::Env; /// A unique [`Profile`] identifier -#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] +#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, Display)] #[serde(from = "String")] pub struct Id(String); @@ -28,12 +29,6 @@ impl Id { } } -impl fmt::Display for Id { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.0.fmt(f) - } -} - impl From for Id { fn from(value: String) -> Self { Self::new(value) diff --git a/crates/container/Cargo.toml b/crates/container/Cargo.toml index 00ee6c48c..7c7641432 100644 --- a/crates/container/Cargo.toml +++ b/crates/container/Cargo.toml @@ -7,4 +7,5 @@ edition.workspace = true [dependencies] nix.workspace = true +strum.workspace = true thiserror.workspace = true diff --git a/crates/container/src/idmap.rs b/crates/container/src/idmap.rs index ff753ff71..a8f0eb403 100644 --- a/crates/container/src/idmap.rs +++ b/crates/container/src/idmap.rs @@ -2,7 +2,7 @@ // // SPDX-License-Identifier: MPL-2.0 -use std::{fmt, fs, process::Command}; +use std::{fs, process::Command}; use nix::unistd::{getgid, getuid, Pid, User}; use thiserror::Error; @@ -24,22 +24,14 @@ pub fn idmap(pid: Pid) -> Result<(), Error> { Ok(()) } -#[derive(Debug, Clone, Copy)] +#[derive(Debug, Clone, Copy, strum::Display)] pub enum Kind { + #[strum(serialize = "uid")] User, + #[strum(serialize = "gid")] Group, } -impl fmt::Display for Kind { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - Kind::User => "uid", - Kind::Group => "gid", - } - .fmt(f) - } -} - fn load_sub_mappings(kind: Kind, id: u32, username: &str) -> Result, Error> { let Ok(content) = fs::read_to_string(format!("/etc/sub{kind}")) else { ensure_sub_count(kind, id, &[])?; diff --git a/crates/stone/Cargo.toml b/crates/stone/Cargo.toml 
index 1e69d3ab3..f8eee587a 100644 --- a/crates/stone/Cargo.toml +++ b/crates/stone/Cargo.toml @@ -6,6 +6,7 @@ edition.workspace = true # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] +strum.workspace = true thiserror.workspace = true xxhash-rust.workspace = true zstd.workspace = true diff --git a/crates/stone/src/payload/meta.rs b/crates/stone/src/payload/meta.rs index 2b04c7dbe..2bd1244ee 100644 --- a/crates/stone/src/payload/meta.rs +++ b/crates/stone/src/payload/meta.rs @@ -2,10 +2,7 @@ // // SPDX-License-Identifier: MPL-2.0 -use std::{ - fmt::Display, - io::{Read, Write}, -}; +use std::io::{Read, Write}; use super::{DecodeError, EncodeError, Record}; use crate::{ReadExt, WriteExt}; @@ -21,12 +18,15 @@ pub struct Meta { } #[repr(u8)] -#[derive(Debug, Clone, Copy, PartialEq, Eq)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, strum::Display)] +#[strum(serialize_all = "lowercase")] pub enum Dependency { /// Just the plain name of a package + #[strum(serialize = "name")] PackageName = 0, /// A soname based dependency + #[strum(serialize = "soname")] SharedLibary, /// A pkgconfig `.pc` based dependency @@ -45,29 +45,13 @@ pub enum Dependency { Binary, /// A binary in /usr/sbin + #[strum(serialize = "sysbinary")] SystemBinary, /// An emul32-compatible pkgconfig .pc dependency (lib32/*.pc) PkgConfig32, } -/// Override display for `pkgconfig32(name)` style strings -impl Display for Dependency { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - Dependency::PackageName => write!(f, "name"), - Dependency::SharedLibary => write!(f, "soname"), - Dependency::PkgConfig => write!(f, "pkgconfig"), - Dependency::Interpreter => write!(f, "interpreter"), - Dependency::CMake => write!(f, "cmake"), - Dependency::Python => write!(f, "python"), - Dependency::Binary => write!(f, "binary"), - Dependency::SystemBinary => write!(f, "sysbinary"), - Dependency::PkgConfig32 => write!(f, "pkgconfig32"), - } - } -} - #[repr(u8)] #[derive(Debug, Clone, PartialEq, Eq)] pub enum Kind { diff --git a/moss/src/dependency.rs b/moss/src/dependency.rs index e58f9b047..89dd5a8a1 100644 --- a/moss/src/dependency.rs +++ b/moss/src/dependency.rs @@ -2,17 +2,21 @@ // // SPDX-License-Identifier: MPL-2.0 -use std::{fmt, str::FromStr}; +use std::str::FromStr; +use derive_more::Display; use stone::payload; use thiserror::Error; -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, strum::Display, strum::EnumString)] +#[strum(serialize_all = "lowercase")] pub enum Kind { /// Name based dependency + #[strum(serialize = "name")] PackageName, /// Shared library (soname) + #[strum(serialize = "soname")] SharedLibary, /// Exported pkg-config provider @@ -31,49 +35,13 @@ pub enum Kind { Binary, /// Executable in /usr/sbin + #[strum(serialize = "sysbinary")] SystemBinary, /// Exported 32-bit pkgconfig provider PkgConfig32, } -/// Custom pretty-print, i.e `pkgconfig(zlib)` -impl fmt::Display for Kind { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - Kind::PackageName => write!(f, "name"), - Kind::SharedLibary => write!(f, "soname"), - Kind::PkgConfig => write!(f, "pkgconfig"), - Kind::Interpreter => write!(f, "interpreter"), - Kind::CMake => write!(f, "cmake"), - Kind::Python => write!(f, "python"), - Kind::Binary => write!(f, "binary"), - Kind::SystemBinary => write!(f, "sysbinary"), - Kind::PkgConfig32 => write!(f, "pkgconfig32"), - } - } -} - -/// Decode a name into 
a Kind (yaml helper)
-impl FromStr for Kind {
-    type Err = ParseError;
-
-    fn from_str(s: &str) -> Result<Self, Self::Err> {
-        Ok(match s {
-            "name" => Kind::PackageName,
-            "soname" => Kind::SharedLibary,
-            "pkgconfig" => Kind::PkgConfig,
-            "interpreter" => Kind::Interpreter,
-            "cmake" => Kind::CMake,
-            "python" => Kind::Python,
-            "binary" => Kind::Binary,
-            "sysbinary" => Kind::SystemBinary,
-            "pkgconfig32" => Kind::PkgConfig32,
-            _ => return Err(ParseError(s.to_string())),
-        })
-    }
-}
-
 /// Convert payload dependency types to our internal representation
 impl From<payload::meta::Dependency> for Kind {
     fn from(dependency: payload::meta::Dependency) -> Self {
@@ -109,7 +77,8 @@ impl From<Kind> for payload::meta::Dependency {
 
 /// A Dependency in moss is simplistic in that it only contains
 /// a target and a Kind, ie. `pkgconfig(zlib)`
-#[derive(Debug, Clone, PartialEq, Eq, Hash)]
+#[derive(Debug, Clone, PartialEq, Eq, Hash, Display)]
+#[display(fmt = "{kind}({name})")]
 pub struct Dependency {
     /// Tag for the table-type of dependency
     pub kind: Kind,
@@ -143,13 +112,6 @@ impl Ord for Dependency {
     }
 }
 
-/// Pretty-printing of dependencies (e.g.: `binary(whoami)`)
-impl fmt::Display for Dependency {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        write!(f, "{}({})", self.kind, self.name)
-    }
-}
-
 impl FromStr for Dependency {
     type Err = ParseError;
 
@@ -168,7 +130,8 @@ impl<'a> TryFrom<&'a str> for Dependency {
     }
 }
 
-#[derive(Debug, Clone, PartialEq, Eq, Hash)]
+#[derive(Debug, Clone, PartialEq, Eq, Hash, Display)]
+#[display(fmt = "{kind}({name})")]
 pub struct Provider {
     pub kind: Kind,
     pub name: String,
@@ -199,12 +162,6 @@ impl Ord for Provider {
     }
 }
 
-impl fmt::Display for Provider {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        write!(f, "{}({})", self.kind, self.name)
-    }
-}
-
 impl FromStr for Provider {
     type Err = ParseError;
 
@@ -230,7 +187,7 @@ fn parse(s: &str) -> Result<(Kind, String), ParseError> {
         return Err(ParseError(s.to_string()));
     }
 
-    let kind = kind.parse()?;
+    let kind = kind.parse::<Kind>().map_err(|e| ParseError(e.to_string()))?;
 
     // Safe since we checked `ends_with(')')`
     let name = rest[0..rest.len() - 1].to_string();
diff --git a/moss/src/installation.rs b/moss/src/installation.rs
index 5e1cd348a..f28c91237 100644
--- a/moss/src/installation.rs
+++ b/moss/src/installation.rs
@@ -3,7 +3,7 @@
 // SPDX-License-Identifier: MPL-2.0
 
 use std::{
-    fmt, fs,
+    fs,
     path::{Path, PathBuf},
 };
 
@@ -13,7 +13,8 @@ use nix::unistd::{access, AccessFlags, Uid};
 use crate::state;
 
 /// System mutability - do we have readwrite?
-#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+#[derive(Debug, Clone, Copy, PartialEq, Eq, strum::Display)]
+#[strum(serialize_all = "kebab-case")]
 pub enum Mutability {
     /// We only have readonly access
     ReadOnly,
@@ -21,15 +22,6 @@ pub enum Mutability {
     ReadWrite,
 }
 
-impl fmt::Display for Mutability {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        match self {
-            Mutability::ReadOnly => "read-only".fmt(f),
-            Mutability::ReadWrite => "read-write".fmt(f),
-        }
-    }
-}
-
 /// An Installation is a general encapsulation pattern for a root filesystem
 /// as seen from moss.
 /// We're largely active in the mutability, path builders and the potential active
diff --git a/moss/src/repository/mod.rs b/moss/src/repository/mod.rs
index 5ee21ae1d..8f784d7d1 100644
--- a/moss/src/repository/mod.rs
+++ b/moss/src/repository/mod.rs
@@ -2,9 +2,10 @@
 //
 // SPDX-License-Identifier: MPL-2.0
 
-use std::{collections::HashMap, fmt, path::Path};
+use std::{collections::HashMap, path::Path};
 
 use config::Config;
+use derive_more::{Display, From, Into};
 use futures::StreamExt;
 use serde::{Deserialize, Serialize};
 use thiserror::Error;
@@ -21,7 +22,7 @@ pub use self::manager::Manager;
 pub mod manager;
 
 /// A unique [`Repository`] identifier
-#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
+#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize, From, Display)]
 #[serde(from = "String")]
 pub struct Id(String);
 
@@ -36,18 +37,6 @@ impl Id {
     }
 }
 
-impl fmt::Display for Id {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        self.0.fmt(f)
-    }
-}
-
-impl From<String> for Id {
-    fn from(value: String) -> Self {
-        Self::new(value)
-    }
-}
-
 /// Repository configuration data
 #[derive(Debug, Clone, Serialize, Deserialize)]
 pub struct Repository {
@@ -66,7 +55,7 @@ pub struct Active {
 }
 
 /// The selection priority of a [`Repository`]
-#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Display, Into)]
 pub struct Priority(u64);
 
 impl Priority {
@@ -75,18 +64,6 @@ impl Priority {
     }
 }
 
-impl fmt::Display for Priority {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        self.0.fmt(f)
-    }
-}
-
-impl From<Priority> for u64 {
-    fn from(priority: Priority) -> Self {
-        priority.0
-    }
-}
-
 impl PartialOrd for Priority {
     fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
         Some(self.cmp(other))

From 1025687d5d722f3185c533b5ef44826c8961fd89 Mon Sep 17 00:00:00 2001
From: Rune Morling
Date: Wed, 28 Feb 2024 17:27:11 +0100
Subject: [PATCH 16/26] boulder: Add %cargo_* actions

Signed-off-by: Rune Morling
---
 boulder/data/macros/actions/cargo.yml | 38 +++++++++++++++++++++++++++
 1 file changed, 38 insertions(+)
 create mode 100644 boulder/data/macros/actions/cargo.yml

diff --git a/boulder/data/macros/actions/cargo.yml b/boulder/data/macros/actions/cargo.yml
new file mode 100644
index 000000000..89c4cf911
--- /dev/null
+++ b/boulder/data/macros/actions/cargo.yml
@@ -0,0 +1,38 @@
+actions:
+    # Fetch dependencies
+    - cargo_fetch:
+        command: |
+            cargo fetch -v --locked
+        dependencies:
+            - rust
+
+    # Build the rust project
+    - cargo_build:
+        command: |
+            cargo build -v -j "%(jobs)" --frozen --release --target %(target_triple) \
+                --config profile.release.debug=\"full\" \
+                --config profile.release.split-debuginfo=\"off\" \
+                --config profile.release.strip=\"none\"
+        dependencies:
+            - rust
+
+    # Install the built binary
+    - cargo_install:
+        command: |
+            cargo_install(){
+                if [ $# -eq 1 ]; then
+                    %install_bin target/%(target_triple)/release/"$1"
+                else
+                    %install_bin target/%(target_triple)/release/%(name)
+                fi
+            }
+            cargo_install
+        dependencies:
+            - rust
+
+    # Run tests
+    - cargo_test:
+        command: |
+            cargo test -v -j "%(jobs)" --frozen --release --target %(target_triple) --workspace
+        dependencies:
+            - rust

From 2ef50fcddb27c44c1f9515356a1e1fd32850186a Mon Sep 17 00:00:00 2001
From: Rune Morling
Date: Wed, 28 Feb 2024 18:13:37 +0100
Subject: [PATCH 17/26] boulder: Bring data/ up to parity with boulder-d

Note that a new x86_64-v3x.yml target definition was added in preparation
for the point where we will
begin building the scaffolding to use it. Signed-off-by: Rune Morling --- boulder/data/macros/actions/cmake.yml | 8 +++++++ boulder/data/macros/actions/meson.yml | 6 +++++ boulder/data/macros/arch/aarch64.yml | 1 + boulder/data/macros/arch/base.yml | 26 ++++++++++++++++++++-- boulder/data/macros/arch/emul32/x86_64.yml | 1 + boulder/data/macros/arch/x86.yml | 1 + boulder/data/macros/arch/x86_64-stage1.yml | 3 ++- boulder/data/macros/arch/x86_64-v3x.yml | 20 +++++++++++++++++ boulder/data/macros/arch/x86_64.yml | 3 +++ boulder/data/recipeTemplate.yml | 14 ++++++++++++ 10 files changed, 80 insertions(+), 3 deletions(-) create mode 100644 boulder/data/macros/arch/x86_64-v3x.yml create mode 100644 boulder/data/recipeTemplate.yml diff --git a/boulder/data/macros/actions/cmake.yml b/boulder/data/macros/actions/cmake.yml index ca58ca5f2..93b3068eb 100644 --- a/boulder/data/macros/actions/cmake.yml +++ b/boulder/data/macros/actions/cmake.yml @@ -28,6 +28,14 @@ actions: dependencies: - ninja + # Run testsuite with ctest + - cmake_test: + command: | + ninja test -v -j "%(jobs)" -C "%(builddir)" + dependencies: + - cmake + - ninja + definitions: # Default cmake options as passed to cmake diff --git a/boulder/data/macros/actions/meson.yml b/boulder/data/macros/actions/meson.yml index b7465ed79..f0390bd48 100644 --- a/boulder/data/macros/actions/meson.yml +++ b/boulder/data/macros/actions/meson.yml @@ -30,6 +30,12 @@ actions: dependencies: - meson + - meson_test: + command: | + meson test --no-rebuild --print-errorlogs -j "%(jobs)" -C "%(builddir)" + dependencies: + - meson + definitions: # Default meson options as passed to meson diff --git a/boulder/data/macros/arch/aarch64.yml b/boulder/data/macros/arch/aarch64.yml index 2aaa5e718..74903529f 100644 --- a/boulder/data/macros/arch/aarch64.yml +++ b/boulder/data/macros/arch/aarch64.yml @@ -10,6 +10,7 @@ definitions: - cpp : "%(compiler_cpp) -m64" - march : armv8-a+simd+fp+crypto - mtune : cortex-a72.cortex-a53 + - target_triple : "aarch64-unknown-linux-gnu" flags: diff --git a/boulder/data/macros/arch/base.yml b/boulder/data/macros/arch/base.yml index 9970399b1..75b0ec4c2 100644 --- a/boulder/data/macros/arch/base.yml +++ b/boulder/data/macros/arch/base.yml @@ -38,6 +38,7 @@ definitions: - cpp : "%(compiler_cpp)" - objcpp : "%(compiler_objcpp)" - objcxxcpp : "%(compiler_objcxxcpp)" + - d : "%(compiler_d)" - ar : "%(compiler_ar)" - ld : "%(compiler_ld)" - objcopy : "%(compiler_objcopy)" @@ -64,6 +65,7 @@ actions : CGO_CXXFLAGS="%(cxxflags)"; export CGO_CXXFLAGS LDFLAGS="%(ldflags)"; export LDFLAGS CGO_LDFLAGS="%(ldflags) -Wl,--no-gc-sections"; export CGO_LDFLAGS + DFLAGS="%(dflags)"; export DFLAGS CC="%(cc)"; export CC CXX="%(cxx)"; export CXX OBJC="%(objc)"; export OBJC @@ -286,14 +288,17 @@ flags : c : "-pipe -Wformat -Wformat-security -Wno-error -fPIC" cxx : "-pipe -Wformat -Wformat-security -Wno-error -fPIC" ld : "-Wl,-O2,--gc-sections" + d : "-release -Hkeep-all-bodies -relocation-model=pic -wi" - omit-frame-pointer: c : "-fomit-frame-pointer -momit-leaf-frame-pointer" cxx : "-fomit-frame-pointer -momit-leaf-frame-pointer" + d : "-frame-pointer=none" - no-omit-frame-pointer: c : "-fno-omit-frame-pointer -mno-omit-leaf-frame-pointer" cxx : "-fno-omit-frame-pointer -mno-omit-leaf-frame-pointer" + d : "-frame-pointer=all" # Toggle bindnow (ON) - bindnow: @@ -347,28 +352,35 @@ flags : - optimize-generic: c : "-O2" cxx : "-O2" + d : "-O2" # Optimize for size (OFF) - optimize-size: c : "-Os" cxx : "-Os" + d : "-Os" # Optimize for speed (OFF) - 
optimize-speed: c : "-O3" cxx : "-O3" + d : "-O3" # Enable LTO optimisations (OFF) - lto-full: c : "-flto" cxx : "-flto" ld : "-flto" + llvm: + d : "-flto=full" + # Enable Thin-LTO optimisations (OFF) - lto-thin: llvm: c : "-flto=thin" cxx : "-flto=thin" + d : "-flto=thin" ld : "-flto=thin" # Enable LTOextra optimisations (OFF) @@ -406,6 +418,7 @@ flags : llvm: c : "-Xclang -mllvm -Xclang -polly -Xclang -mllvm -Xclang -polly-vectorizer=stripmine" cxx : "-Xclang -mllvm -Xclang -polly -Xclang -mllvm -Xclang -polly-vectorizer=stripmine" + d : "-polly -polly-vectorizer=stripmine" # Toggle options you want to use with llvm-bolt (OFF) - bolt: @@ -426,14 +439,21 @@ flags : llvm: c : "-gline-tables-only -fasynchronous-unwind-tables" cxx : "-gline-tables-only -fasynchronous-unwind-tables" + d : "-gline-tables-only -gc" # Toggle debug-std optimisations (ON) - debug-std: - c : "-g -feliminate-unused-debug-types -fasynchronous-unwind-tables" - cxx : "-g -feliminate-unused-debug-types -fasynchronous-unwind-tables" + d : "-g -gc -d-debug" + gnu: + c : "-g -feliminate-unused-debug-types -fasynchronous-unwind-tables" + cxx : "-g -feliminate-unused-debug-types -fasynchronous-unwind-tables" + llvm: + c : "-g -fasynchronous-unwind-tables" + cxx : "-g -fasynchronous-unwind-tables" # Toggle fast math (OFF) - math: + d : "-ffast-math -fp-contract=fast" gnu: c : "-fno-math-errno -fno-trapping-math" cxx : "-fno-math-errno -fno-trapping-math" @@ -445,6 +465,7 @@ flags : - noplt: c : "-fno-plt" cxx : "-fno-plt" + d : "-fno-plt" # Toggle -fno-semantic-interposition (OFF) - nosemantic: @@ -486,6 +507,7 @@ flags : - visibility-hidden: c : "-fvisibility=hidden" cxx : "-fvisibility-inlines-hidden -fvisibility=hidden" + d : "-fvisibility=hidden" # Toggle visibility inlines hidden (OFF) - visibility-inline: diff --git a/boulder/data/macros/arch/emul32/x86_64.yml b/boulder/data/macros/arch/emul32/x86_64.yml index be4b96fd1..e065dc489 100644 --- a/boulder/data/macros/arch/emul32/x86_64.yml +++ b/boulder/data/macros/arch/emul32/x86_64.yml @@ -10,6 +10,7 @@ definitions: - cpp : "%(compiler_cpp) -m32" - march : i686 - mtune : i686 + - target_triple : "i686-unknown-linux-gnu" - pkgconfigpath : "%(libdir)/pkgconfig:/usr/share/pkgconfig:%(prefix)/lib/pkgconfig" flags: diff --git a/boulder/data/macros/arch/x86.yml b/boulder/data/macros/arch/x86.yml index 06ef7e92c..368cf223b 100644 --- a/boulder/data/macros/arch/x86.yml +++ b/boulder/data/macros/arch/x86.yml @@ -10,6 +10,7 @@ definitions: - cpp : "%(compiler_cpp) -m32" - march : i686 - mtune : i686 + - target_triple : "i686-unknown-linux-gnu" flags: diff --git a/boulder/data/macros/arch/x86_64-stage1.yml b/boulder/data/macros/arch/x86_64-stage1.yml index a1e3b0b8c..dfb87a2bd 100644 --- a/boulder/data/macros/arch/x86_64-stage1.yml +++ b/boulder/data/macros/arch/x86_64-stage1.yml @@ -15,6 +15,7 @@ definitions: - cpp : "%(compiler_cpp)" - march : x86-64-v2 - mtune : ivybridge + - target_triple : "x86_64-unknown-linux-gnu" - bootstrap_root : /bill flags: @@ -26,4 +27,4 @@ flags: defaultTuningGroups : - base - - optimize \ No newline at end of file + - optimize diff --git a/boulder/data/macros/arch/x86_64-v3x.yml b/boulder/data/macros/arch/x86_64-v3x.yml new file mode 100644 index 000000000..34c9c4310 --- /dev/null +++ b/boulder/data/macros/arch/x86_64-v3x.yml @@ -0,0 +1,20 @@ +# Provides -m64 builds for x86_64-v3x build-hosts + +definitions: + + - libsuffix : "" + - build_platform : x86_64-%(vendorID) + - host_platform : x86_64-%(vendorID) + - cc : "%(compiler_c)" + - cxx : 
"%(compiler_cxx)" + - cpp : "%(compiler_cpp)" + - march : x86-64-v3 + - mtune : znver1 + +flags: + + # Set architecture flags (GCC) + - architecture: + c : "-march=x86-64-v3 -mtune=znver1 -maes -mfsgsbase -mpclmul -mrdrnd -maes -mxsaveopt" + cxx : "-march=x86-64-v3 -mtune=znver1 -maes -mfsgsbase -mpclmul -mrdrnd -maes -mxsaveopt" + diff --git a/boulder/data/macros/arch/x86_64.yml b/boulder/data/macros/arch/x86_64.yml index 7e812f8bd..62338b0de 100644 --- a/boulder/data/macros/arch/x86_64.yml +++ b/boulder/data/macros/arch/x86_64.yml @@ -8,8 +8,10 @@ definitions: - cc : "%(compiler_c)" - cxx : "%(compiler_cxx)" - cpp : "%(compiler_cpp)" + - d : "%(compiler_d)" - march : x86-64-v2 - mtune : ivybridge + - target_triple : "x86_64-unknown-linux-gnu" flags: @@ -17,3 +19,4 @@ flags: - architecture: c : "-march=x86-64-v2 -mtune=ivybridge" cxx : "-march=x86-64-v2 -mtune=ivybridge" + d : "-mcpu=x86-64-v2" diff --git a/boulder/data/recipeTemplate.yml b/boulder/data/recipeTemplate.yml new file mode 100644 index 000000000..238a45aaf --- /dev/null +++ b/boulder/data/recipeTemplate.yml @@ -0,0 +1,14 @@ +# +# SPDX-FileCopyrightText: © 2020-2024 Serpent OS Developers +# +# SPDX-License-Identifier: MPL-2.0 +# +name : %s +version : %s +release : %s +homepage : %s +upstreams : +%s +summary : %s +description : | + %s From 26aa55a4b6e50ff01b2751f19d298ce4fe5976f5 Mon Sep 17 00:00:00 2001 From: Rune Morling Date: Wed, 28 Feb 2024 19:00:35 +0100 Subject: [PATCH 18/26] boulder: Create README.md Adds a note on user name spaces. Signed-off-by: Rune Morling --- boulder/README.md | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) create mode 100644 boulder/README.md diff --git a/boulder/README.md b/boulder/README.md new file mode 100644 index 000000000..4a4be0805 --- /dev/null +++ b/boulder/README.md @@ -0,0 +1,29 @@ +# Boulder + +This directory contains the Serpent OS package building tool `boulder`. + +## Building boulder + +To build boulder, use the `boulder` target: + + cargo build -p boulder + +This will produce a debug build by default, which is available as `./target/debug/boulder` + +The [onboarding/ repository](https://github.com/serpent-os/onboarding/) is in the process of being updated to default to building the Rust based boulder. + +## Configuring user namespaces + +Boulder supports building as your own user, using a feature called "user namespaces". + +If your username is `bob` with `UID = 1000` and `GID = 1000` then you will need to add the following files with the following contents: + + $ echo 'bob:100000:65536' |sudo tee /etc/subuid + $ echo 'bob:100000:65536' |sudo tee /etc/subgid + +NB: The above assumes you haven't already configured user namespaces. 
+
+You can check your username, UID and GID with `grep ${USER} /etc/passwd`, where your username is the first field, the UID is the third field and the GID is the fourth field:
+
+    $ grep ${USER} /etc/passwd
+    bob:x:1000:1000:bob:/home/bob:/bin/bash

From 46ab75b5307593731d2f537037912ab30bdee610 Mon Sep 17 00:00:00 2001
From: Rune Morling
Date: Wed, 28 Feb 2024 21:01:42 +0100
Subject: [PATCH 19/26] boulder: Tweak Analysis Phase output to look slick

Signed-off-by: Rune Morling
---
 boulder/src/package/analysis.rs | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/boulder/src/package/analysis.rs b/boulder/src/package/analysis.rs
index 33267611a..088fdcf56 100644
--- a/boulder/src/package/analysis.rs
+++ b/boulder/src/package/analysis.rs
@@ -47,7 +47,7 @@ impl<'a> Chain<'a> {
     }
 
     pub fn process(&mut self, paths: impl IntoIterator<Item = PathBuf>) -> Result<(), BoxError> {
-        println!("Analyzing artefacts\n");
+        println!("│Analyzing artefacts (» = Include, × = Ignore)");
 
         let mut queue = paths.into_iter().collect::<VecDeque<_>>();
 
@@ -90,16 +90,16 @@ impl<'a> Chain<'a> {
                     Decision::NextHandler => continue 'handlers,
                     Decision::IgnoreFile { reason } => {
                         pb.println(format!(
-                            "{} {}{}",
-                            "Ignored ".yellow(),
-                            path.target_path.display(),
-                            format!(" ({reason})").dim()
+                            "│A{} {} {}",
+                            "│ ×".yellow(),
+                            format!("{}", path.target_path.display()).dim(),
+                            format!("({reason})").yellow()
                         ));
                         pb.inc(1);
                         continue 'paths;
                     }
                     Decision::IncludeFile => {
-                        pb.println(format!("{} {}", "Included".green(), path.target_path.display()));
+                        pb.println(format!("│A{} {}", "│ »".green(), path.target_path.display()));
                         pb.inc(1);
                         bucket.paths.push(path);
                         continue 'paths;

From 9f5ea307f451c8829556f88a6ef987be073eacce Mon Sep 17 00:00:00 2001
From: Cory Forsstrom
Date: Wed, 28 Feb 2024 15:51:09 -0800
Subject: [PATCH 20/26] Deasyncify all but network fetching

---
 Cargo.lock                                  |   4 +-
 boulder/Cargo.toml                          |   1 +
 boulder/src/build.rs                        |  31 +-
 boulder/src/build/root.rs                   |  23 +-
 boulder/src/build/upstream.rs               | 162 ++---
 boulder/src/cli/profile.rs                  |  37 +-
 boulder/src/cli/recipe.rs                   |   9 +-
 boulder/src/env.rs                          |   6 +-
 boulder/src/lib.rs                          |   2 -
 boulder/src/macros.rs                       |   4 +-
 boulder/src/package.rs                      |   8 +-
 boulder/src/package/analysis.rs             |   3 +-
 boulder/src/package/analysis/handler/elf.rs |   2 +-
 boulder/src/package/collect.rs              |   4 +-
 boulder/src/package/emit.rs                 |   2 +-
 boulder/src/package/emit/manifest.rs        |   1 -
 boulder/src/package/emit/manifest/binary.rs |  11 +-
 boulder/src/paths.rs                        |  10 +-
 boulder/src/profile.rs                      |   7 +-
 boulder/src/runtime.rs                      |  36 --
 boulder/src/util.rs                         | 156 ++---
 crates/config/Cargo.toml                    |   3 -
 crates/config/src/lib.rs                    |  42 +-
 moss/src/cli/extract.rs                     |  11 +-
 moss/src/cli/index.rs                       | 126 ++--
 moss/src/cli/info.rs                        |  13 +-
 moss/src/cli/inspect.rs                     |  25 +-
 moss/src/cli/install.rs                     |   6 +-
 moss/src/cli/list.rs                        |   9 +-
 moss/src/cli/mod.rs                         |  26 +-
 moss/src/cli/remove.rs                      |  18 +-
 moss/src/cli/repo.rs                        |  62 +--
 moss/src/cli/state.rs                       |  27 +-
 moss/src/cli/sync.rs                        |  54 +-
 moss/src/client/cache.rs                    | 160 +++--
 moss/src/client/install.rs                  |  34 +-
 moss/src/client/mod.rs                      | 250 ++++----
 moss/src/client/postblit.rs                 |   4 +-
 moss/src/client/prune.rs                    |  93 ++-
 moss/src/config.rs                          | 158 -----
 moss/src/db/layout/mod.rs                   | 336 +++++-----
 moss/src/db/meta/mod.rs                     | 656 ++++++++++----------
 moss/src/db/mod.rs                          |  26 +
 moss/src/db/state/mod.rs                    | 253 ++++----
 moss/src/lib.rs                             |   2 +-
 moss/src/main.rs                            |   5 +-
 moss/src/registry/job.rs                    |  49 --
 moss/src/registry/mod.rs                    |  70 +--
 moss/src/registry/plugin/active.rs          |  18 +-
 moss/src/registry/plugin/cobble.rs          |  28 +-
 moss/src/registry/plugin/mod.rs             |  56 +-
 moss/src/registry/plugin/repository.rs      |  23 +-
 moss/src/registry/transaction.rs            |  57 +-
 moss/src/repository/manager.rs              | 154 +++--
 moss/src/runtime.rs                         |  69 ++
 moss/src/stone.rs                           |  67 --
 56 files changed, 1573 insertions(+), 1936 deletions(-)
 delete mode 100644 boulder/src/runtime.rs
 delete mode 100644 moss/src/config.rs
 delete mode 100644 moss/src/registry/job.rs
 create mode 100644 moss/src/runtime.rs
 delete mode 100644 moss/src/stone.rs

diff --git a/Cargo.lock b/Cargo.lock
index 4560b5fb3..ac7e53487 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -211,6 +211,7 @@ dependencies = [
  "serde_json",
  "serde_yaml",
  "sha2",
+ "stone",
  "stone_recipe",
  "strum",
  "thiserror",
@@ -353,12 +354,9 @@ name = "config"
 version = "0.1.0"
 dependencies = [
  "dirs",
- "futures",
  "serde",
  "serde_yaml",
  "thiserror",
- "tokio",
- "tokio-stream",
 ]
 
 [[package]]
diff --git a/boulder/Cargo.toml b/boulder/Cargo.toml
index 155b8ff6e..49f9e30c8 100644
--- a/boulder/Cargo.toml
+++ b/boulder/Cargo.toml
@@ -9,6 +9,7 @@ edition.workspace = true
 config = { path = "../crates/config" }
 container = { path = "../crates/container" }
 moss = { path = "../moss" }
+stone = { path = "../crates/stone" }
 stone_recipe = { path = "../crates/stone_recipe" }
 tui = { path = "../crates/tui" }
 yaml = { path = "../crates/yaml" }
diff --git a/boulder/src/build.rs b/boulder/src/build.rs
index 5fdb36902..199274ed8 100644
--- a/boulder/src/build.rs
+++ b/boulder/src/build.rs
@@ -7,9 +7,11 @@ use std::{
     os::unix::process::ExitStatusExt,
     path::{Path, PathBuf},
     process, thread,
+    time::Duration,
 };
 
 use itertools::Itertools;
+use moss::runtime;
 use nix::{
     sys::signal::Signal,
     unistd::{getpgrp, setpgid, Pid},
@@ -31,7 +33,7 @@ use crate::{
     architecture::BuildTarget,
     container, macros,
     package::{self, Packager},
-    profile, recipe, util, Env, Macros, Paths, Recipe, Runtime,
+    profile, recipe, util, Env, Macros, Paths, Recipe,
 };
 
 pub struct Builder {
@@ -103,18 +105,25 @@ impl Builder {
     pub fn setup(&self) -> Result<(), Error> {
         root::clean(self)?;
 
-        let rt = Runtime::new()?;
-        rt.block_on(async {
-            let profiles = profile::Manager::new(&self.env).await;
+        let rt = runtime::init();
 
-            let repos = profiles.repositories(&self.profile)?.clone();
+        let profiles = profile::Manager::new(&self.env);
+        let repos = profiles.repositories(&self.profile)?.clone();
 
-            root::populate(self, repos).await?;
-            upstream::sync(&self.recipe, &self.paths).await?;
+        root::populate(self, repos)?;
+        upstream::sync(&self.recipe, &self.paths)?;
 
-            Ok(()) as Result<_, Error>
-        })?;
         rt.destroy();
+        // We want to ensure no threads exist before
+        // cloning into container. Sometimes a deadlock
+        // occurs which appears related to a race condition
+        // from some thread artifacts still existing. Adding
+        // this delay allows things to get cleaned up.
+        // NOTE: This appears to reliably fix the problem,
+        // I ran boulder 100 times w/ and w/out this delay
+        // and the deadlock never occurred w/ it, but w/out
+        // it occurred within 10 attempts.
+ thread::sleep(Duration::from_millis(50)); Ok(()) } @@ -143,11 +152,11 @@ impl Builder { let is_pgo = job.pgo_stage.is_some(); // Recreate work dir for each job - util::sync::recreate_dir(&job.work_dir)?; + util::recreate_dir(&job.work_dir)?; // Ensure pgo dir exists if is_pgo { let pgo_dir = PathBuf::from(format!("{}-pgo", job.build_dir.display())); - util::sync::ensure_dir_exists(&pgo_dir)?; + util::ensure_dir_exists(&pgo_dir)?; } if let Some(stage) = job.pgo_stage { diff --git a/boulder/src/build/root.rs b/boulder/src/build/root.rs index 19e25e1e3..5f4ef1879 100644 --- a/boulder/src/build/root.rs +++ b/boulder/src/build/root.rs @@ -5,26 +5,31 @@ use std::collections::HashSet; use std::io; -use moss::repository; +use moss::{repository, runtime}; use stone_recipe::{tuning::Toolchain, Upstream}; use thiserror::Error; use crate::build::Builder; use crate::{container, util}; -pub async fn populate(builder: &Builder, repositories: repository::Map) -> Result<(), Error> { +pub fn populate(builder: &Builder, repositories: repository::Map) -> Result<(), Error> { let packages = packages(builder); let rootfs = builder.paths.rootfs().host; // Recreate root - util::recreate_dir(&rootfs).await?; + util::recreate_dir(&rootfs)?; - let mut moss_client = moss::Client::with_explicit_repositories("boulder", &builder.env.moss_dir, repositories) - .await? - .ephemeral(&rootfs)?; + // Create the moss client + let mut moss_client = + moss::Client::with_explicit_repositories("boulder", &builder.env.moss_dir, repositories)?.ephemeral(&rootfs)?; - moss_client.install(&packages, true).await?; + // Ensure all configured repos have been initialized (important since users + // might add profile configs from an editor) + runtime::block_on(moss_client.ensure_repos_initialized())?; + + // Install packages + moss_client.install(&packages, true)?; Ok(()) } @@ -40,12 +45,12 @@ pub fn clean(builder: &Builder) -> Result<(), Error> { // and there's subuid mappings into the user namespace container::exec(&builder.paths, false, || { // Recreate `install` dir - util::sync::recreate_dir(&builder.paths.install().guest)?; + util::recreate_dir(&builder.paths.install().guest)?; for target in &builder.targets { for job in &target.jobs { // Recerate build dir - util::sync::recreate_dir(&job.build_dir)?; + util::recreate_dir(&job.build_dir)?; } } diff --git a/boulder/src/build/upstream.rs b/boulder/src/build/upstream.rs index ba53ec111..7a5a13d95 100644 --- a/boulder/src/build/upstream.rs +++ b/boulder/src/build/upstream.rs @@ -3,19 +3,18 @@ // SPDX-License-Identifier: MPL-2.0 use std::{ - io, + fs, io, path::{Path, PathBuf}, str::FromStr, time::Duration, }; use futures::{stream, StreamExt, TryStreamExt}; +use moss::runtime; use nix::unistd::{linkat, LinkatFlags}; use sha2::{Digest, Sha256}; use thiserror::Error; -use tokio::fs::{copy, remove_dir_all}; use tokio::io::AsyncWriteExt; -use tokio::process::Command; use tui::{MultiProgress, ProgressBar, ProgressStyle, Stylize}; use url::Url; @@ -23,7 +22,7 @@ use crate::{util, Paths, Recipe}; /// Cache all upstreams from the provided [`Recipe`] and make them available /// in the guest rootfs. 
-pub async fn sync(recipe: &Recipe, paths: &Paths) -> Result<(), Error> { +pub fn sync(recipe: &Recipe, paths: &Paths) -> Result<(), Error> { let upstreams = recipe .parsed .upstreams @@ -47,43 +46,52 @@ pub async fn sync(recipe: &Recipe, paths: &Paths) -> Result<(), Error> { tp.tick(); let upstream_dir = paths.guest_host_path(&paths.upstreams()); - util::ensure_dir_exists(&upstream_dir).await?; - - stream::iter(&upstreams) - .map(|upstream| async { - let pb = mp.insert_before( - &tp, - ProgressBar::new(u64::MAX) - .with_message(format!("{} {}", "Downloading".blue(), upstream.name().bold(),)), - ); - pb.enable_steady_tick(Duration::from_millis(150)); - - let install = upstream.fetch(paths, &pb).await?; - - pb.set_message(format!("{} {}", "Copying".yellow(), upstream.name().bold(),)); - pb.set_style( - ProgressStyle::with_template(" {spinner} {wide_msg} ") - .unwrap() - .tick_chars("--=≡■≡=--"), - ); - - install.share(&upstream_dir).await?; - - let cached_tag = install - .was_cached() - .then_some(format!("{}", " (cached)".dim())) - .unwrap_or_default(); - - pb.finish(); - mp.remove(&pb); - mp.println(format!("{} {}{}", "Shared".green(), upstream.name().bold(), cached_tag,))?; - tp.inc(1); - - Ok(()) as Result<_, Error> - }) - .buffer_unordered(moss::environment::MAX_NETWORK_CONCURRENCY) - .try_collect::<()>() - .await?; + util::ensure_dir_exists(&upstream_dir)?; + + runtime::block_on( + stream::iter(&upstreams) + .map(|upstream| async { + let pb = mp.insert_before( + &tp, + ProgressBar::new(u64::MAX).with_message(format!( + "{} {}", + "Downloading".blue(), + upstream.name().bold(), + )), + ); + pb.enable_steady_tick(Duration::from_millis(150)); + + let install = upstream.fetch(paths, &pb).await?; + + pb.set_message(format!("{} {}", "Copying".yellow(), upstream.name().bold(),)); + pb.set_style( + ProgressStyle::with_template(" {spinner} {wide_msg} ") + .unwrap() + .tick_chars("--=≡■≡=--"), + ); + + runtime::unblock({ + let install = install.clone(); + let dir = upstream_dir.clone(); + move || install.share(&dir) + }) + .await?; + + let cached_tag = install + .was_cached() + .then_some(format!("{}", " (cached)".dim())) + .unwrap_or_default(); + + pb.finish(); + mp.remove(&pb); + mp.println(format!("{} {}{}", "Shared".green(), upstream.name().bold(), cached_tag,))?; + tp.inc(1); + + Ok(()) as Result<_, Error> + }) + .buffer_unordered(moss::environment::MAX_NETWORK_CONCURRENCY) + .try_collect::<()>(), + )?; mp.clear()?; println!(); @@ -91,6 +99,7 @@ pub async fn sync(recipe: &Recipe, paths: &Paths) -> Result<(), Error> { Ok(()) } +#[derive(Clone)] enum Installed { Plain { name: String, @@ -112,7 +121,7 @@ impl Installed { } } - async fn share(&self, dest_dir: &Path) -> Result<(), Error> { + fn share(&self, dest_dir: &Path) -> Result<(), Error> { match self { Installed::Plain { name, path, .. } => { let target = dest_dir.join(name); @@ -122,12 +131,12 @@ impl Installed { // Copy instead if link_result.is_err() { - copy(&path, &target).await?; + fs::copy(path, &target)?; } } Installed::Git { name, path, .. 
} => { let target = dest_dir.join(name); - util::copy_dir(path, &target).await?; + util::copy_dir(path, &target)?; } } @@ -207,20 +216,17 @@ impl Plain { } } - async fn path(&self, paths: &Paths) -> PathBuf { + fn path(&self, paths: &Paths) -> PathBuf { // Type safe guaranteed to be >= 5 bytes let hash = &self.hash.0; - let parent = paths + paths .upstreams() .host .join("fetched") .join(&hash[..5]) - .join(&hash[hash.len() - 5..]); - - let _ = util::ensure_dir_exists(&parent).await; - - parent.join(hash) + .join(&hash[hash.len() - 5..]) + .join(hash) } async fn fetch(&self, paths: &Paths, pb: &ProgressBar) -> Result { @@ -234,7 +240,11 @@ impl Plain { ); let name = self.name(); - let path = self.path(paths).await; + let path = self.path(paths); + + if let Some(parent) = path.parent().map(Path::to_path_buf) { + runtime::unblock(move || util::recreate_dir(&parent)).await?; + } if path.exists() { return Ok(Installed::Plain { @@ -290,23 +300,26 @@ impl Git { util::uri_file_name(&self.uri) } - async fn final_path(&self, paths: &Paths) -> PathBuf { - let parent = paths.upstreams().host.join("git"); - - let _ = util::ensure_dir_exists(&parent).await; - - parent.join(util::uri_relative_path(&self.uri)) + fn final_path(&self, paths: &Paths) -> PathBuf { + paths + .upstreams() + .host + .join("git") + .join(util::uri_relative_path(&self.uri)) } - async fn staging_path(&self, paths: &Paths) -> PathBuf { - let parent = paths.upstreams().host.join("staging").join("git"); - - let _ = util::ensure_dir_exists(&parent).await; - - parent.join(util::uri_relative_path(&self.uri)) + fn staging_path(&self, paths: &Paths) -> PathBuf { + paths + .upstreams() + .host + .join("staging") + .join("git") + .join(util::uri_relative_path(&self.uri)) } async fn fetch(&self, paths: &Paths, pb: &ProgressBar) -> Result { + use tokio::fs; + pb.set_style( ProgressStyle::with_template(" {spinner} {wide_msg} ") .unwrap() @@ -314,15 +327,22 @@ impl Git { ); let clone_path = if self.staging { - self.staging_path(paths).await + self.staging_path(paths) } else { - self.final_path(paths).await + self.final_path(paths) }; let clone_path_string = clone_path.display().to_string(); - let final_path = self.final_path(paths).await; + let final_path = self.final_path(paths); let final_path_string = final_path.display().to_string(); + if let Some(parent) = clone_path.parent().map(Path::to_path_buf) { + runtime::unblock(move || util::recreate_dir(&parent)).await?; + } + if let Some(parent) = final_path.parent().map(Path::to_path_buf) { + runtime::unblock(move || util::recreate_dir(&parent)).await?; + } + if self.ref_exists(&final_path).await? 
{ self.reset_to_ref(&final_path).await?; return Ok(Installed::Git { @@ -332,9 +352,9 @@ impl Git { }); } - let _ = remove_dir_all(&clone_path).await; + let _ = fs::remove_dir_all(&clone_path).await; if self.staging { - let _ = remove_dir_all(&final_path).await; + let _ = fs::remove_dir_all(&final_path).await; } let mut args = vec!["clone"]; @@ -393,7 +413,9 @@ impl Git { } async fn run(&self, args: &[&str], cwd: Option<&Path>) -> Result<(), Error> { - let mut command = Command::new("git"); + use tokio::process; + + let mut command = process::Command::new("git"); if let Some(dir) = cwd { command.current_dir(dir); diff --git a/boulder/src/cli/profile.rs b/boulder/src/cli/profile.rs index 266c15c6b..74b6c4e5e 100644 --- a/boulder/src/cli/profile.rs +++ b/boulder/src/cli/profile.rs @@ -4,10 +4,10 @@ use std::{collections::HashMap, io}; -use boulder::{profile, Env, Profile, Runtime}; +use boulder::{profile, Env, Profile}; use clap::Parser; use itertools::Itertools; -use moss::{repository, Repository}; +use moss::{repository, runtime, Repository}; use thiserror::Error; use url::Url; @@ -75,13 +75,14 @@ fn parse_repository(s: &str) -> Result<(repository::Id, Repository), String> { } pub fn handle(command: Command, env: Env) -> Result<(), Error> { - let rt = Runtime::new()?; - let manager = rt.block_on(profile::Manager::new(&env)); + let _guard = runtime::init(); + + let manager = profile::Manager::new(&env); match command.subcommand { Subcommand::List => list(manager), - Subcommand::Add { name, repos } => rt.block_on(add(&env, manager, name, repos)), - Subcommand::Update { profile } => rt.block_on(update(&env, manager, &profile)), + Subcommand::Add { name, repos } => add(&env, manager, name, repos), + Subcommand::Update { profile } => update(&env, manager, &profile), } } @@ -106,7 +107,7 @@ pub fn list(manager: profile::Manager) -> Result<(), Error> { Ok(()) } -pub async fn add<'a>( +pub fn add<'a>( env: &'a Env, mut manager: profile::Manager<'a>, name: String, @@ -114,27 +115,25 @@ pub async fn add<'a>( ) -> Result<(), Error> { let id = profile::Id::new(name); - manager - .save_profile( - id.clone(), - Profile { - collections: repository::Map::with(repos), - }, - ) - .await?; + manager.save_profile( + id.clone(), + Profile { + collections: repository::Map::with(repos), + }, + )?; - update(env, manager, &id).await?; + update(env, manager, &id)?; println!("Profile \"{id}\" has been added"); Ok(()) } -pub async fn update<'a>(env: &'a Env, manager: profile::Manager<'a>, profile: &profile::Id) -> Result<(), Error> { +pub fn update<'a>(env: &'a Env, manager: profile::Manager<'a>, profile: &profile::Id) -> Result<(), Error> { let repos = manager.repositories(profile)?.clone(); - let mut moss_client = moss::Client::with_explicit_repositories("boulder", &env.moss_dir, repos).await?; - moss_client.refresh_repositories().await?; + let mut moss_client = moss::Client::with_explicit_repositories("boulder", &env.moss_dir, repos)?; + runtime::block_on(moss_client.refresh_repositories())?; println!("Profile {profile} updated"); diff --git a/boulder/src/cli/recipe.rs b/boulder/src/cli/recipe.rs index 522bb3d17..525cb3f0c 100644 --- a/boulder/src/cli/recipe.rs +++ b/boulder/src/cli/recipe.rs @@ -7,10 +7,9 @@ use std::{ path::PathBuf, }; -use boulder::Runtime; use clap::Parser; use futures::StreamExt; -use moss::request; +use moss::{request, runtime}; use sha2::{Digest, Sha256}; use stone_recipe::Recipe; use thiserror::Error; @@ -133,7 +132,7 @@ fn update(recipe: Option, overwrite: bool, version: String, upstreams: 
} // Needed to fetch - let rt = Runtime::new().map_err(Error::Runtime)?; + let _guard = runtime::init(); // Add all update operations let mut updater = yaml::Updater::new(); @@ -146,7 +145,7 @@ fn update(recipe: Option, overwrite: bool, version: String, upstreams: updater.update_value(version, |root| root / "version"); } Update::PlainUpstream(i, key, new_uri) => { - let hash = rt.block_on(fetch_hash(new_uri.clone()))?; + let hash = runtime::block_on(fetch_hash(new_uri.clone()))?; let path = |root| root / "upstreams" / i / key.as_str().unwrap_or_default(); @@ -216,8 +215,6 @@ pub enum Error { Fetch(#[from] request::Error), #[error("fetch upstream")] FetchIo(#[source] io::Error), - #[error("runtime")] - Runtime(#[source] io::Error), #[error("invalid utf-8 input")] Utf8(#[from] std::string::FromUtf8Error), } diff --git a/boulder/src/env.rs b/boulder/src/env.rs index 81412a731..048b10220 100644 --- a/boulder/src/env.rs +++ b/boulder/src/env.rs @@ -36,9 +36,9 @@ impl Env { let data_dir = resolve_data_dir(data_dir); let moss_dir = resolve_moss_root(is_root, moss_root)?; - util::sync::ensure_dir_exists(&cache_dir)?; - util::sync::ensure_dir_exists(&data_dir)?; - util::sync::ensure_dir_exists(&moss_dir)?; + util::ensure_dir_exists(&cache_dir)?; + util::ensure_dir_exists(&data_dir)?; + util::ensure_dir_exists(&moss_dir)?; Ok(Self { config, diff --git a/boulder/src/lib.rs b/boulder/src/lib.rs index 89ddf38a9..c4703347c 100644 --- a/boulder/src/lib.rs +++ b/boulder/src/lib.rs @@ -7,7 +7,6 @@ pub use self::macros::Macros; pub use self::paths::Paths; pub use self::profile::Profile; pub use self::recipe::Recipe; -pub use self::runtime::Runtime; pub mod architecture; pub mod build; @@ -18,5 +17,4 @@ pub mod package; pub mod paths; pub mod profile; pub mod recipe; -mod runtime; pub mod util; diff --git a/boulder/src/macros.rs b/boulder/src/macros.rs index 784e0f1b2..b52dfd11c 100644 --- a/boulder/src/macros.rs +++ b/boulder/src/macros.rs @@ -22,8 +22,8 @@ impl Macros { let matcher = |p: &Path| p.extension().and_then(|s| s.to_str()) == Some("yml"); - let arch_files = util::sync::enumerate_files(&arch_dir, matcher).map_err(Error::ArchFiles)?; - let action_files = util::sync::enumerate_files(&actions_dir, matcher).map_err(Error::ActionFiles)?; + let arch_files = util::enumerate_files(&arch_dir, matcher).map_err(Error::ArchFiles)?; + let action_files = util::enumerate_files(&actions_dir, matcher).map_err(Error::ActionFiles)?; let mut arch = HashMap::new(); let mut actions = vec![]; diff --git a/boulder/src/package.rs b/boulder/src/package.rs index 3be650243..c9eca2783 100644 --- a/boulder/src/package.rs +++ b/boulder/src/package.rs @@ -7,7 +7,7 @@ use std::{ }; use itertools::Itertools; -use moss::stone::write::digest; +use stone::write::digest; use stone_recipe::{script, Package}; use thiserror::Error; @@ -52,7 +52,7 @@ impl Packager { pub fn package(self) -> Result<(), Error> { // Remove old artifacts - util::sync::recreate_dir(&self.paths.artefacts().host).map_err(Error::RecreateArtefactsDir)?; + util::recreate_dir(&self.paths.artefacts().host).map_err(Error::RecreateArtefactsDir)?; // Executed in guest container since file permissions may be borked // for host if run rootless @@ -198,7 +198,7 @@ fn resolve_packages( } fn sync_artefacts(paths: &Paths) -> Result<(), io::Error> { - for path in util::sync::enumerate_files(&paths.artefacts().host, |_| true)? { + for path in util::enumerate_files(&paths.artefacts().host, |_| true)? 
{ let filename = path.file_name().and_then(|p| p.to_str()).unwrap_or_default(); let target = paths.recipe().host.join(filename); @@ -207,7 +207,7 @@ fn sync_artefacts(paths: &Paths) -> Result<(), io::Error> { fs::remove_file(&target)?; } - util::sync::hardlink_or_copy(&path, &target)?; + util::hardlink_or_copy(&path, &target)?; } Ok(()) } diff --git a/boulder/src/package/analysis.rs b/boulder/src/package/analysis.rs index 33267611a..edb62258c 100644 --- a/boulder/src/package/analysis.rs +++ b/boulder/src/package/analysis.rs @@ -7,7 +7,8 @@ use std::{ path::PathBuf, }; -use moss::{stone::write::digest, Dependency, Provider}; +use moss::{Dependency, Provider}; +use stone::write::digest; use tui::{ProgressBar, ProgressStyle, Stylize}; use super::collect::{Collector, PathInfo}; diff --git a/boulder/src/package/analysis/handler/elf.rs b/boulder/src/package/analysis/handler/elf.rs index fa1343710..d5d95f68a 100644 --- a/boulder/src/package/analysis/handler/elf.rs +++ b/boulder/src/package/analysis/handler/elf.rs @@ -232,7 +232,7 @@ fn split_debug( return Ok(None); } - util::sync::ensure_dir_exists(&debug_info_dir)?; + util::ensure_dir_exists(&debug_info_dir)?; let output = Command::new(objcopy) .arg("--only-keep-debug") diff --git a/boulder/src/package/collect.rs b/boulder/src/package/collect.rs index 08c60115a..7f3175810 100644 --- a/boulder/src/package/collect.rs +++ b/boulder/src/package/collect.rs @@ -10,9 +10,9 @@ use std::{ }; use glob::Pattern; -use moss::stone::payload::{layout, Layout}; -use moss::stone::write::digest; use nix::libc::{S_IFDIR, S_IRGRP, S_IROTH, S_IRWXU, S_IXGRP, S_IXOTH}; +use stone::payload::{layout, Layout}; +use stone::write::digest; use thiserror::Error; #[derive(Debug, Clone, Eq, PartialEq)] diff --git a/boulder/src/package/emit.rs b/boulder/src/package/emit.rs index c50813b33..ec17b689c 100644 --- a/boulder/src/package/emit.rs +++ b/boulder/src/package/emit.rs @@ -8,7 +8,7 @@ use std::{ }; use itertools::Itertools; -use moss::{package::Meta, stone, Dependency}; +use moss::{package::Meta, Dependency}; use thiserror::Error; use tui::{ProgressBar, ProgressReader, ProgressStyle, Stylize}; diff --git a/boulder/src/package/emit/manifest.rs b/boulder/src/package/emit/manifest.rs index 9850098ff..5de559915 100644 --- a/boulder/src/package/emit/manifest.rs +++ b/boulder/src/package/emit/manifest.rs @@ -4,7 +4,6 @@ use std::{collections::BTreeSet, io, path::PathBuf}; -use moss::stone; use thiserror::Error; use crate::{Architecture, Paths, Recipe}; diff --git a/boulder/src/package/emit/manifest/binary.rs b/boulder/src/package/emit/manifest/binary.rs index 8931f4354..93152f8b2 100644 --- a/boulder/src/package/emit/manifest/binary.rs +++ b/boulder/src/package/emit/manifest/binary.rs @@ -4,13 +4,10 @@ use std::{collections::BTreeSet, fs::File, path::Path}; -use moss::{ - stone::{ - self, - header::v1::FileType, - payload::{self, meta}, - }, - Dependency, +use moss::Dependency; +use stone::{ + header::v1::FileType, + payload::{self, meta}, }; use super::Error; diff --git a/boulder/src/paths.rs b/boulder/src/paths.rs index 9b98e0265..c289d2b85 100644 --- a/boulder/src/paths.rs +++ b/boulder/src/paths.rs @@ -39,11 +39,11 @@ impl Paths { recipe_dir, }; - util::sync::ensure_dir_exists(&job.rootfs().host)?; - util::sync::ensure_dir_exists(&job.artefacts().host)?; - util::sync::ensure_dir_exists(&job.build().host)?; - util::sync::ensure_dir_exists(&job.ccache().host)?; - util::sync::ensure_dir_exists(&job.upstreams().host)?; + util::ensure_dir_exists(&job.rootfs().host)?; + 
util::ensure_dir_exists(&job.artefacts().host)?;
+        util::ensure_dir_exists(&job.build().host)?;
+        util::ensure_dir_exists(&job.ccache().host)?;
+        util::ensure_dir_exists(&job.upstreams().host)?;
 
         Ok(job)
     }
diff --git a/boulder/src/profile.rs b/boulder/src/profile.rs
index 31601693b..6eacb8a96 100644
--- a/boulder/src/profile.rs
+++ b/boulder/src/profile.rs
@@ -92,11 +92,10 @@ pub struct Manager<'a> {
 }
 
 impl<'a> Manager<'a> {
-    pub async fn new(env: &'a Env) -> Manager<'a> {
+    pub fn new(env: &'a Env) -> Manager<'a> {
         let profiles = env
             .config
             .load::<Map>()
-            .await
             .into_iter()
             .reduce(Map::merge)
             .unwrap_or_default();
@@ -111,10 +110,10 @@ impl<'a> Manager<'a> {
             .ok_or_else(|| Error::MissingProfile(profile.clone()))
     }
 
-    pub async fn save_profile(&mut self, id: Id, profile: Profile) -> Result<(), Error> {
+    pub fn save_profile(&mut self, id: Id, profile: Profile) -> Result<(), Error> {
         // Save config
         let map = Map::with([(id.clone(), profile.clone())]);
-        self.env.config.save(id.clone(), &map).await?;
+        self.env.config.save(id.clone(), &map)?;
 
         // Add to profile map
         self.profiles.add(id, profile);
diff --git a/boulder/src/runtime.rs b/boulder/src/runtime.rs
deleted file mode 100644
index 15b24fc9e..000000000
--- a/boulder/src/runtime.rs
+++ /dev/null
@@ -1,36 +0,0 @@
-// SPDX-FileCopyrightText: Copyright © 2020-2024 Serpent OS Developers
-//
-// SPDX-License-Identifier: MPL-2.0
-
-use std::{future::Future, io, thread, time::Duration};
-
-use tokio::runtime;
-
-pub struct Runtime(runtime::Runtime);
-
-impl Runtime {
-    pub fn new() -> io::Result<Self> {
-        Ok(Self(runtime::Builder::new_multi_thread().enable_all().build()?))
-    }
-
-    pub fn block_on<F, T>(&self, task: F) -> T
-    where
-        F: Future<Output = T>,
-    {
-        self.0.block_on(task)
-    }
-
-    pub fn destroy(self) {
-        drop(self);
-        // We want to ensure no threads exist before
-        // cloning into container. Sometimes a deadlock
-        // occurs which appears related to a race condition
-        // from some thread artifacts still existing. Adding
-        // this delay allows things to get cleaned up.
-        // NOTE: This appears to reliably fix the problem,
-        // I ran boulder 100 times w/ and w/out this delay
-        // and the deadlock never occured w/ it, but w/out
-        // it occured within 10 attempts.
-        thread::sleep(Duration::from_millis(50));
-    }
-}
diff --git a/boulder/src/util.rs b/boulder/src/util.rs
index 32120fd60..7d21f94aa 100644
--- a/boulder/src/util.rs
+++ b/boulder/src/util.rs
@@ -3,67 +3,89 @@
 // SPDX-License-Identifier: MPL-2.0
 
 use std::{
-    io,
+    fs, io,
     num::NonZeroUsize,
+    os::unix::fs::symlink,
     path::{Path, PathBuf},
     thread,
 };
 
-use futures::{future::BoxFuture, FutureExt};
-use tokio::fs::{copy, create_dir_all, read_dir, read_link, remove_dir_all, symlink};
+use nix::unistd::{linkat, LinkatFlags};
 use url::Url;
 
-pub async fn ensure_dir_exists(path: &Path) -> Result<(), io::Error> {
+pub fn ensure_dir_exists(path: &Path) -> Result<(), io::Error> {
     if !path.exists() {
-        create_dir_all(path).await?;
+        fs::create_dir_all(path)?;
     }
     Ok(())
 }
 
-pub async fn recreate_dir(path: &Path) -> Result<(), io::Error> {
+pub fn recreate_dir(path: &Path) -> Result<(), io::Error> {
     if path.exists() {
-        remove_dir_all(path).await?;
+        fs::remove_dir_all(path)?;
     }
-    create_dir_all(path).await?;
+    fs::create_dir_all(path)?;
     Ok(())
 }
 
-pub fn copy_dir<'a>(source_dir: &'a Path, out_dir: &'a Path) -> BoxFuture<'a, Result<(), io::Error>> {
-    async move {
-        recreate_dir(out_dir).await?;
+pub fn copy_dir(source_dir: &Path, out_dir: &Path) -> Result<(), io::Error> {
+    recreate_dir(out_dir)?;
 
-        let mut contents = read_dir(&source_dir).await?;
+    let contents = fs::read_dir(source_dir)?;
 
-        while let Some(entry) = contents.next_entry().await? {
-            let path = entry.path();
+    for entry in contents.flatten() {
+        let path = entry.path();
 
-            if let Some(file_name) = path.file_name() {
-                let dest = out_dir.join(file_name);
-                let meta = entry.metadata().await?;
+        if let Some(file_name) = path.file_name() {
+            let dest = out_dir.join(file_name);
+            let meta = entry.metadata()?;
 
-                if meta.is_dir() {
-                    copy_dir(&path, &dest).await?;
-                } else if meta.is_file() {
-                    copy(&path, &dest).await?;
-                } else if meta.is_symlink() {
-                    symlink(read_link(&path).await?, &dest).await?;
-                }
+            if meta.is_dir() {
+                copy_dir(&path, &dest)?;
+            } else if meta.is_file() {
+                fs::copy(&path, &dest)?;
+            } else if meta.is_symlink() {
+                symlink(fs::read_link(&path)?, &dest)?;
             }
         }
+    }
+
+    Ok(())
+}
+
+pub fn enumerate_files<'a>(
+    dir: &'a Path,
+    matcher: impl Fn(&Path) -> bool + Send + Copy + 'a,
+) -> Result<Vec<PathBuf>, io::Error> {
+    use std::fs::read_dir;
+
+    let read_dir = read_dir(dir)?;
+
+    let mut paths = vec![];
+
+    for entry in read_dir {
+        let entry = entry?;
+        let path = entry.path();
+        let meta = entry.metadata()?;
 
-        Ok(())
+        if meta.is_dir() {
+            paths.extend(enumerate_files(&path, matcher)?);
+        } else if meta.is_file() && matcher(&path) {
+            paths.push(path);
+        }
     }
-    .boxed()
+
+    Ok(paths)
 }
 
-pub async fn list_dirs(dir: &Path) -> Result<Vec<PathBuf>, io::Error> {
-    let mut read_dir = read_dir(dir).await?;
+pub fn list_dirs(dir: &Path) -> Result<Vec<PathBuf>, io::Error> {
+    let read_dir = fs::read_dir(dir)?;
 
     let mut paths = vec![];
 
-    while let Some(entry) = read_dir.next_entry().await? {
+    for entry in read_dir.flatten() {
         let path = entry.path();
-        let meta = entry.metadata().await?;
+        let meta = entry.metadata()?;
 
         if meta.is_dir() {
             paths.push(path);
@@ -73,6 +95,18 @@ pub async fn list_dirs(dir: &Path) -> Result<Vec<PathBuf>, io::Error> {
     Ok(paths)
 }
 
+pub fn hardlink_or_copy(from: &Path, to: &Path) -> Result<(), io::Error> {
+    // Attempt hard link
+    let link_result = linkat(None, from, None, to, LinkatFlags::NoSymlinkFollow);
+
+    // Copy instead
+    if link_result.is_err() {
+        fs::copy(from, to)?;
+    }
+
+    Ok(())
+}
+
 pub fn uri_file_name(uri: &Url) -> &str {
     let path = uri.path();
 
@@ -94,65 +128,3 @@ pub fn is_root() -> bool {
 
     Uid::effective().is_root()
 }
-
-pub mod sync {
-    use std::{
-        fs::{copy, create_dir_all, remove_dir_all},
-        io,
-        path::{Path, PathBuf},
-    };
-
-    use nix::unistd::{linkat, LinkatFlags};
-
-    pub fn ensure_dir_exists(path: &Path) -> Result<(), io::Error> {
-        if !path.exists() {
-            create_dir_all(path)?;
-        }
-        Ok(())
-    }
-
-    pub fn recreate_dir(path: &Path) -> Result<(), io::Error> {
-        if path.exists() {
-            remove_dir_all(path)?;
-        }
-        create_dir_all(path)?;
-        Ok(())
-    }
-
-    pub fn enumerate_files<'a>(
-        dir: &'a Path,
-        matcher: impl Fn(&Path) -> bool + Send + Copy + 'a,
-    ) -> Result<Vec<PathBuf>, io::Error> {
-        use std::fs::read_dir;
-
-        let read_dir = read_dir(dir)?;
-
-        let mut paths = vec![];
-
-        for entry in read_dir {
-            let entry = entry?;
-            let path = entry.path();
-            let meta = entry.metadata()?;
-
-            if meta.is_dir() {
-                paths.extend(enumerate_files(&path, matcher)?);
-            } else if meta.is_file() && matcher(&path) {
-                paths.push(path);
-            }
-        }
-
-        Ok(paths)
-    }
-
-    pub fn hardlink_or_copy(from: &Path, to: &Path) -> Result<(), io::Error> {
-        // Attempt hard link
-        let link_result = linkat(None, from, None, to, LinkatFlags::NoSymlinkFollow);
-
-        // Copy instead
-        if link_result.is_err() {
-            copy(from, to)?;
-        }
-
-        Ok(())
-    }
-}
diff --git a/crates/config/Cargo.toml b/crates/config/Cargo.toml
index 0114487b4..afa8d62e9 100644
--- a/crates/config/Cargo.toml
+++ b/crates/config/Cargo.toml
@@ -7,9 +7,6 @@ edition.workspace = true
 
 [dependencies]
 dirs.workspace = true
-futures.workspace = true
 serde.workspace = true
 serde_yaml.workspace = true
 thiserror.workspace = true
-tokio-stream.workspace = true
-tokio.workspace = true
diff --git a/crates/config/src/lib.rs b/crates/config/src/lib.rs
index 56e955b48..b0496afb4 100644
--- a/crates/config/src/lib.rs
+++ b/crates/config/src/lib.rs
@@ -3,15 +3,12 @@
 // SPDX-License-Identifier: MPL-2.0
 
 use std::{
-    fmt,
+    fmt, fs, io,
     path::{Path, PathBuf},
 };
 
-use futures::StreamExt;
 use serde::{de::DeserializeOwned, Serialize};
 use thiserror::Error;
-use tokio::{fs, io};
-use tokio_stream::wrappers::ReadDirStream;
 
 const EXTENSION: &str = "yaml";
 
@@ -55,14 +52,14 @@ impl Manager {
         }
     }
 
-    pub async fn load<T: Config>(&self) -> Vec<T> {
+    pub fn load<T: Config>(&self) -> Vec<T> {
         let domain = T::domain();
 
         let mut configs = vec![];
 
         for (entry, resolve) in self.scope.load_with() {
-            for path in enumerate_paths(entry, resolve, &domain).await {
-                if let Some(config) = read_config(path).await {
+            for path in enumerate_paths(entry, resolve, &domain) {
+                if let Some(config) = read_config(path) {
                     configs.push(config);
                 }
             }
@@ -71,33 +68,29 @@ impl Manager {
         configs
     }
 
-    pub async fn save<T: Config>(&self, name: impl fmt::Display, config: &T) -> Result<(), SaveError> {
+    pub fn save<T: Config>(&self, name: impl fmt::Display, config: &T) -> Result<(), SaveError> {
         let domain = T::domain();
 
         let dir = self.scope.save_dir(&domain);
 
-        fs::create_dir_all(&dir)
-            .await
-            .map_err(|io| 
SaveError::CreateDir(dir.clone(), io))?; + fs::create_dir_all(&dir).map_err(|io| SaveError::CreateDir(dir.clone(), io))?; let path = dir.join(format!("{name}.{EXTENSION}")); let serialized = serde_yaml::to_string(config)?; - fs::write(&path, serialized) - .await - .map_err(|io| SaveError::Write(path, io))?; + fs::write(&path, serialized).map_err(|io| SaveError::Write(path, io))?; Ok(()) } - pub async fn delete(&self, name: impl fmt::Display) -> Result<(), io::Error> { + pub fn delete(&self, name: impl fmt::Display) -> Result<(), io::Error> { let domain = T::domain(); let dir = self.scope.save_dir(&domain); let path = dir.join(format!("{name}.{EXTENSION}")); - fs::remove_file(&path).await?; + fs::remove_file(path)?; Ok(()) } @@ -117,7 +110,7 @@ pub enum SaveError { Write(PathBuf, #[source] io::Error), } -async fn enumerate_paths(entry: Entry, resolve: Resolve<'_>, domain: &str) -> Vec { +fn enumerate_paths(entry: Entry, resolve: Resolve<'_>, domain: &str) -> Vec { match entry { Entry::File => { let file = resolve.file(domain); @@ -129,12 +122,12 @@ async fn enumerate_paths(entry: Entry, resolve: Resolve<'_>, domain: &str) -> Ve } } Entry::Directory => { - if let Ok(read_dir) = fs::read_dir(resolve.dir(domain)).await { - ReadDirStream::new(read_dir) - .filter_map(|entry| async { - let entry = entry.ok()?; + if let Ok(read_dir) = fs::read_dir(resolve.dir(domain)) { + read_dir + .flatten() + .filter_map(|entry| { let path = entry.path(); - let extension = path.extension().and_then(|ext| ext.to_str()).unwrap_or_default(); + let extension = path.extension().and_then(|ext| ext.to_str())?; if path.exists() && extension == EXTENSION { Some(path) @@ -143,7 +136,6 @@ async fn enumerate_paths(entry: Entry, resolve: Resolve<'_>, domain: &str) -> Ve } }) .collect() - .await } else { vec![] } @@ -151,8 +143,8 @@ async fn enumerate_paths(entry: Entry, resolve: Resolve<'_>, domain: &str) -> Ve } } -async fn read_config(path: PathBuf) -> Option { - let bytes = fs::read(path).await.ok()?; +fn read_config(path: PathBuf) -> Option { + let bytes = fs::read(path).ok()?; serde_yaml::from_slice(&bytes).ok() } diff --git a/moss/src/cli/extract.rs b/moss/src/cli/extract.rs index 6e9dd13cf..243194394 100644 --- a/moss/src/cli/extract.rs +++ b/moss/src/cli/extract.rs @@ -14,7 +14,6 @@ use moss::package::{self, MissingMetaFieldError}; use rayon::prelude::{IntoParallelRefIterator, ParallelIterator}; use stone::{payload::layout, read::PayloadKind}; use thiserror::{self, Error}; -use tokio::task; use tui::{ProgressBar, ProgressStyle, ProgressWriter}; pub fn command() -> Command { @@ -25,7 +24,7 @@ pub fn command() -> Command { } /// Handle the `extract` command -pub async fn handle(args: &ArgMatches) -> Result<(), Error> { +pub fn handle(args: &ArgMatches) -> Result<(), Error> { let paths = args .get_many::("PATH") .into_iter() @@ -33,14 +32,6 @@ pub async fn handle(args: &ArgMatches) -> Result<(), Error> { .cloned() .collect::>(); - task::spawn_blocking(move || extract(paths)) - .await - .expect("join handle")?; - - Ok(()) -} - -fn extract(paths: Vec) -> Result<(), Error> { // Begin unpack create_dir_all(".stoneStore")?; diff --git a/moss/src/cli/index.rs b/moss/src/cli/index.rs index 58c582250..588ab7258 100644 --- a/moss/src/cli/index.rs +++ b/moss/src/cli/index.rs @@ -3,20 +3,19 @@ // SPDX-License-Identifier: MPL-2.0 use std::{ collections::{btree_map, BTreeMap}, - io, + fs, io, path::{Path, PathBuf, StripPrefixError}, time::Duration, }; use clap::{arg, value_parser, ArgMatches, Command}; -use 
futures::{future::BoxFuture, stream, FutureExt, StreamExt, TryStreamExt}; use moss::{ - client, environment, + client, package::{self, Meta, MissingMetaFieldError}, }; +use rayon::iter::{IntoParallelRefIterator, ParallelIterator}; use sha2::{Digest, Sha256}; use thiserror::Error; -use tokio::{fs, task}; use tui::{MultiProgress, ProgressBar, ProgressStyle, Stylize}; pub fn command() -> Command { @@ -26,10 +25,10 @@ pub fn command() -> Command { .arg(arg!( "directory of index files").value_parser(value_parser!(PathBuf))) } -pub async fn handle(args: &ArgMatches) -> Result<(), Error> { +pub fn handle(args: &ArgMatches) -> Result<(), Error> { let dir = args.get_one::("INDEX_DIR").unwrap().canonicalize()?; - let stone_files = enumerate_stone_files(&dir).await?; + let stone_files = enumerate_stone_files(&dir)?; println!("Indexing {} files\n", stone_files.len()); @@ -44,11 +43,10 @@ pub async fn handle(args: &ArgMatches) -> Result<(), Error> { ); total_progress.tick(); - let list = stream::iter(&stone_files) + let list = stone_files + .par_iter() .map(|path| get_meta(path, &dir, &multi_progress, &total_progress)) - .buffer_unordered(environment::MAX_DISK_CONCURRENCY) - .try_collect::>() - .await?; + .collect::, _>>()?; let mut map = BTreeMap::new(); @@ -76,7 +74,7 @@ pub async fn handle(args: &ArgMatches) -> Result<(), Error> { } } - write_index(&dir, map, &total_progress).await?; + write_index(&dir, map, &total_progress)?; multi_progress.clear()?; @@ -85,15 +83,7 @@ pub async fn handle(args: &ArgMatches) -> Result<(), Error> { Ok(()) } -async fn write_index( - dir: &Path, - map: BTreeMap, - total_progress: &ProgressBar, -) -> Result<(), Error> { - use std::fs::File; - - let dir = dir.to_path_buf(); - +fn write_index(dir: &Path, map: BTreeMap, total_progress: &ProgressBar) -> Result<(), Error> { total_progress.set_message("Writing index file"); total_progress.set_style( ProgressStyle::with_template("\n {spinner} {wide_msg}") @@ -102,25 +92,21 @@ async fn write_index( ); total_progress.enable_steady_tick(Duration::from_millis(150)); - task::spawn_blocking(move || { - let mut file = File::create(dir.join("stone.index"))?; + let mut file = fs::File::create(dir.join("stone.index"))?; - let mut writer = stone::Writer::new(&mut file, stone::header::v1::FileType::Repository)?; + let mut writer = stone::Writer::new(&mut file, stone::header::v1::FileType::Repository)?; - for (_, meta) in map { - let payload = meta.to_stone_payload(); - writer.add_payload(payload.as_slice())?; - } + for (_, meta) in map { + let payload = meta.to_stone_payload(); + writer.add_payload(payload.as_slice())?; + } - writer.finalize()?; + writer.finalize()?; - Ok(()) - }) - .await - .expect("join handle") + Ok(()) } -async fn get_meta( +fn get_meta( path: &Path, dir: &Path, multi_progress: &MultiProgress, @@ -131,7 +117,7 @@ async fn get_meta( let progress = multi_progress.insert_before(total_progress, ProgressBar::new_spinner()); progress.enable_steady_tick(Duration::from_millis(150)); - let (size, hash) = stat_file(path, &relative_path, &progress).await?; + let (size, hash) = stat_file(path, &relative_path, &progress)?; progress.set_message(format!("{} {}", "Indexing".yellow(), relative_path.clone().bold(),)); progress.set_style( @@ -140,9 +126,9 @@ async fn get_meta( .tick_chars("--=≡■≡=--"), ); - let (_, payloads) = moss::stone::stream_payloads(path).await?; - - let payloads = payloads.try_collect::>().await?; + let mut file = fs::File::open(path)?; + let mut reader = stone::read(&mut file)?; + let payloads = 
reader.payloads()?.collect::, _>>()?; let payload = payloads .iter() @@ -162,56 +148,42 @@ async fn get_meta( Ok(meta) } -async fn stat_file(path: &Path, relative_path: &str, progress: &ProgressBar) -> Result<(u64, String), Error> { - use std::fs::File; - - let path = path.to_path_buf(); - let relative_path = relative_path.to_string(); - let progress = progress.clone(); - - task::spawn_blocking(move || { - let file = File::open(path)?; - let size = file.metadata()?.len(); +fn stat_file(path: &Path, relative_path: &str, progress: &ProgressBar) -> Result<(u64, String), Error> { + let file = fs::File::open(path)?; + let size = file.metadata()?.len(); - progress.set_length(size); - progress.set_message(format!("{} {}", "Hashing".blue(), relative_path.bold())); - progress.set_style( - ProgressStyle::with_template(" {spinner} |{percent:>3}%| {wide_msg} {binary_bytes_per_sec:>.dim} ") - .unwrap() - .tick_chars("--=≡■≡=--"), - ); + progress.set_length(size); + progress.set_message(format!("{} {}", "Hashing".blue(), relative_path.bold())); + progress.set_style( + ProgressStyle::with_template(" {spinner} |{percent:>3}%| {wide_msg} {binary_bytes_per_sec:>.dim} ") + .unwrap() + .tick_chars("--=≡■≡=--"), + ); - let mut hasher = Sha256::new(); - io::copy(&mut &file, &mut progress.wrap_write(&mut hasher))?; + let mut hasher = Sha256::new(); + io::copy(&mut &file, &mut progress.wrap_write(&mut hasher))?; - let hash = hex::encode(hasher.finalize()); + let hash = hex::encode(hasher.finalize()); - Ok((size, hash)) - }) - .await - .expect("join hande") + Ok((size, hash)) } -fn enumerate_stone_files(dir: &Path) -> BoxFuture, Error>> { - async move { - let mut read_dir = fs::read_dir(dir).await?; +fn enumerate_stone_files(dir: &Path) -> Result, Error> { + let read_dir = fs::read_dir(dir)?; + let mut paths = vec![]; - let mut paths = vec![]; + for entry in read_dir.flatten() { + let path = entry.path(); + let meta = entry.metadata()?; - while let Some(entry) = read_dir.next_entry().await? 
{ - let path = entry.path(); - let meta = entry.metadata().await?; - - if meta.is_dir() { - paths.extend(enumerate_stone_files(&path).await?); - } else if meta.is_file() && path.extension().and_then(|s| s.to_str()) == Some("stone") { - paths.push(path); - } + if meta.is_dir() { + paths.extend(enumerate_stone_files(&path)?); + } else if meta.is_file() && path.extension().and_then(|s| s.to_str()) == Some("stone") { + paths.push(path); } - - Ok(paths) } - .boxed() + + Ok(paths) } #[derive(Debug, Error)] diff --git a/moss/src/cli/info.rs b/moss/src/cli/info.rs index b851aa086..b289cad8c 100644 --- a/moss/src/cli/info.rs +++ b/moss/src/cli/info.rs @@ -5,7 +5,6 @@ use std::path::PathBuf; use clap::{arg, ArgMatches, Command}; -use futures::StreamExt; use itertools::Itertools; use moss::{ client::{self, Client}, @@ -29,7 +28,7 @@ pub fn command() -> Command { } /// For all arguments, try to match a package -pub async fn handle(args: &ArgMatches) -> Result<(), Error> { +pub fn handle(args: &ArgMatches) -> Result<(), Error> { let pkgs = args .get_many::("NAME") .into_iter() @@ -39,15 +38,11 @@ pub async fn handle(args: &ArgMatches) -> Result<(), Error> { let show_files = args.get_flag("files"); let root = args.get_one::("root").unwrap().clone(); - let client = Client::new(environment::NAME, root).await?; + let client = Client::new(environment::NAME, root)?; for pkg in pkgs { let lookup = Provider::from_name(&pkg).unwrap(); - let resolved = client - .registry - .by_provider(&lookup, Flags::NONE) - .collect::>() - .await; + let resolved = client.registry.by_provider(&lookup, Flags::NONE).collect::>(); if resolved.is_empty() { return Err(Error::NotFound(pkg)); } @@ -55,7 +50,7 @@ pub async fn handle(args: &ArgMatches) -> Result<(), Error> { print_package(&candidate); if candidate.flags.contains(Flags::INSTALLED) && show_files { - let vfs = client.vfs([&candidate.id]).await?; + let vfs = client.vfs([&candidate.id])?; print_files(vfs); } println!(); diff --git a/moss/src/cli/inspect.rs b/moss/src/cli/inspect.rs index b84640315..0a667f90b 100644 --- a/moss/src/cli/inspect.rs +++ b/moss/src/cli/inspect.rs @@ -3,12 +3,11 @@ // SPDX-License-Identifier: MPL-2.0 use clap::{arg, ArgMatches, Command}; -use futures::StreamExt; -use moss::stone; -use moss::stone::payload::layout; -use moss::stone::payload::meta; -use moss::stone::read::PayloadKind; +use std::fs::File; use std::path::PathBuf; +use stone::payload::layout; +use stone::payload::meta; +use stone::read::PayloadKind; use thiserror::Error; const COLUMN_WIDTH: usize = 20; @@ -23,7 +22,7 @@ pub fn command() -> Command { /// /// Inspect the given .stone files and print results /// -pub async fn handle(args: &ArgMatches) -> Result<(), Error> { +pub fn handle(args: &ArgMatches) -> Result<(), Error> { let paths = args .get_many::("PATH") .into_iter() @@ -31,20 +30,18 @@ pub async fn handle(args: &ArgMatches) -> Result<(), Error> { .cloned() .collect::>(); - inspect(paths).await -} - -async fn inspect(paths: Vec) -> Result<(), Error> { // Process each input path in order. 
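The recursive walkers converted above (`enumerate_stone_files` here, plus `copy_dir` and `enumerate_files` in boulder's util.rs) now share one synchronous shape; `enumerate_stone_files` previously needed `BoxFuture` and `.boxed()` because async functions cannot recurse directly. For comparison only, not code from this patch, the same walk can be written iteratively with an explicit work stack, which sidesteps recursion depth concerns entirely:

```rust
use std::{
    fs, io,
    path::{Path, PathBuf},
};

// Collect every file under `root` matching `want`, without recursion;
// directories are pushed onto a stack instead of recursed into.
fn walk_files(root: &Path, want: impl Fn(&Path) -> bool) -> io::Result<Vec<PathBuf>> {
    let mut stack = vec![root.to_path_buf()];
    let mut found = vec![];

    while let Some(dir) = stack.pop() {
        for entry in fs::read_dir(&dir)? {
            let entry = entry?;
            let path = entry.path();
            let meta = entry.metadata()?;

            if meta.is_dir() {
                stack.push(path);
            } else if meta.is_file() && want(&path) {
                found.push(path);
            }
        }
    }

    Ok(found)
}
```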
for path in paths { - let (header, mut payloads) = stone::stream_payloads(&path).await?; + let mut file = File::open(&path)?; + let mut reader = stone::read(&mut file)?; + + let header = reader.header; + let payloads = reader.payloads()?; // Grab the header version print!("{path:?} = stone container version {:?}", header.version()); - while let Some(result) = payloads.next().await { - let payload = result?; - + for payload in payloads.flatten() { let mut layouts = vec![]; // Grab deps/providers/conflicts diff --git a/moss/src/cli/install.rs b/moss/src/cli/install.rs index 2bcd70622..a69e0311b 100644 --- a/moss/src/cli/install.rs +++ b/moss/src/cli/install.rs @@ -27,7 +27,7 @@ pub fn command() -> Command { } /// Handle execution of `moss install` -pub async fn handle(args: &ArgMatches, root: &Path) -> Result<(), Error> { +pub fn handle(args: &ArgMatches, root: &Path) -> Result<(), Error> { let pkgs = args .get_many::("NAME") .into_iter() @@ -37,12 +37,12 @@ pub async fn handle(args: &ArgMatches, root: &Path) -> Result<(), Error> { let yes = *args.get_one::("yes").unwrap(); // Grab a client for the root - let mut client = Client::new(environment::NAME, root).await?; + let mut client = Client::new(environment::NAME, root)?; // Make ephemeral if a blit target was provided if let Some(blit_target) = args.get_one::("to").cloned() { client = client.ephemeral(blit_target)?; } - client.install(&pkgs, yes).await + client.install(&pkgs, yes) } diff --git a/moss/src/cli/list.rs b/moss/src/cli/list.rs index ccbb86dec..59ff5cc24 100644 --- a/moss/src/cli/list.rs +++ b/moss/src/cli/list.rs @@ -5,7 +5,6 @@ use std::path::PathBuf; use clap::{arg, ArgMatches, Command}; -use futures::StreamExt; use itertools::Itertools; use thiserror::Error; @@ -46,7 +45,7 @@ enum Sync { } /// Handle listing by filter -pub async fn handle(args: &ArgMatches) -> Result<(), Error> { +pub fn handle(args: &ArgMatches) -> Result<(), Error> { let root = args.get_one::("root").unwrap().clone(); let (filter_flags, sync) = match args.subcommand() { @@ -72,11 +71,11 @@ pub async fn handle(args: &ArgMatches) -> Result<(), Error> { }; // Grab a client for the target, enumerate packages - let client = Client::new(environment::NAME, root).await?; - let pkgs = client.registry.list(filter_flags).collect::>().await; + let client = Client::new(environment::NAME, root)?; + let pkgs = client.registry.list(filter_flags).collect::>(); let sync_available = if sync.is_some() { - client.registry.list(Flags::AVAILABLE).collect::>().await + client.registry.list(Flags::AVAILABLE).collect::>() } else { vec![] }; diff --git a/moss/src/cli/mod.rs b/moss/src/cli/mod.rs index e6298400c..62d6ea557 100644 --- a/moss/src/cli/mod.rs +++ b/moss/src/cli/mod.rs @@ -5,6 +5,7 @@ use std::{env, path::PathBuf}; use clap::{Arg, ArgAction, Command}; +use moss::runtime; use thiserror::Error; mod extract; @@ -62,7 +63,7 @@ fn command() -> Command { } /// Process all CLI arguments -pub async fn process() -> Result<(), Error> { +pub fn process() -> Result<(), Error> { let args = replace_aliases(env::args()); let matches = command().get_matches_from(args); @@ -73,17 +74,20 @@ pub async fn process() -> Result<(), Error> { let root = matches.get_one::("root").unwrap(); + // Make async runtime available to all of moss + let _guard = runtime::init(); + match matches.subcommand() { - Some(("extract", args)) => extract::handle(args).await.map_err(Error::Extract), - Some(("index", args)) => index::handle(args).await.map_err(Error::Index), - Some(("info", args)) => 
info::handle(args).await.map_err(Error::Info), - Some(("inspect", args)) => inspect::handle(args).await.map_err(Error::Inspect), - Some(("install", args)) => install::handle(args, root).await.map_err(Error::Install), - Some(("list", args)) => list::handle(args).await.map_err(Error::List), - Some(("remove", args)) => remove::handle(args, root).await.map_err(Error::Remove), - Some(("repo", args)) => repo::handle(args, root).await.map_err(Error::Repo), - Some(("state", args)) => state::handle(args, root).await.map_err(Error::State), - Some(("sync", args)) => sync::handle(args, root).await.map_err(Error::Sync), + Some(("extract", args)) => extract::handle(args).map_err(Error::Extract), + Some(("index", args)) => index::handle(args).map_err(Error::Index), + Some(("info", args)) => info::handle(args).map_err(Error::Info), + Some(("inspect", args)) => inspect::handle(args).map_err(Error::Inspect), + Some(("install", args)) => install::handle(args, root).map_err(Error::Install), + Some(("list", args)) => list::handle(args).map_err(Error::List), + Some(("remove", args)) => remove::handle(args, root).map_err(Error::Remove), + Some(("repo", args)) => repo::handle(args, root).map_err(Error::Repo), + Some(("state", args)) => state::handle(args, root).map_err(Error::State), + Some(("sync", args)) => sync::handle(args, root).map_err(Error::Sync), Some(("version", _)) => { version::print(); Ok(()) diff --git a/moss/src/cli/remove.rs b/moss/src/cli/remove.rs index ef47aac6c..a52055ac1 100644 --- a/moss/src/cli/remove.rs +++ b/moss/src/cli/remove.rs @@ -5,7 +5,6 @@ use std::{collections::HashSet, path::Path}; use clap::{arg, ArgMatches, Command}; -use futures::StreamExt; use itertools::{Either, Itertools}; use moss::{ client::{self, Client}, @@ -31,7 +30,7 @@ pub fn command() -> Command { } /// Handle execution of `moss remove` -pub async fn handle(args: &ArgMatches, root: &Path) -> Result<(), Error> { +pub fn handle(args: &ArgMatches, root: &Path) -> Result<(), Error> { let pkgs = args .get_many::("NAME") .into_iter() @@ -41,9 +40,9 @@ pub async fn handle(args: &ArgMatches, root: &Path) -> Result<(), Error> { let yes = *args.get_one::("yes").unwrap(); // Grab a client for the target, enumerate packages - let client = Client::new(environment::NAME, root).await?; + let client = Client::new(environment::NAME, root)?; - let installed = client.registry.list_installed(Flags::NONE).collect::>().await; + let installed = client.registry.list_installed(Flags::NONE).collect::>(); let installed_ids = installed.iter().map(|p| p.id.clone()).collect::>(); // Separate packages between installed / not installed (or invalid) @@ -65,17 +64,16 @@ pub async fn handle(args: &ArgMatches, root: &Path) -> Result<(), Error> { // Add all installed packages to transaction let mut transaction = client .registry - .transaction_with_installed(installed_ids.clone().into_iter().collect()) - .await?; + .transaction_with_installed(installed_ids.clone().into_iter().collect())?; // Remove all pkgs for removal - transaction.remove(for_removal).await; + transaction.remove(for_removal); // Finalized tx has all reverse deps removed let finalized = transaction.finalize().cloned().collect::>(); // Resolve all removed packages, where removed is (installed - finalized) - let removed = client.resolve_packages(installed_ids.difference(&finalized)).await?; + let removed = client.resolve_packages(installed_ids.difference(&finalized))?; println!("The following package(s) will be removed:"); println!(); @@ -103,7 +101,7 @@ pub async fn handle(args: 
&ArgMatches, root: &Path) -> Result<(), Error> { // it's value from the previous state let new_state_pkgs = { let previous_selections = match client.installation.active_state { - Some(id) => client.state_db.get(&id).await?.selections, + Some(id) => client.state_db.get(&id)?.selections, None => vec![], }; @@ -130,7 +128,7 @@ pub async fn handle(args: &ArgMatches, root: &Path) -> Result<(), Error> { }; // Apply state - client.apply_state(&new_state_pkgs, "Remove").await?; + client.apply_state(&new_state_pkgs, "Remove")?; Ok(()) } diff --git a/moss/src/cli/repo.rs b/moss/src/cli/repo.rs index cf3f7b2c4..e15a9f67f 100644 --- a/moss/src/cli/repo.rs +++ b/moss/src/cli/repo.rs @@ -8,7 +8,7 @@ use clap::{arg, Arg, ArgAction, ArgMatches, Command}; use itertools::Itertools; use moss::{ repository::{self, Priority}, - Installation, Repository, + runtime, Installation, Repository, }; use thiserror::Error; use url::Url; @@ -75,7 +75,7 @@ pub fn command() -> Command { } /// Handle subcommands to `repo` -pub async fn handle(args: &ArgMatches, root: &Path) -> Result<(), Error> { +pub fn handle(args: &ArgMatches, root: &Path) -> Result<(), Error> { let config = config::Manager::system(root, "moss"); let handler = match args.subcommand() { @@ -94,15 +94,15 @@ pub async fn handle(args: &ArgMatches, root: &Path) -> Result<(), Error> { // dispatch to runtime handler function match handler { - Action::List(root) => list(root, config).await, - Action::Add(root, name, uri, comment, priority) => add(root, config, name, uri, comment, priority).await, - Action::Remove(root, name) => remove(root, config, name).await, - Action::Update(root, name) => update(root, config, name).await, + Action::List(root) => list(root, config), + Action::Add(root, name, uri, comment, priority) => add(root, config, name, uri, comment, priority), + Action::Remove(root, name) => remove(root, config, name), + Action::Update(root, name) => update(root, config, name), } } -// Actual implementation of moss repo add, asynchronous -async fn add( +// Actual implementation of moss repo add +fn add( root: &Path, config: config::Manager, name: String, @@ -112,22 +112,20 @@ async fn add( ) -> Result<(), Error> { let installation = Installation::open(root); - let mut manager = repository::Manager::system(config, installation).await?; + let mut manager = repository::Manager::system(config, installation)?; let id = repository::Id::new(name); - manager - .add_repository( - id.clone(), - Repository { - description: comment, - uri, - priority, - }, - ) - .await?; + manager.add_repository( + id.clone(), + Repository { + description: comment, + uri, + priority, + }, + )?; - manager.refresh_all().await?; + runtime::block_on(manager.refresh(&id))?; println!("{id} added"); @@ -135,9 +133,9 @@ async fn add( } /// List the repositories and pretty print them -async fn list(root: &Path, config: config::Manager) -> Result<(), Error> { +fn list(root: &Path, config: config::Manager) -> Result<(), Error> { let installation = Installation::open(root); - let manager = repository::Manager::system(config, installation).await?; + let manager = repository::Manager::system(config, installation)?; let configured_repos = manager.list(); if configured_repos.len() == 0 { @@ -153,26 +151,28 @@ async fn list(root: &Path, config: config::Manager) -> Result<(), Error> { } /// Update specific repos or all -async fn update(root: &Path, config: config::Manager, which: Option) -> Result<(), Error> { +fn update(root: &Path, config: config::Manager, which: Option) -> Result<(), Error> { 
let installation = Installation::open(root); - let mut manager = repository::Manager::system(config, installation).await?; + let mut manager = repository::Manager::system(config, installation)?; - match which { - Some(repo) => manager.refresh(&repository::Id::new(repo)).await?, - None => manager.refresh_all().await?, - } + runtime::block_on(async { + match which { + Some(repo) => manager.refresh(&repository::Id::new(repo)).await, + None => manager.refresh_all().await, + } + })?; Ok(()) } /// Remove repo -async fn remove(root: &Path, config: config::Manager, repo: String) -> Result<(), Error> { +fn remove(root: &Path, config: config::Manager, repo: String) -> Result<(), Error> { let id = repository::Id::new(repo); let installation = Installation::open(root); - let mut manager = repository::Manager::system(config, installation).await?; + let mut manager = repository::Manager::system(config, installation)?; - match manager.remove(id.clone()).await? { + match manager.remove(id.clone())? { repository::manager::Removal::NotFound => { println!("{id} not found"); process::exit(1); diff --git a/moss/src/cli/state.rs b/moss/src/cli/state.rs index e1eb65c39..0d23e4260 100644 --- a/moss/src/cli/state.rs +++ b/moss/src/cli/state.rs @@ -5,7 +5,6 @@ use std::path::Path; use clap::{arg, ArgAction, ArgMatches, Command}; -use futures::{stream, StreamExt, TryFutureExt, TryStreamExt}; use moss::{ client::{self, prune, Client}, environment, state, @@ -29,35 +28,35 @@ pub fn command() -> Command { ) } -pub async fn handle(args: &ArgMatches, root: &Path) -> Result<(), Error> { +pub fn handle(args: &ArgMatches, root: &Path) -> Result<(), Error> { match args.subcommand() { - Some(("list", _)) => list(root).await, - Some(("prune", args)) => prune(args, root).await, + Some(("list", _)) => list(root), + Some(("prune", args)) => prune(args, root), _ => unreachable!(), } } /// List all known states, newest first -pub async fn list(root: &Path) -> Result<(), Error> { - let client = Client::new(environment::NAME, root).await?; +pub fn list(root: &Path) -> Result<(), Error> { + let client = Client::new(environment::NAME, root)?; - let state_ids = client.state_db.list_ids().await?; + let state_ids = client.state_db.list_ids()?; - let mut states = stream::iter(state_ids.iter().map(|(id, _)| id)) - .then(|id| client.state_db.get(id).map_err(Error::StateDB)) - .try_collect::>() - .await?; + let mut states = state_ids + .iter() + .map(|(id, _)| client.state_db.get(id).map_err(Error::StateDB)) + .collect::, _>>()?; states.reverse(); states.into_iter().for_each(print_state); Ok(()) } -pub async fn prune(args: &ArgMatches, root: &Path) -> Result<(), Error> { +pub fn prune(args: &ArgMatches, root: &Path) -> Result<(), Error> { let keep = *args.get_one::("keep").unwrap(); - let client = Client::new(environment::NAME, root).await?; - client.prune(prune::Strategy::KeepRecent(keep)).await?; + let client = Client::new(environment::NAME, root)?; + client.prune(prune::Strategy::KeepRecent(keep))?; Ok(()) } diff --git a/moss/src/cli/sync.rs b/moss/src/cli/sync.rs index 7c7edc320..be6f06dea 100644 --- a/moss/src/cli/sync.rs +++ b/moss/src/cli/sync.rs @@ -7,8 +7,6 @@ use std::path::PathBuf; use std::{collections::BTreeSet, path::Path}; use clap::{arg, value_parser, ArgMatches, Command}; -use futures::{stream, StreamExt, TryStreamExt}; -use moss::environment; use moss::registry::transaction; use moss::state::Selection; use moss::{ @@ -16,6 +14,7 @@ use moss::{ package::{self, Flags}, Package, }; +use moss::{environment, runtime}; use 
thiserror::Error; use tui::dialoguer::theme::ColorfulTheme; @@ -39,11 +38,11 @@ pub fn command() -> Command { ) } -pub async fn handle(args: &ArgMatches, root: &Path) -> Result<(), Error> { +pub fn handle(args: &ArgMatches, root: &Path) -> Result<(), Error> { let yes_all = *args.get_one::("yes").unwrap(); let upgrade_only = *args.get_one::("upgrade-only").unwrap(); - let mut client = Client::new(environment::NAME, root).await?; + let mut client = Client::new(environment::NAME, root)?; // Make ephemeral if a blit target was provided if let Some(blit_target) = args.get_one::("to").cloned() { @@ -51,11 +50,7 @@ pub async fn handle(args: &ArgMatches, root: &Path) -> Result<(), Error> { } // Grab all the existing installed packages - let installed = client - .registry - .list_installed(package::Flags::NONE) - .collect::>() - .await; + let installed = client.registry.list_installed(package::Flags::NONE).collect::>(); if installed.is_empty() { return Err(Error::NoInstall); } @@ -68,8 +63,8 @@ pub async fn handle(args: &ArgMatches, root: &Path) -> Result<(), Error> { // // By resolving only explicit first, this ensures any "orphaned" transitive deps // are naturally dropped from the final state. - let first_pass = resolve_with_sync(&client, Resolution::Explicit, upgrade_only, &installed).await?; - let finalized = resolve_with_sync(&client, Resolution::All, upgrade_only, &first_pass).await?; + let first_pass = resolve_with_sync(&client, Resolution::Explicit, upgrade_only, &installed)?; + let finalized = resolve_with_sync(&client, Resolution::All, upgrade_only, &first_pass)?; // Synced are packages are: // @@ -116,13 +111,13 @@ pub async fn handle(args: &ArgMatches, root: &Path) -> Result<(), Error> { return Err(Error::Cancelled); } - client.cache_packages(&synced).await?; + runtime::block_on(client.cache_packages(&synced))?; // Map finalized state to a [`Selection`] by referencing // it's value from the previous state let new_selections = { let previous_selections = match client.installation.active_state { - Some(id) => client.state_db.get(&id).await?.selections, + Some(id) => client.state_db.get(&id)?.selections, None => vec![], }; @@ -155,7 +150,7 @@ pub async fn handle(args: &ArgMatches, root: &Path) -> Result<(), Error> { }; // Perfect, apply state. 
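A pattern worth calling out across these conversions: the `buffer_unordered`/`try_collect` stream pipelines become plain iterator chains ending in `collect::<Result<Vec<_>, _>>()`, which short-circuits on the first `Err`. The same target type also works with rayon's `par_iter()`, which is how the index command keeps its parallelism. A self-contained illustration:

```rust
use std::num::ParseIntError;

// collect() can build a Result<Vec<_>, E> directly: iteration stops at
// the first Err, much like the removed try_collect did for streams.
fn parse_all(inputs: &[&str]) -> Result<Vec<i64>, ParseIntError> {
    inputs.iter().map(|s| s.parse::<i64>()).collect()
}

fn main() -> Result<(), ParseIntError> {
    assert_eq!(parse_all(&["1", "2"])?, vec![1, 2]);
    assert!(parse_all(&["1", "oops"]).is_err());
    Ok(())
}
```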
- client.apply_state(&new_selections, "Sync").await?; + client.apply_state(&new_selections, "Sync")?; Ok(()) } @@ -167,7 +162,7 @@ enum Resolution { /// Return a fully resolved package set w/ sync'd changes swapped in /// using the provided `packages` at the requested [`Resolution`] -async fn resolve_with_sync( +fn resolve_with_sync( client: &Client, resolution: Resolution, upgrade_only: bool, @@ -177,22 +172,15 @@ async fn resolve_with_sync( // For each package, replace it w/ it's sync'd change (if available) // or return the original package - let with_sync = stream::iter(packages.iter()) - .filter(|p| async { - match resolution { - Resolution::Explicit => p.flags.contains(Flags::EXPLICIT), - Resolution::All => true, - } + let with_sync = packages + .iter() + .filter(|p| match resolution { + Resolution::Explicit => p.flags.contains(Flags::EXPLICIT), + Resolution::All => true, }) - .map(|p| async { + .map(|p| { // Get first available = use highest priority - if let Some(lookup) = client - .registry - .by_name(&p.meta.name, package::Flags::AVAILABLE) - .boxed() - .next() - .await - { + if let Some(lookup) = client.registry.by_name(&p.meta.name, package::Flags::AVAILABLE).next() { let upgrade_check = if upgrade_only { lookup.meta.source_release > p.meta.source_release } else { @@ -208,16 +196,14 @@ async fn resolve_with_sync( Err(Error::NameNotFound(p.meta.name.clone())) } }) - .buffer_unordered(environment::MAX_DISK_CONCURRENCY) - .try_collect::>() - .await?; + .collect::, _>>()?; // Build a new tx from this sync'd package set let mut tx = client.registry.transaction()?; - tx.add(with_sync.iter().map(|p| p.id.clone()).collect()).await?; + tx.add(with_sync.iter().map(|p| p.id.clone()).collect())?; // Resolve the tx - Ok(client.resolve_packages(tx.finalize()).await?) + Ok(client.resolve_packages(tx.finalize())?) } #[derive(Debug, Error)] diff --git a/moss/src/client/cache.rs b/moss/src/client/cache.rs index 4fa4a4660..93c175b56 100644 --- a/moss/src/client/cache.rs +++ b/moss/src/client/cache.rs @@ -9,18 +9,16 @@ use std::{ sync::{Arc, Mutex}, }; -use futures::{stream, StreamExt}; +use futures::StreamExt; use stone::{payload, read::PayloadKind}; use thiserror::Error; use tokio::{ fs::{self, File}, io::AsyncWriteExt, - runtime::Handle, - task, }; use url::Url; -use crate::{environment, package, request, Installation}; +use crate::{package, request, Installation}; /// Synchronized set of assets that are currently being /// unpacked. Used to prevent unpacking the same asset @@ -124,7 +122,7 @@ pub struct UnpackedAsset { impl Download { /// Unpack the downloaded package // TODO: Return an "Unpacked" struct which has a "blit" method on it? 
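`Download::unpack` below keeps the `UnpackingInProgress` guard from the async version: a worker must claim an asset path before writing it, so two packages sharing an asset never clobber each other's IO. The type itself is untouched by this patch; a hypothetical reduction of the idea, for readers new to this file:

```rust
use std::{
    collections::HashSet,
    path::{Path, PathBuf},
    sync::{Arc, Mutex},
};

// Sketch only (the real type is UnpackingInProgress): a shared claim set.
// add() returns false when another worker already claimed the path.
#[derive(Clone, Default)]
struct InProgress(Arc<Mutex<HashSet<PathBuf>>>);

impl InProgress {
    fn add(&self, path: PathBuf) -> bool {
        self.0.lock().expect("poisoned lock").insert(path)
    }

    fn remove(&self, path: &Path) {
        self.0.lock().expect("poisoned lock").remove(path);
    }
}
```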
- pub async fn unpack( + pub fn unpack( self, unpacking_in_progress: UnpackingInProgress, on_progress: impl Fn(Progress) + Send + 'static, @@ -170,101 +168,91 @@ impl Download { } } - let rt = Handle::current(); + let content_dir = self.installation.cache_path("content"); + let content_path = content_dir.join(self.id); - task::spawn_blocking(move || { - let content_dir = self.installation.cache_path("content"); - let content_path = content_dir.join(self.id); + create_dir_all(&content_dir)?; - create_dir_all(&content_dir)?; + let mut reader = stone::read(File::open(&self.path)?)?; - let mut reader = stone::read(File::open(&self.path)?)?; + let payloads = reader.payloads()?.collect::, _>>()?; + let indicies = payloads + .iter() + .filter_map(PayloadKind::index) + .flat_map(|p| &p.body) + .collect::>(); - let payloads = reader.payloads()?.collect::, _>>()?; - let indicies = payloads - .iter() - .filter_map(PayloadKind::index) - .flat_map(|p| &p.body) - .collect::>(); + // If download was cached & all assets exist, we can skip unpacking + if self.was_cached && check_assets_exist(&indicies, &self.installation) { + return Ok(UnpackedAsset { payloads }); + } - // If download was cached & all assets exist, we can skip unpacking - if self.was_cached && rt.block_on(check_assets_exist(&indicies, &self.installation)) { - return Ok(UnpackedAsset { payloads }); - } + let content = payloads + .iter() + .find_map(PayloadKind::content) + .ok_or(Error::MissingContent)?; + + let content_file = File::options() + .read(true) + .write(true) + .create(true) + .open(&content_path)?; + + reader.unpack_content( + content, + &mut ProgressWriter::new(&content_file, content.header.plain_size, &on_progress), + )?; + + indicies + .into_iter() + .map(|idx| { + let path = asset_path(&self.installation, &format!("{:02x}", idx.digest)); + + // If file is already being unpacked by another worker, skip + // to prevent clobbering IO + if !unpacking_in_progress.add(path.clone()) { + return Ok(()); + } - let content = payloads - .iter() - .find_map(PayloadKind::content) - .ok_or(Error::MissingContent)?; - - let content_file = File::options() - .read(true) - .write(true) - .create(true) - .open(&content_path)?; - - reader.unpack_content( - content, - &mut ProgressWriter::new(&content_file, content.header.plain_size, &on_progress), - )?; - - indicies - .into_iter() - .map(|idx| { - let path = asset_path(&self.installation, &format!("{:02x}", idx.digest)); - - // If file is already being unpacked by another worker, skip - // to prevent clobbering IO - if !unpacking_in_progress.add(path.clone()) { - return Ok(()); - } - - // This asset already exists - if path.exists() { - unpacking_in_progress.remove(&path); - return Ok(()); - } - - // Create parent dir - if let Some(parent) = path.parent() { - create_dir_all(parent)?; - } - - // Split file reader over index range - let mut file = &content_file; - file.seek(SeekFrom::Start(idx.start))?; - let mut split_file = (&mut file).take(idx.end - idx.start); - - let mut output = File::create(&path)?; - - copy(&mut split_file, &mut output)?; - - // Remove file from in-progress + // This asset already exists + if path.exists() { unpacking_in_progress.remove(&path); + return Ok(()); + } + + // Create parent dir + if let Some(parent) = path.parent() { + create_dir_all(parent)?; + } - Ok(()) - }) - .collect::, Error>>()?; + // Split file reader over index range + let mut file = &content_file; + file.seek(SeekFrom::Start(idx.start))?; + let mut split_file = (&mut file).take(idx.end - idx.start); - 
remove_file(&content_path)?; + let mut output = File::create(&path)?; - Ok(UnpackedAsset { payloads }) - }) - .await - .expect("join handle") + copy(&mut split_file, &mut output)?; + + // Remove file from in-progress + unpacking_in_progress.remove(&path); + + Ok(()) + }) + .collect::, Error>>()?; + + remove_file(&content_path)?; + + Ok(UnpackedAsset { payloads }) } } /// Returns true if all assets already exist in the installation -async fn check_assets_exist(indicies: &[&payload::Index], installation: &Installation) -> bool { - stream::iter(indicies) - .map(|index| async move { - let path = asset_path(installation, &format!("{:02x}", index.digest)); - fs::try_exists(path).await.unwrap_or_default() - }) - .buffer_unordered(environment::MAX_DISK_CONCURRENCY) - .all(|exists| async move { exists }) - .await +fn check_assets_exist(indicies: &[&payload::Index], installation: &Installation) -> bool { + indicies.iter().all(|index| { + let path = asset_path(installation, &format!("{:02x}", index.digest)); + path.exists() + }) } pub fn download_path(installation: &Installation, hash: &str) -> Result { diff --git a/moss/src/client/install.rs b/moss/src/client/install.rs index 1c81ac64f..8371a14d7 100644 --- a/moss/src/client/install.rs +++ b/moss/src/client/install.rs @@ -2,7 +2,6 @@ // // SPDX-License-Identifier: MPL-2.0 -use futures::{future::join_all, StreamExt}; use thiserror::Error; use tui::{ dialoguer::{theme::ColorfulTheme, Confirm}, @@ -13,24 +12,25 @@ use crate::{ client::{self, Client}, package::{self, Flags}, registry::transaction, + runtime, state::Selection, Package, Provider, }; -pub async fn install(client: &mut Client, pkgs: &[&str], yes: bool) -> Result<(), Error> { +pub fn install(client: &mut Client, pkgs: &[&str], yes: bool) -> Result<(), Error> { // Resolve input packages - let input = resolve_input(pkgs, client).await?; + let input = resolve_input(pkgs, client)?; // Add all inputs let mut tx = client.registry.transaction()?; - tx.add(input.clone()).await?; + tx.add(input.clone())?; // Resolve transaction to metadata - let resolved = client.resolve_packages(tx.finalize()).await?; + let resolved = client.resolve_packages(tx.finalize())?; // Get installed packages to check against - let installed = client.registry.list_installed(Flags::NONE).collect::>().await; + let installed = client.registry.list_installed(Flags::NONE).collect::>(); let is_installed = |p: &Package| installed.iter().any(|i| i.meta.name == p.meta.name); // Get missing packages that are: @@ -78,13 +78,13 @@ pub async fn install(client: &mut Client, pkgs: &[&str], yes: bool) -> Result<() } // Cache packages - client.cache_packages(&missing).await?; + runtime::block_on(client.cache_packages(&missing))?; // Calculate the new state of packages (old_state + missing) let new_state_pkgs = { // Only use previous state in stateful mode let previous_selections = match client.installation.active_state { - Some(id) if !client.is_ephemeral() => client.state_db.get(&id).await?.selections, + Some(id) if !client.is_ephemeral() => client.state_db.get(&id)?.selections, _ => vec![], }; let missing_selections = missing.iter().map(|p| Selection { @@ -99,22 +99,22 @@ pub async fn install(client: &mut Client, pkgs: &[&str], yes: bool) -> Result<() }; // Perfect, apply state. - client.apply_state(&new_state_pkgs, "Install").await?; + client.apply_state(&new_state_pkgs, "Install")?; Ok(()) } /// Resolves the package arguments as valid input packages. Returns an error /// if any args are invalid. 
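Back in `Download::unpack` above, the asset fan-out leans on two std details: `&File` implements both `Read` and `Seek`, and `Read::take` caps how many bytes a reader yields. That is what lets the single open content file be sliced into per-asset ranges without reopening it. Extracted into a standalone sketch:

```rust
use std::{
    fs::File,
    io::{self, Read, Seek, SeekFrom, Write},
};

// Copy bytes [start, end) of `src` into `dst`. Works on a shared &File
// because std implements Read + Seek for &File; note the file cursor is
// shared, so ranges must be sliced sequentially, as the unpack loop does.
fn copy_range(src: &File, start: u64, end: u64, dst: &mut impl Write) -> io::Result<u64> {
    let mut reader = src;
    reader.seek(SeekFrom::Start(start))?;
    let mut limited = reader.take(end - start);
    io::copy(&mut limited, dst)
}
```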
-async fn resolve_input(pkgs: &[&str], client: &Client) -> Result, Error> { +fn resolve_input(pkgs: &[&str], client: &Client) -> Result, Error> { // Parse pkg args into valid / invalid sets - let queried = join_all(pkgs.iter().map(|p| find_packages(p, client))).await; + let queried = pkgs.iter().map(|p| find_packages(p, client)); let mut results = vec![]; for (id, pkg) in queried { if let Some(pkg) = pkg { - results.push(pkg.id.clone()) + results.push(pkg.id) } else { return Err(Error::NoPackage(id)); } @@ -124,16 +124,12 @@ async fn resolve_input(pkgs: &[&str], client: &Client) -> Result(id: &'a str, client: &Client) -> (String, Option) { +fn find_packages(id: &str, client: &Client) -> (String, Option) { let provider = Provider::from_name(id).unwrap(); - let result = client - .registry - .by_provider(&provider, Flags::AVAILABLE) - .collect::>() - .await; + let result = client.registry.by_provider(&provider, Flags::AVAILABLE).next(); // First only, pre-sorted - (id.into(), result.first().cloned()) + (id.into(), result) } #[derive(Debug, Error)] diff --git a/moss/src/client/mod.rs b/moss/src/client/mod.rs index ed455c5d9..13c8e3736 100644 --- a/moss/src/client/mod.rs +++ b/moss/src/client/mod.rs @@ -3,13 +3,13 @@ // SPDX-License-Identifier: MPL-2.0 use std::{ - io, - os::fd::RawFd, + fs, io, + os::{fd::RawFd, unix::fs::symlink}, path::{Path, PathBuf}, time::Duration, }; -use futures::{future::try_join_all, stream, StreamExt, TryStreamExt}; +use futures::{stream, StreamExt, TryStreamExt}; use itertools::Itertools; use nix::{ errno::Errno, @@ -20,7 +20,6 @@ use nix::{ }; use stone::{payload::layout, read::PayloadKind}; use thiserror::Error; -use tokio::fs::{self, create_dir_all, remove_dir_all, remove_file, rename, symlink}; use tui::{MultiProgress, ProgressBar, ProgressStyle, Stylize}; use vfs::tree::{builder::TreeBuilder, BlitFile, Element}; @@ -29,7 +28,7 @@ use self::prune::prune; use crate::{ db, environment, package, registry::plugin::{self, Plugin}, - repository, + repository, runtime, state::{self, Selection}, Installation, Package, Registry, State, }; @@ -57,20 +56,20 @@ pub struct Client { impl Client { /// Construct a new Client - pub async fn new(client_name: impl ToString, root: impl Into) -> Result { - Self::build(client_name, root, None).await + pub fn new(client_name: impl ToString, root: impl Into) -> Result { + Self::build(client_name, root, None) } /// Construct a new Client with explicit repositories - pub async fn with_explicit_repositories( + pub fn with_explicit_repositories( client_name: impl ToString, root: impl Into, repositories: repository::Map, ) -> Result { - Self::build(client_name, root, Some(repositories)).await + Self::build(client_name, root, Some(repositories)) } - async fn build( + fn build( client_name: impl ToString, root: impl Into, repositories: Option, @@ -84,18 +83,17 @@ impl Client { let name = client_name.to_string(); let config = config::Manager::system(&root, "moss"); let installation = Installation::open(root); - let install_db = db::meta::Database::new(installation.db_path("install"), installation.read_only()).await?; - let state_db = db::state::Database::new(&installation).await?; - let layout_db = db::layout::Database::new(&installation).await?; + let install_db = db::meta::Database::new(installation.db_path("install"), installation.read_only())?; + let state_db = db::state::Database::new(&installation)?; + let layout_db = db::layout::Database::new(&installation)?; - let mut repositories = if let Some(repos) = repositories { - 
repository::Manager::explicit(&name, repos, installation.clone()).await? + let repositories = if let Some(repos) = repositories { + repository::Manager::explicit(&name, repos, installation.clone())? } else { - repository::Manager::system(config.clone(), installation.clone()).await? + repository::Manager::system(config.clone(), installation.clone())? }; - repositories.ensure_all_initialized().await?; - let registry = build_registry(&installation, &repositories, &install_db, &state_db).await?; + let registry = build_registry(&installation, &repositories, &install_db, &state_db)?; Ok(Client { name, @@ -114,8 +112,8 @@ impl Client { matches!(self.scope, Scope::Ephemeral { .. }) } - pub async fn install(&mut self, packages: &[&str], yes: bool) -> Result<(), install::Error> { - install(self, packages, yes).await + pub fn install(&mut self, packages: &[&str], yes: bool) -> Result<(), install::Error> { + install(self, packages, yes) } /// Transition to an ephemeral client that doesn't record state changes @@ -139,25 +137,32 @@ impl Client { }) } + /// Ensures all repositories have been initialized by ensuring their stone indexes + /// are downloaded and added to the meta db + pub async fn ensure_repos_initialized(&mut self) -> Result<(), Error> { + self.repositories.ensure_all_initialized().await?; + self.registry = build_registry(&self.installation, &self.repositories, &self.install_db, &self.state_db)?; + Ok(()) + } + /// Reload all configured repositories and refreshes their index file, then update /// registry with all active repositories. pub async fn refresh_repositories(&mut self) -> Result<(), Error> { // Reload manager if not explicit to pickup config changes // then refresh indexes if !self.repositories.is_explicit() { - self.repositories = repository::Manager::system(self.config.clone(), self.installation.clone()).await? + self.repositories = repository::Manager::system(self.config.clone(), self.installation.clone())? }; self.repositories.refresh_all().await?; // Rebuild registry - self.registry = - build_registry(&self.installation, &self.repositories, &self.install_db, &self.state_db).await?; + self.registry = build_registry(&self.installation, &self.repositories, &self.install_db, &self.state_db)?; Ok(()) } /// Prune states with the provided [`prune::Strategy`] - pub async fn prune(&self, strategy: prune::Strategy) -> Result<(), Error> { + pub fn prune(&self, strategy: prune::Strategy) -> Result<(), Error> { if self.scope.is_ephemeral() { return Err(Error::EphemeralProhibitedOperation); } @@ -168,27 +173,21 @@ impl Client { &self.install_db, &self.layout_db, &self.installation, - ) - .await?; + )?; Ok(()) } /// Resolves the provided id's with the underlying registry, returning /// the first [`Package`] for each id. Packages are sorted by name /// and deduped before returning. 
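Since `dedup_by_key` only removes consecutive duplicates, the sort by the same key directly below is load-bearing; without it, repeated names separated by other packages would survive. A tiny self-contained demonstration:

```rust
// Sort + dedup by the same key, the pattern resolve_packages applies to
// package names: dedup_by_key alone would keep the second "zsh" here.
fn main() {
    let mut names = vec!["zsh", "nano", "zsh"];
    names.sort_by_key(|n| n.to_string());
    names.dedup_by_key(|n| n.to_string());
    assert_eq!(names, ["nano", "zsh"]);
}
```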
- pub async fn resolve_packages( + pub fn resolve_packages<'a>( &self, - packages: impl IntoIterator, + packages: impl IntoIterator, ) -> Result, Error> { - let mut metadata = try_join_all(packages.into_iter().map(|id| async { - self.registry - .by_id(id) - .boxed() - .next() - .await - .ok_or(Error::MissingMetadata(id.clone())) - })) - .await?; + let mut metadata = packages + .into_iter() + .map(|id| self.registry.by_id(id).next().ok_or(Error::MissingMetadata(id.clone()))) + .collect::, _>>()?; metadata.sort_by_key(|p| p.meta.name.to_string()); metadata.dedup_by_key(|p| p.meta.name.to_string()); Ok(metadata) @@ -199,48 +198,44 @@ impl Client { /// Then blit the filesystem, promote it, finally archiving the active ID /// /// Returns `None` if the client is ephemeral - pub async fn apply_state(&self, selections: &[Selection], summary: impl ToString) -> Result, Error> { + pub fn apply_state(&self, selections: &[Selection], summary: impl ToString) -> Result, Error> { let old_state = self.installation.active_state; - let fstree = self - .blit_root(selections.iter().map(|s| &s.package), old_state.map(state::Id::next)) - .await?; + let fstree = self.blit_root(selections.iter().map(|s| &s.package), old_state.map(state::Id::next))?; match &self.scope { Scope::Stateful => { // Add to db - let state = self.state_db.add(selections, Some(summary.to_string()), None).await?; + let state = self.state_db.add(selections, Some(summary.to_string()), None)?; // Write state id { let usr = self.installation.staging_path("usr"); - fs::create_dir_all(&usr).await?; + fs::create_dir_all(&usr)?; let state_path = usr.join(".stateID"); - fs::write(state_path, state.id.to_string()).await?; + fs::write(state_path, state.id.to_string())?; } - record_os_release(&self.installation.staging_dir(), Some(state.id)).await?; + record_os_release(&self.installation.staging_dir(), Some(state.id))?; // Run all of the transaction triggers - let triggers = - postblit::triggers(postblit::TriggerScope::Transaction(&self.installation), &fstree).await?; - create_root_links(&self.installation.isolation_dir()).await?; + let triggers = postblit::triggers(postblit::TriggerScope::Transaction(&self.installation), &fstree)?; + create_root_links(&self.installation.isolation_dir())?; for trigger in triggers { trigger.execute()?; } // Staging is only used with [`Scope::Stateful`] - self.promote_staging().await?; + self.promote_staging()?; // Now we got it staged, we need working rootfs - create_root_links(&self.installation.root).await?; + create_root_links(&self.installation.root)?; if let Some(id) = old_state { - self.archive_state(id).await?; + self.archive_state(id)?; } // At this point we're allowed to run system triggers - let sys_triggers = - postblit::triggers(postblit::TriggerScope::System(&self.installation), &fstree).await?; + let sys_triggers = postblit::triggers(postblit::TriggerScope::System(&self.installation), &fstree)?; for trigger in sys_triggers { trigger.execute()?; } @@ -248,15 +243,15 @@ impl Client { Ok(Some(state)) } Scope::Ephemeral { blit_root } => { - record_os_release(blit_root, None).await?; - create_root_links(blit_root).await?; + record_os_release(blit_root, None)?; + create_root_links(blit_root)?; Ok(None) } } } /// Activate the given state - async fn promote_staging(&self) -> Result<(), Error> { + fn promote_staging(&self) -> Result<(), Error> { if self.scope.is_ephemeral() { return Err(Error::EphemeralProhibitedOperation); } @@ -266,7 +261,7 @@ impl Client { // Create the target tree if !usr_target.try_exists()? 
{ - create_dir_all(&usr_target).await?; + fs::create_dir_all(&usr_target)?; } // Now swap staging with live @@ -295,7 +290,7 @@ impl Client { } /// Archive old states into their respective tree - async fn archive_state(&self, id: state::Id) -> Result<(), Error> { + fn archive_state(&self, id: state::Id) -> Result<(), Error> { if self.scope.is_ephemeral() { return Err(Error::EphemeralProhibitedOperation); } @@ -305,11 +300,11 @@ impl Client { let usr_source = self.installation.staging_path("usr"); if let Some(parent) = usr_target.parent() { if !parent.exists() { - create_dir_all(parent).await?; + fs::create_dir_all(parent)?; } } // hot swap the staging/usr into the root/$id/usr - rename(&usr_source, &usr_target).await?; + fs::rename(usr_source, &usr_target)?; Ok(()) } @@ -356,67 +351,77 @@ impl Client { progress_bar.inc(progress.delta); }) .await?; - let is_cached = download.was_cached; - let package_name = package.meta.name.to_string(); - // Set progress to unpacking - progress_bar.set_message(format!("{} {}", "Unpacking".yellow(), package_name.clone().bold(),)); - progress_bar.set_length(1000); - progress_bar.set_position(0); + // Move rest of blocking code to threadpool - // Unpack and update progress - let unpacked = download - .unpack(unpacking_in_progress.clone(), { + let multi_progress = multi_progress.clone(); + let total_progress = total_progress.clone(); + let unpacking_in_progress = unpacking_in_progress.clone(); + let layout_db = self.layout_db.clone(); + let install_db = self.install_db.clone(); + let package = (*package).clone(); + + runtime::unblock(move || { + let package_name = package.meta.name.to_string(); + + // Set progress to unpacking + progress_bar.set_message(format!("{} {}", "Unpacking".yellow(), package_name.clone().bold(),)); + progress_bar.set_length(1000); + progress_bar.set_position(0); + + // Unpack and update progress + let unpacked = download.unpack(unpacking_in_progress.clone(), { let progress_bar = progress_bar.clone(); move |progress| { progress_bar.set_position((progress.pct() * 1000.0) as u64); } - }) - .await?; - - // Merge layoutdb - progress_bar.set_message(format!("{} {}", "Store layout".white(), package_name.clone().bold())); - // Remove old layout entries for package - self.layout_db.remove(&package.id).await?; - // Add new entries in batches of 1k - for chunk in progress_bar.wrap_iter( - unpacked - .payloads - .iter() - .find_map(PayloadKind::layout) - .map(|p| &p.body) - .ok_or(Error::CorruptedPackage)? - .chunks(environment::DB_BATCH_SIZE), - ) { - let entries = chunk.iter().map(|i| (package.id.clone(), i.clone())).collect_vec(); - self.layout_db.batch_add(entries).await?; - } + })?; + + // Merge layoutdb + progress_bar.set_message(format!("{} {}", "Store layout".white(), package_name.clone().bold())); + // Remove old layout entries for package + layout_db.remove(&package.id)?; + // Add new entries in batches of 1k + for chunk in progress_bar.wrap_iter( + unpacked + .payloads + .iter() + .find_map(PayloadKind::layout) + .map(|p| &p.body) + .ok_or(Error::CorruptedPackage)? 
+ .chunks(environment::DB_BATCH_SIZE), + ) { + let entries = chunk.iter().map(|i| (package.id.clone(), i.clone())).collect_vec(); + layout_db.batch_add(entries)?; + } - // Consume the package in the metadb - self.install_db.add(package.id.clone(), package.meta.clone()).await?; + // Consume the package in the metadb + install_db.add(package.id.clone(), package.meta.clone())?; - // Remove this progress bar - progress_bar.finish(); - multi_progress.remove(&progress_bar); + // Remove this progress bar + progress_bar.finish(); + multi_progress.remove(&progress_bar); - let cached_tag = is_cached - .then_some(format!("{}", " (cached)".dim())) - .unwrap_or_default(); + let cached_tag = is_cached + .then_some(format!("{}", " (cached)".dim())) + .unwrap_or_default(); - // Write installed line - multi_progress.println(format!( - "{} {}{}", - "Installed".green(), - package_name.clone().bold(), - cached_tag, - ))?; + // Write installed line + multi_progress.println(format!( + "{} {}{}", + "Installed".green(), + package_name.clone().bold(), + cached_tag, + ))?; - // Inc total progress by 1 - total_progress.inc(1); + // Inc total progress by 1 + total_progress.inc(1); - Ok(()) as Result<(), Error> + Ok(()) as Result<(), Error> + }) + .await })) // Use max network concurrency since we download files here .buffer_unordered(environment::MAX_NETWORK_CONCURRENCY) @@ -430,10 +435,13 @@ impl Client { } /// Build a [`vfs::Tree`] for the provided packages - pub async fn vfs(&self, packages: impl IntoIterator) -> Result, Error> { + pub fn vfs<'a>( + &self, + packages: impl IntoIterator, + ) -> Result, Error> { let mut tbuild = TreeBuilder::new(); for id in packages.into_iter() { - let layouts = self.layout_db.query(id).await?; + let layouts = self.layout_db.query(id)?; for layout in layouts { tbuild.push(PendingFile { id: id.clone(), layout }); } @@ -444,9 +452,9 @@ impl Client { } /// Blit the packages to a filesystem root - async fn blit_root( + fn blit_root<'a>( &self, - packages: impl IntoIterator, + packages: impl IntoIterator, state_id: Option, ) -> Result, Error> { let progress = ProgressBar::new(1).with_style( @@ -458,7 +466,7 @@ impl Client { progress.enable_steady_tick(Duration::from_millis(150)); progress.tick(); - let tree = self.vfs(packages).await?; + let tree = self.vfs(packages)?; progress.set_length(tree.len()); progress.set_position(0_u64); @@ -472,7 +480,7 @@ impl Client { }; // undirt. 
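        // ("undirt" = discard whatever tree a previous or interrupted blit
        // left at the target, so the new root is assembled from scratch)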
- remove_dir_all(&blit_target).await?; + fs::remove_dir_all(&blit_target)?; if let Some(root) = tree.structured() { let _ = mkdir(&blit_target, Mode::from_bits_truncate(0o755)); @@ -572,7 +580,7 @@ impl Client { } /// Add root symlinks & os-release file -async fn create_root_links(root: &Path) -> Result<(), io::Error> { +fn create_root_links(root: &Path) -> Result<(), io::Error> { let links = vec![ ("usr/sbin", "sbin"), ("usr/bin", "bin"), @@ -586,21 +594,21 @@ async fn create_root_links(root: &Path) -> Result<(), io::Error> { let staging_target = root.join(format!("{target}.next")); if staging_target.exists() { - remove_file(&staging_target).await?; + fs::remove_file(&staging_target)?; } if final_target.exists() && final_target.is_symlink() && final_target.read_link()?.to_string_lossy() == source { continue 'linker; } - symlink(source, &staging_target).await?; - rename(staging_target, final_target).await?; + symlink(source, &staging_target)?; + fs::rename(staging_target, final_target)?; } Ok(()) } /// Record the operating system release info -async fn record_os_release(root: &Path, state_id: Option) -> Result<(), Error> { +fn record_os_release(root: &Path, state_id: Option) -> Result<(), Error> { let os_release = format!( r#"NAME="Serpent OS" VERSION="{version}" @@ -616,7 +624,7 @@ BUG_REPORT_URL="https://github.com/serpent-os""#, tx = state_id.unwrap_or_default() ); - fs::write(root.join("usr").join("lib").join("os-release"), os_release).await?; + fs::write(root.join("usr").join("lib").join("os-release"), os_release)?; Ok(()) } @@ -706,14 +714,14 @@ impl ToString for PendingFile { } } -async fn build_registry( +fn build_registry( installation: &Installation, repositories: &repository::Manager, installdb: &db::meta::Database, statedb: &db::state::Database, ) -> Result { let state = match installation.active_state { - Some(id) => Some(statedb.get(&id).await?), + Some(id) => Some(statedb.get(&id)?), None => None, }; diff --git a/moss/src/client/postblit.rs b/moss/src/client/postblit.rs index 3b4aee564..d2f8aaafd 100644 --- a/moss/src/client/postblit.rs +++ b/moss/src/client/postblit.rs @@ -58,7 +58,7 @@ pub(super) struct TriggerRunner<'a> { /// Construct an iterator of executable triggers for the given /// scope, which can be used with nice progress bars. 
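/// A sketch of the call shape at its `Client::apply_state` call sites
/// (hypothetical `installation` and `fstree` bindings):
///
/// ```ignore
/// let triggers = postblit::triggers(postblit::TriggerScope::Transaction(&installation), &fstree)?;
/// for trigger in triggers {
///     trigger.execute()?;
/// }
/// ```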
-pub(super) async fn triggers<'a>( +pub(super) fn triggers<'a>( scope: TriggerScope<'a>, fstree: &vfs::tree::Tree, ) -> Result>, Error> { @@ -68,13 +68,11 @@ pub(super) async fn triggers<'a>( let triggers = match scope { TriggerScope::Transaction(install) => config::Manager::custom(install.staging_dir().join(trigger_root)) .load::() - .await .into_iter() .map(|t| t.0) .collect_vec(), TriggerScope::System(install) => config::Manager::custom(install.root.join(trigger_root)) .load::() - .await .into_iter() .map(|t| t.0) .collect_vec(), diff --git a/moss/src/client/prune.rs b/moss/src/client/prune.rs index 1006878c1..5b39ff05a 100644 --- a/moss/src/client/prune.rs +++ b/moss/src/client/prune.rs @@ -4,14 +4,12 @@ use std::{ collections::{HashMap, HashSet}, - io, + fs, io, path::{Path, PathBuf}, }; -use futures::{stream, StreamExt, TryStreamExt}; use itertools::Itertools; use thiserror::Error; -use tokio::{fs, task}; use tui::pretty::print_to_columns; use crate::{client::cache, db, environment, package, state, Installation, State}; @@ -48,14 +46,14 @@ impl Status { /// Prune old states using [`Strategy`] and garbage collect /// all cached data related to those states being removed -pub async fn prune( +pub fn prune( strategy: Strategy, state_db: &db::state::Database, install_db: &db::meta::Database, layout_db: &db::layout::Database, installation: &Installation, ) -> Result<(), Error> { - let state_ids = state_db.list_ids().await?; + let state_ids = state_db.list_ids()?; // Define each state as either Keep or Remove let states_by_status = match strategy { @@ -99,7 +97,7 @@ pub async fn prune( // Get net refcount of each package and collect removal states for status in states_by_status { // Get metadata - let state = state_db.get(status.id()).await?; + let state = state_db.get(status.id())?; // Increment each package state.selections.iter().for_each(|selection| { @@ -130,35 +128,33 @@ pub async fn prune( println!(); // Prune these states / packages from all dbs - prune_databases(&removals, &package_removals, state_db, install_db, layout_db).await?; + prune_databases(&removals, &package_removals, state_db, install_db, layout_db)?; // Remove orphaned downloads remove_orphaned_files( // root installation.cache_path("downloads").join("v1"), // final set of hashes to compare against - install_db.file_hashes().await?, + install_db.file_hashes()?, // path builder using hash |hash| cache::download_path(installation, &hash).ok(), - ) - .await?; + )?; // Remove orphaned assets remove_orphaned_files( // root installation.assets_path("v2"), // final set of hashes to compare against - layout_db.file_hashes().await?, + layout_db.file_hashes()?, // path builder using hash |hash| Some(cache::asset_path(installation, &hash)), - ) - .await?; + )?; Ok(()) } /// Removes the provided states & packages from the databases -async fn prune_databases( +fn prune_databases( states: &[State], packages: &[package::Id], state_db: &db::state::Database, @@ -167,62 +163,57 @@ async fn prune_databases( ) -> Result<(), Error> { for chunk in &states.iter().map(|state| &state.id).chunks(environment::DB_BATCH_SIZE) { // Remove db states - state_db.batch_remove(chunk).await?; + state_db.batch_remove(chunk)?; } for chunk in &packages.iter().chunks(environment::DB_BATCH_SIZE) { // Remove db metadata - install_db.batch_remove(chunk).await?; + install_db.batch_remove(chunk)?; } for chunk in &packages.iter().chunks(environment::DB_BATCH_SIZE) { // Remove db layouts - layout_db.batch_remove(chunk).await?; + layout_db.batch_remove(chunk)?; } 
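    // Each pass above removes rows in DB_BATCH_SIZE chunks so that no
    // single DELETE statement binds an arbitrarily long parameter list.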
Ok(()) } /// Removes all files under `root` that no longer exist in the provided `final_hashes` set -async fn remove_orphaned_files( +fn remove_orphaned_files( root: PathBuf, final_hashes: HashSet, compute_path: impl Fn(String) -> Option, ) -> Result<(), Error> { // Compute hashes to remove by (installed - final) - let installed_hashes = enumerate_file_hashes(&root).await?; + let installed_hashes = enumerate_file_hashes(&root)?; let hashes_to_remove = installed_hashes.difference(&final_hashes); // Remove each and it's parent dir if empty - stream::iter(hashes_to_remove) - .map(|hash| async { - // Compute path to file using hash - let Some(file) = compute_path(hash.clone()) else { - return Ok(()); - }; - - // Remove if it exists - if fs::try_exists(&file).await? { - fs::remove_file(&file).await?; - } + hashes_to_remove.into_iter().try_for_each(|hash| { + // Compute path to file using hash + let Some(file) = compute_path(hash.clone()) else { + return Ok(()); + }; + + // Remove if it exists + if file.exists() { + fs::remove_file(&file)?; + } - // Try to remove leading parent dirs if they're - // now empty - if let Some(parent) = file.parent() { - let _ = remove_empty_dirs(parent, &root).await; - } + // Try to remove leading parent dirs if they're + // now empty + if let Some(parent) = file.parent() { + let _ = remove_empty_dirs(parent, &root); + } - Ok(()) as Result<(), Error> - }) - // Remove w/ concurrency! - .buffer_unordered(environment::MAX_DISK_CONCURRENCY) - .try_collect::<()>() - .await?; + Ok(()) as Result<(), Error> + })?; Ok(()) } /// Returns all nested files under `root` and parses the file name as a hash -async fn enumerate_file_hashes(root: impl Into) -> Result, io::Error> { - let files = enumerate_files(root).await?; +fn enumerate_file_hashes(root: impl AsRef) -> Result, io::Error> { + let files = enumerate_files(root)?; let path_to_hash = |path: PathBuf| { path.file_name() @@ -235,9 +226,7 @@ async fn enumerate_file_hashes(root: impl Into) -> Result) -> Result, io::Error> { - use std::fs; - +fn enumerate_files(root: impl AsRef) -> Result, io::Error> { use rayon::prelude::*; fn recurse(dir: impl AsRef) -> io::Result> { @@ -266,15 +255,13 @@ async fn enumerate_files(root: impl Into) -> Result, io::E Ok(files.into_iter().chain(nested_files).collect()) } - let root = root.into(); - - task::spawn_blocking(|| recurse(root)).await.expect("join handle") + recurse(root) } /// Remove all empty folders from `starting` and moving up until `root` /// /// `root` must be a prefix / ancestor of `starting` -async fn remove_empty_dirs(starting: &Path, root: &Path) -> Result<(), io::Error> { +fn remove_empty_dirs(starting: &Path, root: &Path) -> Result<(), io::Error> { if !starting.starts_with(root) || !starting.is_dir() || !root.is_dir() { return Ok(()); } @@ -282,14 +269,14 @@ async fn remove_empty_dirs(starting: &Path, root: &Path) -> Result<(), io::Error let mut current = Some(starting); while let Some(dir) = current.take() { - if fs::try_exists(dir).await? 
{ - let is_empty = fs::read_dir(&dir).await?.next_entry().await?.is_none(); + if dir.exists() { + let is_empty = fs::read_dir(dir)?.count() == 0; if !is_empty { return Ok(()); } - fs::remove_dir(&dir).await?; + fs::remove_dir(dir)?; } if let Some(parent) = dir.parent() { diff --git a/moss/src/config.rs b/moss/src/config.rs deleted file mode 100644 index 1ab889e16..000000000 --- a/moss/src/config.rs +++ /dev/null @@ -1,158 +0,0 @@ -// SPDX-FileCopyrightText: Copyright © 2020-2024 Serpent OS Developers -// -// SPDX-License-Identifier: MPL-2.0 - -use futures::StreamExt; -use std::{ - fmt, - path::{Path, PathBuf}, -}; -use tokio::{fs, io}; - -use serde::{de::DeserializeOwned, Serialize}; -use thiserror::Error; -use tokio_stream::wrappers::ReadDirStream; - -const EXTENSION: &str = "conf"; - -pub trait Config: DeserializeOwned { - fn domain() -> String; - - fn merge(self, other: Self) -> Self; -} - -pub async fn load(root: impl AsRef) -> Option { - let domain = T::domain(); - - let mut configs = vec![]; - - for (base, search) in [ - (Base::Vendor, Search::File), - (Base::Vendor, Search::Directory), - (Base::Admin, Search::File), - (Base::Admin, Search::Directory), - ] { - for path in enumerate_paths(search, &root, base, &domain).await { - if let Some(config) = read_config(path).await { - configs.push(config); - } - } - } - - configs.into_iter().reduce(T::merge) -} - -pub async fn save( - root: impl AsRef, - name: impl fmt::Display, - config: &T, -) -> Result<(), SaveError> { - let domain = T::domain(); - - let dir = domain_dir(root, Base::Admin, &domain); - - fs::create_dir_all(&dir) - .await - .map_err(|io| SaveError::CreateDir(dir.clone(), io))?; - - let path = dir.join(format!("{name}.{EXTENSION}")); - - let serialized = serde_yaml::to_string(config)?; - - fs::write(&path, serialized) - .await - .map_err(|io| SaveError::Write(path, io))?; - - Ok(()) -} - -#[derive(Debug, Error)] -pub enum SaveError { - #[error("create config dir {0:?}")] - CreateDir(PathBuf, #[source] io::Error), - #[error("serialize config")] - Yaml(#[from] serde_yaml::Error), - #[error("write config file {0:?}")] - Write(PathBuf, #[source] io::Error), -} - -async fn enumerate_paths( - search: Search, - root: &impl AsRef, - base: Base, - domain: &str, -) -> Vec { - match search { - Search::File => { - let file = domain_file(root, base, domain); - - if file.exists() { - vec![file] - } else { - vec![] - } - } - Search::Directory => { - if let Ok(read_dir) = fs::read_dir(domain_dir(root, base, domain)).await { - ReadDirStream::new(read_dir) - .filter_map(|entry| async { - let entry = entry.ok()?; - let path = entry.path(); - let extension = path - .extension() - .and_then(|ext| ext.to_str()) - .unwrap_or_default(); - - if path.exists() && extension == EXTENSION { - Some(path) - } else { - None - } - }) - .collect() - .await - } else { - vec![] - } - } - } -} - -fn domain_file(root: impl AsRef, base: Base, domain: &str) -> PathBuf { - root.as_ref() - .join(base.path()) - .join("moss") - .join(format!("{domain}.{EXTENSION}")) -} - -fn domain_dir(root: impl AsRef, base: Base, domain: &str) -> PathBuf { - root.as_ref() - .join(base.path()) - .join("moss") - .join(format!("{domain}.{EXTENSION}.d")) -} - -async fn read_config(path: PathBuf) -> Option { - let bytes = fs::read(path).await.ok()?; - serde_yaml::from_slice(&bytes).ok() -} - -#[derive(Clone, Copy)] -enum Base { - Admin, - Vendor, -} - -impl Base { - fn path(&self) -> &'static str { - match self { - Base::Admin => "etc", - Base::Vendor => "usr/share", - } - } -} - -enum 
Search { - File, - Directory, -} diff --git a/moss/src/db/layout/mod.rs b/moss/src/db/layout/mod.rs index 7decce993..3747b5c6b 100644 --- a/moss/src/db/layout/mod.rs +++ b/moss/src/db/layout/mod.rs @@ -5,21 +5,19 @@ use std::collections::HashSet; use sqlx::sqlite::SqliteConnectOptions; -use sqlx::{Pool, Sqlite}; use stone::payload; use thiserror::Error; -use tokio::sync::Mutex; -use crate::package; -use crate::Installation; +use super::Pool; +use crate::{package, runtime, Installation}; -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct Database { - pool: Mutex>, + pool: Pool, } impl Database { - pub async fn new(installation: &Installation) -> Result { + pub fn new(installation: &Installation) -> Result { let path = installation.db_path("layout"); let options = sqlx::sqlite::SqliteConnectOptions::new() @@ -28,39 +26,96 @@ impl Database { .read_only(installation.read_only()) .foreign_keys(true); - Self::connect(options).await + Self::connect(options) } - async fn connect(options: SqliteConnectOptions) -> Result { - let pool = sqlx::SqlitePool::connect_with(options).await?; + fn connect(options: SqliteConnectOptions) -> Result { + runtime::block_on(async { + let pool = sqlx::SqlitePool::connect_with(options).await?; + sqlx::migrate!("src/db/layout/migrations").run(&pool).await?; + Ok(pool) + }) + .map(|pool| Self { pool: Pool::new(pool) }) + } + + pub fn all(&self) -> Result, Error> { + self.pool.exec(|pool| async move { + let layouts = sqlx::query_as::<_, encoding::Layout>( + " + SELECT package_id, + uid, + gid, + mode, + tag, + entry_type, + entry_value1, + entry_value2 + FROM layout; + ", + ) + .fetch_all(&pool) + .await?; + + Ok(layouts + .into_iter() + .filter_map(|layout| { + let encoding::Layout { + package_id, + uid, + gid, + mode, + tag, + entry_type, + entry_value1, + entry_value2, + } = layout; + + let entry = encoding::decode_entry(entry_type, entry_value1, entry_value2)?; + + Some(( + package_id, + payload::Layout { + uid, + gid, + mode, + tag, + entry, + }, + )) + }) + .collect()) + }) + } - sqlx::migrate!("src/db/layout/migrations").run(&pool).await?; + pub fn file_hashes(&self) -> Result, Error> { + self.pool.exec(|pool| async move { + let layouts = sqlx::query_as::<_, (String,)>( + " + SELECT DISTINCT entry_value1 + FROM layout + WHERE entry_type = 'regular'; + ", + ) + .fetch_all(&pool) + .await?; - Ok(Self { pool: Mutex::new(pool) }) + Ok(layouts + .into_iter() + .filter_map(|(hash,)| hash.parse::().ok().map(|hash| format!("{hash:02x}"))) + .collect()) + }) } - pub async fn all(&self) -> Result, Error> { - let pool = self.pool.lock().await; - let layouts = sqlx::query_as::<_, encoding::Layout>( - " - SELECT package_id, - uid, - gid, - mode, - tag, - entry_type, - entry_value1, - entry_value2 - FROM layout; - ", - ) - .fetch_all(&*pool) - .await?; - - Ok(layouts - .into_iter() - .filter_map(|layout| { - let encoding::Layout { + pub fn add(&self, package: package::Id, layout: payload::Layout) -> Result<(), Error> { + self.batch_add(vec![(package, layout)]) + } + + pub fn batch_add(&self, layouts: Vec<(package::Id, payload::Layout)>) -> Result<(), Error> { + self.pool.exec(|pool| async move { + sqlx::QueryBuilder::new( + " + INSERT INTO layout + ( package_id, uid, gid, @@ -68,123 +123,68 @@ impl Database { tag, entry_type, entry_value1, - entry_value2, + entry_value2 + ) + ", + ) + .push_values(layouts, |mut b, (id, layout)| { + let payload::Layout { + uid, + gid, + mode, + tag, + entry, } = layout; - let entry = encoding::decode_entry(entry_type, entry_value1, 
entry_value2)?; + let (entry_type, entry_value1, entry_value2) = encoding::encode_entry(entry); - Some(( - package_id, - payload::Layout { - uid, - gid, - mode, - tag, - entry, - }, - )) + b.push_bind(id.to_string()) + .push_bind(uid) + .push_bind(gid) + .push_bind(mode) + .push_bind(tag) + .push_bind(entry_type) + .push_bind(entry_value1) + .push_bind(entry_value2); }) - .collect()) - } + .build() + .execute(&pool) + .await?; - pub async fn file_hashes(&self) -> Result, Error> { - let pool = self.pool.lock().await; - let layouts = sqlx::query_as::<_, (String,)>( - " - SELECT DISTINCT entry_value1 - FROM layout - WHERE entry_type = 'regular'; - ", - ) - .fetch_all(&*pool) - .await?; - - Ok(layouts - .into_iter() - .filter_map(|(hash,)| hash.parse::().ok().map(|hash| format!("{hash:02x}"))) - .collect()) - } - - pub async fn add(&self, package: package::Id, layout: payload::Layout) -> Result<(), Error> { - self.batch_add(vec![(package, layout)]).await - } - - pub async fn batch_add(&self, layouts: Vec<(package::Id, payload::Layout)>) -> Result<(), Error> { - let pool = self.pool.lock().await; - - sqlx::QueryBuilder::new( - " - INSERT INTO layout - ( - package_id, - uid, - gid, - mode, - tag, - entry_type, - entry_value1, - entry_value2 - ) - ", - ) - .push_values(layouts, |mut b, (id, layout)| { - let payload::Layout { - uid, - gid, - mode, - tag, - entry, - } = layout; - - let (entry_type, entry_value1, entry_value2) = encoding::encode_entry(entry); - - b.push_bind(id.to_string()) - .push_bind(uid) - .push_bind(gid) - .push_bind(mode) - .push_bind(tag) - .push_bind(entry_type) - .push_bind(entry_value1) - .push_bind(entry_value2); + Ok(()) }) - .build() - .execute(&*pool) - .await?; - - Ok(()) } - pub async fn remove(&self, package: &package::Id) -> Result<(), Error> { - self.batch_remove(Some(package)).await + pub fn remove(&self, package: &package::Id) -> Result<(), Error> { + self.batch_remove(Some(package)) } - pub async fn batch_remove(&self, packages: impl IntoIterator) -> Result<(), Error> { - let pool = self.pool.lock().await; - - let mut query = sqlx::QueryBuilder::new( - " - DELETE FROM layout - WHERE package_id IN ( - ", - ); + pub fn batch_remove<'a>(&self, packages: impl IntoIterator) -> Result<(), Error> { + self.pool.exec(|pool| async move { + let mut query = sqlx::QueryBuilder::new( + " + DELETE FROM layout + WHERE package_id IN ( + ", + ); - let mut separated = query.separated(", "); - packages.into_iter().for_each(|pkg| { - separated.push_bind(pkg.to_string()); - }); - separated.push_unseparated(");"); + let mut separated = query.separated(", "); + packages.into_iter().for_each(|pkg| { + separated.push_bind(pkg.to_string()); + }); + separated.push_unseparated(");"); - query.build().execute(&*pool).await?; + query.build().execute(&pool).await?; - Ok(()) + Ok(()) + }) } /// Retrieve all entries for a given package by ID - pub async fn query(&self, package: &package::Id) -> Result, Error> { - let pool = self.pool.lock().await; - - let query = sqlx::query_as::<_, encoding::Layout>( - "SELECT package_id, + pub fn query(&self, package: &package::Id) -> Result, Error> { + self.pool.exec(|pool| async move { + let query = sqlx::query_as::<_, encoding::Layout>( + "SELECT package_id, uid, gid, mode, @@ -192,37 +192,38 @@ impl Database { entry_type, entry_value1, entry_value2 - FROM layout WHERE package_id = ?", - ) - .bind(package.to_string()); + FROM layout WHERE package_id = ?", + ) + .bind(package.to_string()); - let layouts = query.fetch_all(&*pool).await?; + let layouts = 
query.fetch_all(&pool).await?; - Ok(layouts - .into_iter() - .filter_map(|layout| { - let encoding::Layout { - package_id, - uid, - gid, - mode, - tag, - entry_type, - entry_value1, - entry_value2, - } = layout; + Ok(layouts + .into_iter() + .filter_map(|layout| { + let encoding::Layout { + package_id, + uid, + gid, + mode, + tag, + entry_type, + entry_value1, + entry_value2, + } = layout; - let entry = encoding::decode_entry(entry_type, entry_value1, entry_value2)?; + let entry = encoding::decode_entry(entry_type, entry_value1, entry_value2)?; - Some(payload::Layout { - uid, - gid, - mode, - tag, - entry, + Some(payload::Layout { + uid, + gid, + mode, + tag, + entry, + }) }) - }) - .collect()) + .collect()) + }) } } @@ -300,11 +301,10 @@ mod test { use super::*; - #[tokio::test] - async fn create_insert_select() { - let database = Database::connect(SqliteConnectOptions::from_str("sqlite::memory:").unwrap()) - .await - .unwrap(); + fn create_insert_select() { + let _guard = runtime::init(); + + let database = Database::connect(SqliteConnectOptions::from_str("sqlite::memory:").unwrap()).unwrap(); let bash_completion = include_bytes!("../../../../test/bash-completion-2.11-1-1-x86_64.stone"); @@ -321,9 +321,9 @@ mod test { let count = layouts.len(); - database.batch_add(layouts).await.unwrap(); + database.batch_add(layouts).unwrap(); - let all = database.all().await.unwrap(); + let all = database.all().unwrap(); assert_eq!(count, all.len()); } diff --git a/moss/src/db/meta/mod.rs b/moss/src/db/meta/mod.rs index 0319b6fc4..22b4be580 100644 --- a/moss/src/db/meta/mod.rs +++ b/moss/src/db/meta/mod.rs @@ -4,15 +4,14 @@ use std::collections::HashSet; use std::path::Path; -use std::sync::Arc; -use sqlx::{sqlite::SqliteConnectOptions, Acquire, Pool, Sqlite}; -use sqlx::{Executor, QueryBuilder}; +use sqlx::{sqlite::SqliteConnectOptions, Acquire}; +use sqlx::{Executor, QueryBuilder, Sqlite}; use thiserror::Error; -use tokio::sync::Mutex; +use super::Pool; use crate::package::{self, Meta}; -use crate::{Dependency, Provider}; +use crate::{runtime, Dependency, Provider}; #[derive(Debug, Clone, Copy)] enum Table { @@ -101,368 +100,367 @@ impl Filter { #[derive(Debug, Clone)] pub struct Database { - pool: Arc>>, + pool: Pool, } impl Database { - pub async fn new(path: impl AsRef, read_only: bool) -> Result { + pub fn new(path: impl AsRef, read_only: bool) -> Result { let options = sqlx::sqlite::SqliteConnectOptions::new() .filename(path) .create_if_missing(true) .read_only(read_only) .foreign_keys(true); - Self::connect(options).await + Self::connect(options) } - async fn connect(options: SqliteConnectOptions) -> Result { - let pool = sqlx::SqlitePool::connect_with(options).await?; - - sqlx::migrate!("src/db/meta/migrations").run(&pool).await?; - - Ok(Self { - pool: Arc::new(Mutex::new(pool)), + fn connect(options: SqliteConnectOptions) -> Result { + runtime::block_on(async { + let pool = sqlx::SqlitePool::connect_with(options).await?; + sqlx::migrate!("src/db/meta/migrations").run(&pool).await?; + Ok(pool) }) + .map(|pool| Self { pool: Pool::new(pool) }) } - pub async fn wipe(&self) -> Result<(), Error> { - let pool = self.pool.lock().await; - // Other tables cascade delete so we only need to truncate `meta` - sqlx::query("DELETE FROM meta;").execute(&*pool).await?; - Ok(()) + pub fn wipe(&self) -> Result<(), Error> { + self.pool.exec(|pool| async move { + // Other tables cascade delete so we only need to truncate `meta` + sqlx::query("DELETE FROM meta;").execute(&pool).await?; + Ok(()) + }) } - pub async 
fn query(&self, filter: Option) -> Result, Error> { - let pool = self.pool.lock().await; - - let mut entry_query = sqlx::QueryBuilder::new( - " - SELECT package, - name, - version_identifier, - source_release, - build_release, - architecture, - summary, - description, - source_id, - homepage, - uri, - hash, - download_size - FROM meta - ", - ); - - let mut licenses_query = sqlx::QueryBuilder::new( - " - SELECT package, license - FROM meta_licenses - ", - ); - - let mut dependencies_query = sqlx::QueryBuilder::new( - " - SELECT package, dependency - FROM meta_dependencies + pub fn query(&self, filter: Option) -> Result, Error> { + self.pool.exec(|pool| async move { + let mut entry_query = sqlx::QueryBuilder::new( + " + SELECT package, + name, + version_identifier, + source_release, + build_release, + architecture, + summary, + description, + source_id, + homepage, + uri, + hash, + download_size + FROM meta ", - ); + ); - let mut providers_query = sqlx::QueryBuilder::new( - " - SELECT package, provider - FROM meta_providers - ", - ); + let mut licenses_query = sqlx::QueryBuilder::new( + " + SELECT package, license + FROM meta_licenses + ", + ); - if let Some(filter) = filter { - filter.append(Table::Meta, &mut entry_query); - filter.append(Table::Licenses, &mut licenses_query); - filter.append(Table::Dependencies, &mut dependencies_query); - filter.append(Table::Providers, &mut providers_query); - } + let mut dependencies_query = sqlx::QueryBuilder::new( + " + SELECT package, dependency + FROM meta_dependencies + ", + ); - let entries = entry_query - .build_query_as::() - .fetch_all(&*pool) - .await?; - let licenses = licenses_query - .build_query_as::() - .fetch_all(&*pool) - .await?; - let dependencies = dependencies_query - .build_query_as::() - .fetch_all(&*pool) - .await?; - let providers = providers_query - .build_query_as::() - .fetch_all(&*pool) - .await?; + let mut providers_query = sqlx::QueryBuilder::new( + " + SELECT package, provider + FROM meta_providers + ", + ); - Ok(entries - .into_iter() - .map(|entry| { - ( - entry.id.clone(), - Meta { - name: entry.name, - version_identifier: entry.version_identifier, - source_release: entry.source_release as u64, - build_release: entry.build_release as u64, - architecture: entry.architecture, - summary: entry.summary, - description: entry.description, - source_id: entry.source_id, - homepage: entry.homepage, - licenses: licenses - .iter() - .filter(|l| l.id == entry.id) - .map(|l| l.license.clone()) - .collect(), - dependencies: dependencies - .iter() - .filter(|l| l.id == entry.id) - .map(|d| d.dependency.clone()) - .collect(), - providers: providers - .iter() - .filter(|l| l.id == entry.id) - .map(|p| p.provider.clone()) - .collect(), - uri: entry.uri, - hash: entry.hash, - download_size: entry.download_size.map(|i| i as u64), - }, - ) - }) - .collect()) - } + if let Some(filter) = filter { + filter.append(Table::Meta, &mut entry_query); + filter.append(Table::Licenses, &mut licenses_query); + filter.append(Table::Dependencies, &mut dependencies_query); + filter.append(Table::Providers, &mut providers_query); + } - pub async fn get(&self, package: &package::Id) -> Result { - let pool = self.pool.lock().await; - - let entry_query = sqlx::query_as::<_, encoding::Entry>( - " - SELECT package, - name, - version_identifier, - source_release, - build_release, - architecture, - summary, - description, - source_id, - homepage, - uri, - hash, - download_size - FROM meta - WHERE package = ?; - ", - ) - .bind(package.to_string()); - - let 
licenses_query = sqlx::query_as::<_, encoding::License>( - " - SELECT package, license - FROM meta_licenses - WHERE package = ?; - ", - ) - .bind(package.to_string()); - - let dependencies_query = sqlx::query_as::<_, encoding::Dependency>( - " - SELECT package, dependency - FROM meta_dependencies - WHERE package = ?; - ", - ) - .bind(package.to_string()); - - let providers_query = sqlx::query_as::<_, encoding::Provider>( - " - SELECT package, provider - FROM meta_providers - WHERE package = ?; - ", - ) - .bind(package.to_string()); - - let entry = entry_query.fetch_one(&*pool).await?; - let licenses = licenses_query.fetch_all(&*pool).await?; - let dependencies = dependencies_query.fetch_all(&*pool).await?; - let providers = providers_query.fetch_all(&*pool).await?; - - Ok(Meta { - name: entry.name, - version_identifier: entry.version_identifier, - source_release: entry.source_release as u64, - build_release: entry.build_release as u64, - architecture: entry.architecture, - summary: entry.summary, - description: entry.description, - source_id: entry.source_id, - homepage: entry.homepage, - licenses: licenses.into_iter().map(|l| l.license).collect(), - dependencies: dependencies.into_iter().map(|d| d.dependency).collect(), - providers: providers.into_iter().map(|p| p.provider).collect(), - uri: entry.uri, - hash: entry.hash, - download_size: entry.download_size.map(|i| i as u64), + let entries = entry_query.build_query_as::().fetch_all(&pool).await?; + let licenses = licenses_query + .build_query_as::() + .fetch_all(&pool) + .await?; + let dependencies = dependencies_query + .build_query_as::() + .fetch_all(&pool) + .await?; + let providers = providers_query + .build_query_as::() + .fetch_all(&pool) + .await?; + + Ok(entries + .into_iter() + .map(|entry| { + ( + entry.id.clone(), + Meta { + name: entry.name, + version_identifier: entry.version_identifier, + source_release: entry.source_release as u64, + build_release: entry.build_release as u64, + architecture: entry.architecture, + summary: entry.summary, + description: entry.description, + source_id: entry.source_id, + homepage: entry.homepage, + licenses: licenses + .iter() + .filter(|l| l.id == entry.id) + .map(|l| l.license.clone()) + .collect(), + dependencies: dependencies + .iter() + .filter(|l| l.id == entry.id) + .map(|d| d.dependency.clone()) + .collect(), + providers: providers + .iter() + .filter(|l| l.id == entry.id) + .map(|p| p.provider.clone()) + .collect(), + uri: entry.uri, + hash: entry.hash, + download_size: entry.download_size.map(|i| i as u64), + }, + ) + }) + .collect()) }) } - pub async fn file_hashes(&self) -> Result, Error> { - let pool = self.pool.lock().await; - let hashes = sqlx::query_as::<_, (String,)>( - " - SELECT DISTINCT hash - FROM meta - WHERE hash IS NOT NULL; - ", - ) - .fetch_all(&*pool) - .await?; - - Ok(hashes.into_iter().map(|(hash,)| hash).collect()) - } + pub fn get(&self, package: &package::Id) -> Result { + self.pool.exec(|pool| async move { + let entry_query = sqlx::query_as::<_, encoding::Entry>( + " + SELECT package, + name, + version_identifier, + source_release, + build_release, + architecture, + summary, + description, + source_id, + homepage, + uri, + hash, + download_size + FROM meta + WHERE package = ?; + ", + ) + .bind(package.to_string()); - pub async fn add(&self, id: package::Id, meta: Meta) -> Result<(), Error> { - self.batch_add(vec![(id, meta)]).await - } + let licenses_query = sqlx::query_as::<_, encoding::License>( + " + SELECT package, license + FROM meta_licenses + WHERE 
package = ?; + ", + ) + .bind(package.to_string()); - pub async fn batch_add(&self, packages: Vec<(package::Id, Meta)>) -> Result<(), Error> { - let pool = self.pool.lock().await; - let mut transaction = pool.begin().await?; - - // Remove package (other tables cascade) - batch_remove_impl(packages.iter().map(|(id, _)| id), transaction.acquire().await?).await?; - - // Create entry - sqlx::QueryBuilder::new( - " - INSERT INTO meta ( - package, - name, - version_identifier, - source_release, - build_release, - architecture, - summary, - description, - source_id, - homepage, - uri, - hash, - download_size + let dependencies_query = sqlx::query_as::<_, encoding::Dependency>( + " + SELECT package, dependency + FROM meta_dependencies + WHERE package = ?; + ", ) - ", - ) - .push_values(&packages, |mut b, (id, meta)| { - let Meta { - name, - version_identifier, - source_release, - build_release, - architecture, - summary, - description, - source_id, - homepage, - uri, - hash, - download_size, - .. - } = meta; - - b.push_bind(id.to_string()) - .push_bind(name.to_string()) - .push_bind(version_identifier) - .push_bind(*source_release as i64) - .push_bind(*build_release as i64) - .push_bind(architecture) - .push_bind(summary) - .push_bind(description) - .push_bind(source_id) - .push_bind(homepage) - .push_bind(uri) - .push_bind(hash) - .push_bind(download_size.map(|i| i as i64)); - }) - .build() - .execute(transaction.acquire().await?) - .await?; - - // Licenses - let licenses = packages - .iter() - .flat_map(|(id, meta)| meta.licenses.iter().map(move |license| (id, license))) - .collect::>(); - if !licenses.is_empty() { - sqlx::QueryBuilder::new( + .bind(package.to_string()); + + let providers_query = sqlx::query_as::<_, encoding::Provider>( " - INSERT INTO meta_licenses (package, license) + SELECT package, provider + FROM meta_providers + WHERE package = ?; ", ) - .push_values(licenses, |mut b, (id, license)| { - b.push_bind(id.to_string()).push_bind(license); + .bind(package.to_string()); + + let entry = entry_query.fetch_one(&pool).await?; + let licenses = licenses_query.fetch_all(&pool).await?; + let dependencies = dependencies_query.fetch_all(&pool).await?; + let providers = providers_query.fetch_all(&pool).await?; + + Ok(Meta { + name: entry.name, + version_identifier: entry.version_identifier, + source_release: entry.source_release as u64, + build_release: entry.build_release as u64, + architecture: entry.architecture, + summary: entry.summary, + description: entry.description, + source_id: entry.source_id, + homepage: entry.homepage, + licenses: licenses.into_iter().map(|l| l.license).collect(), + dependencies: dependencies.into_iter().map(|d| d.dependency).collect(), + providers: providers.into_iter().map(|p| p.provider).collect(), + uri: entry.uri, + hash: entry.hash, + download_size: entry.download_size.map(|i| i as u64), }) - .build() - .execute(transaction.acquire().await?) 
- .await?; - } + }) + } - // Dependencies - let dependencies = packages - .iter() - .flat_map(|(id, meta)| meta.dependencies.iter().map(move |dependency| (id, dependency))) - .collect::>(); - if !dependencies.is_empty() { - sqlx::QueryBuilder::new( + pub fn file_hashes(&self) -> Result, Error> { + self.pool.exec(|pool| async move { + let hashes = sqlx::query_as::<_, (String,)>( " - INSERT INTO meta_dependencies (package, dependency) + SELECT DISTINCT hash + FROM meta + WHERE hash IS NOT NULL; ", ) - .push_values(dependencies, |mut b, (id, dependency)| { - b.push_bind(id.to_string()).push_bind(dependency.to_string()); - }) - .build() - .execute(transaction.acquire().await?) + .fetch_all(&pool) .await?; - } - // Providers - let providers = packages - .iter() - .flat_map(|(id, meta)| meta.providers.iter().map(move |provider| (id, provider))) - .collect::>(); - if !providers.is_empty() { + Ok(hashes.into_iter().map(|(hash,)| hash).collect()) + }) + } + + pub fn add(&self, id: package::Id, meta: Meta) -> Result<(), Error> { + self.batch_add(vec![(id, meta)]) + } + + pub fn batch_add(&self, packages: Vec<(package::Id, Meta)>) -> Result<(), Error> { + self.pool.exec(|pool| async move { + let mut transaction = pool.begin().await?; + + // Remove package (other tables cascade) + batch_remove_impl(packages.iter().map(|(id, _)| id), transaction.acquire().await?).await?; + + // Create entry sqlx::QueryBuilder::new( " - INSERT INTO meta_providers (package, provider) + INSERT INTO meta ( + package, + name, + version_identifier, + source_release, + build_release, + architecture, + summary, + description, + source_id, + homepage, + uri, + hash, + download_size + ) ", ) - .push_values(providers, |mut b, (id, provider)| { - b.push_bind(id.to_string()).push_bind(provider.to_string()); + .push_values(&packages, |mut b, (id, meta)| { + let Meta { + name, + version_identifier, + source_release, + build_release, + architecture, + summary, + description, + source_id, + homepage, + uri, + hash, + download_size, + .. + } = meta; + + b.push_bind(id.to_string()) + .push_bind(name.to_string()) + .push_bind(version_identifier) + .push_bind(*source_release as i64) + .push_bind(*build_release as i64) + .push_bind(architecture) + .push_bind(summary) + .push_bind(description) + .push_bind(source_id) + .push_bind(homepage) + .push_bind(uri) + .push_bind(hash) + .push_bind(download_size.map(|i| i as i64)); }) .build() .execute(transaction.acquire().await?) .await?; - } - transaction.commit().await?; + // Licenses + let licenses = packages + .iter() + .flat_map(|(id, meta)| meta.licenses.iter().map(move |license| (id, license))) + .collect::>(); + if !licenses.is_empty() { + sqlx::QueryBuilder::new( + " + INSERT INTO meta_licenses (package, license) + ", + ) + .push_values(licenses, |mut b, (id, license)| { + b.push_bind(id.to_string()).push_bind(license); + }) + .build() + .execute(transaction.acquire().await?) + .await?; + } + + // Dependencies + let dependencies = packages + .iter() + .flat_map(|(id, meta)| meta.dependencies.iter().map(move |dependency| (id, dependency))) + .collect::>(); + if !dependencies.is_empty() { + sqlx::QueryBuilder::new( + " + INSERT INTO meta_dependencies (package, dependency) + ", + ) + .push_values(dependencies, |mut b, (id, dependency)| { + b.push_bind(id.to_string()).push_bind(dependency.to_string()); + }) + .build() + .execute(transaction.acquire().await?) 
+ .await?; + } - Ok(()) + // Providers + let providers = packages + .iter() + .flat_map(|(id, meta)| meta.providers.iter().map(move |provider| (id, provider))) + .collect::>(); + if !providers.is_empty() { + sqlx::QueryBuilder::new( + " + INSERT INTO meta_providers (package, provider) + ", + ) + .push_values(providers, |mut b, (id, provider)| { + b.push_bind(id.to_string()).push_bind(provider.to_string()); + }) + .build() + .execute(transaction.acquire().await?) + .await?; + } + + transaction.commit().await?; + + Ok(()) + }) } - pub async fn remove(&self, package: &package::Id) -> Result<(), Error> { - self.batch_remove(Some(package)).await + pub fn remove(&self, package: &package::Id) -> Result<(), Error> { + self.batch_remove(Some(package)) } - pub async fn batch_remove(&self, packages: impl IntoIterator) -> Result<(), Error> { - let pool = self.pool.lock().await; - batch_remove_impl(packages, &*pool).await + pub fn batch_remove<'a>(&self, packages: impl IntoIterator) -> Result<(), Error> { + self.pool + .exec(|pool| async move { batch_remove_impl(packages, &pool).await }) } } @@ -571,11 +569,10 @@ mod test { use super::*; - #[tokio::test] - async fn create_insert_select() { - let database = Database::connect(SqliteConnectOptions::from_str("sqlite::memory:").unwrap()) - .await - .unwrap(); + fn create_insert_select() { + let _guard = runtime::init(); + + let db = Database::connect(SqliteConnectOptions::from_str("sqlite::memory:").unwrap()).unwrap(); let bash_completion = include_bytes!("../../../../test/bash-completion-2.11-1-1-x86_64.stone"); @@ -587,7 +584,7 @@ mod test { let id = package::Id::from("test".to_string()); - database.add(id.clone(), meta.clone()).await.unwrap(); + db.add(id.clone(), meta.clone()).unwrap(); assert_eq!(&meta.name, &"bash-completion".to_string().into()); @@ -596,19 +593,24 @@ mod test { kind: Kind::PackageName, name: "bash-completion".to_string(), }); - let fetched = database.query(Some(lookup)).await.unwrap(); + let fetched = db.query(Some(lookup)).unwrap(); assert_eq!(fetched.len(), 1); - batch_remove_impl([&id], &*database.pool.lock().await).await.unwrap(); + db.pool + .exec({ + let id = id.clone(); + |pool| async move { batch_remove_impl([&id], &pool).await } + }) + .unwrap(); - let result = database.get(&id).await; + let result = db.get(&id); assert!(result.is_err()); // Test wipe - database.add(id.clone(), meta.clone()).await.unwrap(); - database.wipe().await.unwrap(); - let result = database.get(&id).await; + db.add(id.clone(), meta.clone()).unwrap(); + db.wipe().unwrap(); + let result = db.get(&id); assert!(result.is_err()); } } diff --git a/moss/src/db/mod.rs b/moss/src/db/mod.rs index 75ef4538f..9f8d47a2d 100644 --- a/moss/src/db/mod.rs +++ b/moss/src/db/mod.rs @@ -2,6 +2,32 @@ // // SPDX-License-Identifier: MPL-2.0 +use std::{future::Future, sync::Arc}; + +use sqlx::Sqlite; +use tokio::sync::Mutex; + +use crate::runtime; + pub mod layout; pub mod meta; pub mod state; + +#[derive(Debug, Clone)] +struct Pool(Arc>>); + +impl Pool { + fn new(pool: sqlx::Pool) -> Self { + Self(Arc::new(Mutex::new(pool))) + } + + fn exec(&self, f: impl FnOnce(sqlx::Pool) -> F) -> T + where + F: Future, + { + runtime::block_on(async { + let pool = self.0.lock().await.clone(); + f(pool).await + }) + } +} diff --git a/moss/src/db/state/mod.rs b/moss/src/db/state/mod.rs index d01e94e22..133914d6c 100644 --- a/moss/src/db/state/mod.rs +++ b/moss/src/db/state/mod.rs @@ -4,20 +4,20 @@ use chrono::{DateTime, Utc}; use sqlx::sqlite::SqliteConnectOptions; -use sqlx::{Acquire, 
Executor, Pool, Sqlite}; +use sqlx::{Acquire, Executor}; use thiserror::Error; -use tokio::sync::Mutex; +use super::Pool; use crate::state::{self, Id, Selection}; -use crate::{Installation, State}; +use crate::{runtime, Installation, State}; -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct Database { - pool: Mutex>, + pool: Pool, } impl Database { - pub async fn new(installation: &Installation) -> Result { + pub fn new(installation: &Installation) -> Result { let path = installation.db_path("state"); let options = sqlx::sqlite::SqliteConnectOptions::new() @@ -26,148 +26,149 @@ impl Database { .read_only(installation.read_only()) .foreign_keys(true); - Self::connect(options).await + Self::connect(options) } - async fn connect(options: SqliteConnectOptions) -> Result { - let pool = sqlx::SqlitePool::connect_with(options).await?; - - sqlx::migrate!("src/db/state/migrations").run(&pool).await?; - - Ok(Self { pool: Mutex::new(pool) }) + fn connect(options: SqliteConnectOptions) -> Result { + runtime::block_on(async { + let pool = sqlx::SqlitePool::connect_with(options).await?; + sqlx::migrate!("src/db/state/migrations").run(&pool).await?; + Ok(pool) + }) + .map(|pool| Self { pool: Pool::new(pool) }) } - pub async fn list_ids(&self) -> Result)>, Error> { - let pool = self.pool.lock().await; - - let states = sqlx::query_as::<_, encoding::Created>( - " - SELECT id, created - FROM state; - ", - ) - .fetch_all(&*pool) - .await?; - - Ok(states.into_iter().map(|state| (state.id, state.created)).collect()) + pub fn list_ids(&self) -> Result)>, Error> { + self.pool.exec(|pool| async move { + let states = sqlx::query_as::<_, encoding::Created>( + " + SELECT id, created + FROM state; + ", + ) + .fetch_all(&pool) + .await?; + + Ok(states.into_iter().map(|state| (state.id, state.created)).collect()) + }) } - pub async fn get(&self, id: &Id) -> Result { - let pool = self.pool.lock().await; - - let state_query = sqlx::query_as::<_, encoding::State>( - " - SELECT id, type, created, summary, description - FROM state - WHERE id = ?; - ", - ) - .bind(i64::from(*id)); - let selections_query = sqlx::query_as::<_, encoding::Selection>( - " - SELECT package_id, - explicit, - reason - FROM state_selections - WHERE state_id = ?; - ", - ) - .bind(i64::from(*id)); - - let state = state_query.fetch_one(&*pool).await?; - let selections_rows = selections_query.fetch_all(&*pool).await?; - - let selections = selections_rows - .into_iter() - .map(|row| Selection { - package: row.package_id, - explicit: row.explicit, - reason: row.reason, + pub fn get(&self, id: &Id) -> Result { + self.pool.exec(|pool| async move { + let state_query = sqlx::query_as::<_, encoding::State>( + " + SELECT id, type, created, summary, description + FROM state + WHERE id = ?; + ", + ) + .bind(i64::from(*id)); + let selections_query = sqlx::query_as::<_, encoding::Selection>( + " + SELECT package_id, + explicit, + reason + FROM state_selections + WHERE state_id = ?; + ", + ) + .bind(i64::from(*id)); + + let state = state_query.fetch_one(&pool).await?; + let selections_rows = selections_query.fetch_all(&pool).await?; + + let selections = selections_rows + .into_iter() + .map(|row| Selection { + package: row.package_id, + explicit: row.explicit, + reason: row.reason, + }) + .collect(); + + Ok(State { + id: state.id, + summary: state.summary, + description: state.description, + selections, + created: state.created, + kind: state.kind, }) - .collect(); - - Ok(State { - id: state.id, - summary: state.summary, - description: state.description, - 
selections, - created: state.created, - kind: state.kind, }) } - pub async fn add( + pub fn add( &self, selections: &[Selection], summary: Option, description: Option, ) -> Result { - let pool = self.pool.lock().await; - let mut transaction = pool.begin().await?; - - let encoding::StateId { id } = sqlx::query_as::<_, encoding::StateId>( - " - INSERT INTO state (type, summary, description) - VALUES (?, ?, ?) - RETURNING id; - ", - ) - .bind(state::Kind::Transaction.to_string()) - .bind(summary) - .bind(description) - .fetch_one(transaction.acquire().await?) - .await?; - - if !selections.is_empty() { - transaction - .execute( - sqlx::QueryBuilder::new( - " - INSERT INTO state_selections (state_id, package_id, explicit, reason) + self.pool + .exec(|pool| async move { + let mut transaction = pool.begin().await?; + + let encoding::StateId { id } = sqlx::query_as::<_, encoding::StateId>( + " + INSERT INTO state (type, summary, description) + VALUES (?, ?, ?) + RETURNING id; ", - ) - .push_values(selections, |mut b, selection| { - b.push_bind(i64::from(id)) - .push_bind(selection.package.to_string()) - .push_bind(selection.explicit) - .push_bind(selection.reason.as_ref()); - }) - .build(), ) + .bind(state::Kind::Transaction.to_string()) + .bind(summary) + .bind(description) + .fetch_one(transaction.acquire().await?) .await?; - } - - transaction.commit().await?; - drop(pool); - let state = self.get(&id).await?; - - Ok(state) + if !selections.is_empty() { + transaction + .execute( + sqlx::QueryBuilder::new( + " + INSERT INTO state_selections (state_id, package_id, explicit, reason) + ", + ) + .push_values(selections, |mut b, selection| { + b.push_bind(i64::from(id)) + .push_bind(selection.package.to_string()) + .push_bind(selection.explicit) + .push_bind(selection.reason.as_ref()); + }) + .build(), + ) + .await?; + } + + transaction.commit().await?; + + Ok(id) + }) + .and_then(|id| self.get(&id)) } - pub async fn remove(&self, state: &state::Id) -> Result<(), Error> { - self.batch_remove(Some(state)).await + pub fn remove(&self, state: &state::Id) -> Result<(), Error> { + self.batch_remove(Some(state)) } - pub async fn batch_remove(&self, states: impl IntoIterator) -> Result<(), Error> { - let pool = self.pool.lock().await; - - let mut query = sqlx::QueryBuilder::new( - " - DELETE FROM state - WHERE id IN ( - ", - ); + pub fn batch_remove<'a>(&self, states: impl IntoIterator) -> Result<(), Error> { + self.pool.exec(|pool| async move { + let mut query = sqlx::QueryBuilder::new( + " + DELETE FROM state + WHERE id IN ( + ", + ); - let mut separated = query.separated(", "); - states.into_iter().for_each(|id| { - separated.push_bind(i64::from(*id)); - }); - separated.push_unseparated(");"); + let mut separated = query.separated(", "); + states.into_iter().for_each(|id| { + separated.push_bind(i64::from(*id)); + }); + separated.push_unseparated(");"); - query.build().execute(&*pool).await?; + query.build().execute(&pool).await?; - Ok(()) + Ok(()) + }) } } @@ -228,11 +229,10 @@ mod test { use super::*; use crate::package; - #[tokio::test] async fn create_insert_select() { - let database = Database::connect(SqliteConnectOptions::from_str("sqlite::memory:").unwrap()) - .await - .unwrap(); + let _guard = runtime::init(); + + let database = Database::connect(SqliteConnectOptions::from_str("sqlite::memory:").unwrap()).unwrap(); let selections = vec![ Selection::explicit(package::Id::from("pkg a".to_string())), @@ -242,7 +242,6 @@ mod test { let state = database .add(&selections, Some("test".to_string()), 
Some("test".to_string())) - .await .unwrap(); // First record diff --git a/moss/src/lib.rs b/moss/src/lib.rs index 002ee18a9..d84ace770 100644 --- a/moss/src/lib.rs +++ b/moss/src/lib.rs @@ -22,5 +22,5 @@ pub mod package; pub mod registry; pub mod repository; pub mod request; +pub mod runtime; pub mod state; -pub mod stone; diff --git a/moss/src/main.rs b/moss/src/main.rs index a9e7b93c3..6ce87c6c1 100644 --- a/moss/src/main.rs +++ b/moss/src/main.rs @@ -9,9 +9,8 @@ use tui::Stylize; mod cli; /// Main entry point -#[tokio::main] -async fn main() { - if let Err(error) = cli::process().await { +fn main() { + if let Err(error) = cli::process() { report_error(error); std::process::exit(1); } diff --git a/moss/src/registry/job.rs b/moss/src/registry/job.rs deleted file mode 100644 index 80cf092ab..000000000 --- a/moss/src/registry/job.rs +++ /dev/null @@ -1,49 +0,0 @@ -// SPDX-FileCopyrightText: Copyright © 2020-2024 Serpent OS Developers -// -// SPDX-License-Identifier: MPL-2.0 - -use std::path::PathBuf; - -use url::Url; - -use crate::{package, repository}; - -/// What system (domain) does this job operate in? -#[derive(Clone, Debug)] -pub enum Domain { - Package(package::Id), - Repository(repository::Id), -} - -/// Allow us to handle various hash types in future -#[derive(Clone, Debug)] -pub enum CheckType { - /// Verify hashsum using the SHA256 method - Sha256(String), -} - -/// From whence this Job came -#[derive(Clone, Debug)] -pub enum Origin { - /// Locally available - LocalFile(PathBuf), - - /// Must be fetched from a remote URI - RemoteFile(Url), -} - -/// A job is used to describe the operation required to get some pkgID installed locally -#[derive(Clone, Debug)] -pub struct Job { - /// Domain this job is operating on - pub domain: Domain, - - /// Where are we getting this from.. ? - pub origin: Origin, - - /// How do we verify the download? - pub check: Option, - - // How large (in bytes) is the download? - pub size: u64, -} diff --git a/moss/src/registry/mod.rs b/moss/src/registry/mod.rs index 7a6d06e80..27fe00c71 100644 --- a/moss/src/registry/mod.rs +++ b/moss/src/registry/mod.rs @@ -5,7 +5,6 @@ //! Defines an encapsulation of "query plugins", including an interface //! for managing and using them. 
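//!
//! With this refactor the registry is queried through plain iterators; a
//! minimal usage sketch (hypothetical `registry` and `name` bindings, not
//! part of this diff):
//!
//! ```ignore
//! // Plugins are consulted in descending priority order, so the first
//! // yielded package is the strongest candidate.
//! let best = registry.by_name(&name, package::Flags::AVAILABLE).next();
//! ```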
-use futures::{stream, Future, Stream, StreamExt}; use itertools::Itertools; use crate::package::{self, Package}; @@ -14,7 +13,6 @@ use crate::Provider; pub use self::plugin::Plugin; pub use self::transaction::Transaction; -pub mod job; pub mod plugin; pub mod transaction; @@ -32,67 +30,53 @@ impl Registry { self.plugins.push(plugin); } - fn query<'a: 'b, 'b, F, I>( - &'a self, - query: impl Fn(&'b Plugin) -> F + Copy + 'b, - ) -> impl Stream + 'b + fn query<'a, I>(&'a self, query: impl Fn(&'a Plugin) -> I + Copy + 'a) -> impl Iterator + 'a where - F: Future, - I: IntoIterator, + I: IntoIterator + 'a, { - stream::iter( - self.plugins - .iter() - .sorted_by(|a, b| a.priority().cmp(&b.priority()).reverse()) - .map(move |p| { - stream::once(async move { - let packages = query(p).await; - - stream::iter(packages) - }) - .flatten() - }), - ) - .flatten() + self.plugins + .iter() + .sorted_by(|a, b| a.priority().cmp(&b.priority()).reverse()) + .flat_map(query) } /// Return a sorted stream of [`Package`] by provider - pub fn by_provider<'a: 'b, 'b>( + pub fn by_provider<'a>( &'a self, - provider: &'b Provider, + provider: &'a Provider, flags: package::Flags, - ) -> impl Stream + 'b { + ) -> impl Iterator + 'a { self.query(move |plugin| plugin.query_provider(provider, flags)) } /// Return a sorted stream of [`Package`] by name - pub fn by_name<'a: 'b, 'b>( + pub fn by_name<'a>( &'a self, - package_name: &'b package::Name, + package_name: &'a package::Name, flags: package::Flags, - ) -> impl Stream + 'b { + ) -> impl Iterator + 'a { self.query(move |plugin| plugin.query_name(package_name, flags)) } /// Return a sorted stream of [`Package`] by id - pub fn by_id<'a: 'b, 'b>(&'a self, id: &'b package::Id) -> impl Stream + 'b { + pub fn by_id<'a>(&'a self, id: &'a package::Id) -> impl Iterator + 'a { self.query(move |plugin| plugin.package(id)) } /// Return a sorted stream of [`Package`] matching the given [`Flags`] /// /// [`Flags`]: package::Flags - pub fn list(&self, flags: package::Flags) -> impl Stream + '_ { + pub fn list(&self, flags: package::Flags) -> impl Iterator + '_ { self.query(move |plugin| plugin.list(flags)) } /// Return a sorted stream of installed [`Package`] - pub fn list_installed(&self, flags: package::Flags) -> impl Stream + '_ { + pub fn list_installed(&self, flags: package::Flags) -> impl Iterator + '_ { self.list(flags | package::Flags::INSTALLED) } /// Return a sorted stream of available [`Package`] - pub fn list_available(&self, flags: package::Flags) -> impl Stream + '_ { + pub fn list_available(&self, flags: package::Flags) -> impl Iterator + '_ { self.list(flags | package::Flags::AVAILABLE) } @@ -102,11 +86,11 @@ impl Registry { } /// Return a new transaction for this registry initialised with the incoming package set as installed - pub async fn transaction_with_installed( + pub fn transaction_with_installed( &self, incoming: Vec, ) -> Result, transaction::Error> { - transaction::new_with_installed(self, incoming).await + transaction::new_with_installed(self, incoming) } } @@ -116,8 +100,7 @@ mod test { use super::*; - #[tokio::test] - async fn test_ordering() { + fn test_ordering() { let mut registry = Registry::default(); let package = |id: &str, release| Package { @@ -154,10 +137,10 @@ mod test { vec![package("c", 50), package("d", 1)], ))); - let mut query = registry.list(package::Flags::NONE).enumerate().boxed(); + let query = registry.list(package::Flags::NONE); // Packages are sorted by plugin priority, desc -> release number, desc - while let Some((idx, package)) 
= query.next().await { + for (idx, package) in query.enumerate() { let id = |id: &str| package::Id::from(id.to_string()); match idx { @@ -170,8 +153,7 @@ mod test { } } - #[tokio::test] - async fn test_flags() { + fn test_flags() { let mut registry = Registry::default(); let package = |id: &str, flags| Package { @@ -207,10 +189,10 @@ mod test { ], ))); - let installed = registry.list_installed(package::Flags::NONE).collect().await; - let available = registry.list_available(package::Flags::NONE).collect().await; - let installed_source = registry.list_installed(package::Flags::SOURCE).collect().await; - let available_source = registry.list_available(package::Flags::SOURCE).collect().await; + let installed = registry.list_installed(package::Flags::NONE).collect(); + let available = registry.list_available(package::Flags::NONE).collect(); + let installed_source = registry.list_installed(package::Flags::SOURCE).collect(); + let available_source = registry.list_available(package::Flags::SOURCE).collect(); fn matches(actual: Vec, expected: &[&'static str]) -> bool { let actual = actual diff --git a/moss/src/registry/plugin/active.rs b/moss/src/registry/plugin/active.rs index b285338c7..2f10e920d 100644 --- a/moss/src/registry/plugin/active.rs +++ b/moss/src/registry/plugin/active.rs @@ -28,8 +28,8 @@ impl Active { } /// Query the given package - pub async fn package(&self, id: &package::Id) -> Option { - match self.db.get(id).await { + pub fn package(&self, id: &package::Id) -> Option { + match self.db.get(id) { Ok(meta) => self.installed_package(id.clone(), meta), Err(db::meta::Error::RowNotFound) => None, Err(error) => { @@ -40,10 +40,10 @@ impl Active { } /// Query, restricted to state - async fn query(&self, flags: package::Flags, filter: Option) -> Vec { + fn query(&self, flags: package::Flags, filter: Option) -> Vec { if flags.contains(package::Flags::INSTALLED) || flags == package::Flags::NONE { // TODO: Error handling - let packages = match self.db.query(filter).await { + let packages = match self.db.query(filter) { Ok(packages) => packages, Err(error) => { warn!("failed to query repository packages: {error}"); @@ -69,20 +69,18 @@ impl Active { } /// List, restricted to state - pub async fn list(&self, flags: package::Flags) -> Vec { - self.query(flags, None).await + pub fn list(&self, flags: package::Flags) -> Vec { + self.query(flags, None) } /// Query all packages that match the given provider identity - pub async fn query_provider(&self, provider: &Provider, flags: package::Flags) -> Vec { + pub fn query_provider(&self, provider: &Provider, flags: package::Flags) -> Vec { self.query(flags, Some(db::meta::Filter::Provider(provider.clone()))) - .await } /// Query matching by name - pub async fn query_name(&self, package_name: &package::Name, flags: package::Flags) -> Vec { + pub fn query_name(&self, package_name: &package::Name, flags: package::Flags) -> Vec { self.query(flags, Some(db::meta::Filter::Name(package_name.clone()))) - .await } pub fn priority(&self) -> u64 { diff --git a/moss/src/registry/plugin/cobble.rs b/moss/src/registry/plugin/cobble.rs index d08507732..6d40c5acd 100644 --- a/moss/src/registry/plugin/cobble.rs +++ b/moss/src/registry/plugin/cobble.rs @@ -2,13 +2,13 @@ // // SPDX-License-Identifier: MPL-2.0 +use std::fs::File; +use std::io; use std::{collections::HashMap, path::PathBuf}; use crate::package::{self, meta, Meta, MissingMetaFieldError, Package}; -use crate::registry::job::Job; -use crate::{stone, Provider}; -use ::stone::read::PayloadKind; -use 
futures::StreamExt; +use crate::Provider; +use stone::read::PayloadKind; use thiserror::Error; // TODO: @@ -20,22 +20,21 @@ pub struct Cobble { impl Cobble { /// Add a package to the cobble set - pub async fn add_package(&mut self, path: impl Into) -> Result { + pub fn add_package(&mut self, path: impl Into) -> Result { let path = path.into(); - let (_, payloads) = stone::stream_payloads(&path).await?; + let mut file = File::open(&path)?; + let mut reader = stone::read(&mut file)?; + let mut payloads = reader.payloads()?; // Grab the metapayload let metadata = payloads - .filter_map(|result| async { + .find_map(|result| { if let Ok(PayloadKind::Meta(meta)) = result { Some(meta) } else { None } }) - .boxed() - .next() - .await .ok_or(Error::MissingMetaPayload)?; // Whack it into the cobbler @@ -81,10 +80,6 @@ impl Cobble { pub fn priority(&self) -> u64 { u64::MAX } - - pub fn fetch_item(&self, id: &package::Id) -> Job { - todo!() - } } #[derive(Debug, Clone, PartialEq, Eq)] @@ -109,8 +104,11 @@ pub enum Error { #[error("Missing metadata payload")] MissingMetaPayload, + #[error("stone read")] + StoneRead(#[from] stone::read::Error), + #[error("io")] - Io(#[from] stone::read::Error), + Io(#[from] io::Error), #[error("metadata")] Metadata(#[from] MissingMetaFieldError), diff --git a/moss/src/registry/plugin/mod.rs b/moss/src/registry/plugin/mod.rs index 93ae8ec81..e4337aaca 100644 --- a/moss/src/registry/plugin/mod.rs +++ b/moss/src/registry/plugin/mod.rs @@ -19,8 +19,6 @@ pub use self::repository::Repository; #[cfg(test)] pub use self::test::Test; -use super::job::Job; - mod active; mod cobble; mod repository; @@ -41,11 +39,11 @@ pub enum Plugin { impl Plugin { /// Return a package for the given [`package::Id`]. Returns `None` if /// the `package` cannot be located. 
- pub async fn package(&self, id: &package::Id) -> Option { + pub fn package(&self, id: &package::Id) -> Option { match self { - Plugin::Active(plugin) => plugin.package(id).await, + Plugin::Active(plugin) => plugin.package(id), Plugin::Cobble(plugin) => plugin.package(id), - Plugin::Repository(plugin) => plugin.package(id).await, + Plugin::Repository(plugin) => plugin.package(id), #[cfg(test)] Plugin::Test(plugin) => plugin.package(id), @@ -53,11 +51,11 @@ impl Plugin { } /// List all packages with matching `flags` - pub async fn list(&self, flags: package::Flags) -> package::Sorted> { + pub fn list(&self, flags: package::Flags) -> package::Sorted> { package::Sorted::new(match self { - Plugin::Active(plugin) => plugin.list(flags).await, + Plugin::Active(plugin) => plugin.list(flags), Plugin::Cobble(plugin) => plugin.list(flags), - Plugin::Repository(plugin) => plugin.list(flags).await, + Plugin::Repository(plugin) => plugin.list(flags), #[cfg(test)] Plugin::Test(plugin) => plugin.list(flags), @@ -65,11 +63,11 @@ impl Plugin { } /// Returns a list of packages with matching `provider` and `flags` - pub async fn query_provider(&self, provider: &Provider, flags: package::Flags) -> package::Sorted> { + pub fn query_provider(&self, provider: &Provider, flags: package::Flags) -> package::Sorted> { package::Sorted::new(match self { - Plugin::Active(plugin) => plugin.query_provider(provider, flags).await, + Plugin::Active(plugin) => plugin.query_provider(provider, flags), Plugin::Cobble(plugin) => plugin.query_provider(provider, flags), - Plugin::Repository(plugin) => plugin.query_provider(provider, flags).await, + Plugin::Repository(plugin) => plugin.query_provider(provider, flags), #[cfg(test)] Plugin::Test(plugin) => plugin.query_provider(provider, flags), @@ -77,15 +75,11 @@ impl Plugin { } /// Returns a list of packages with matching `package_name` and `flags` - pub async fn query_name( - &self, - package_name: &package::Name, - flags: package::Flags, - ) -> package::Sorted> { + pub fn query_name(&self, package_name: &package::Name, flags: package::Flags) -> package::Sorted> { package::Sorted::new(match self { - Plugin::Active(plugin) => plugin.query_name(package_name, flags).await, + Plugin::Active(plugin) => plugin.query_name(package_name, flags), Plugin::Cobble(plugin) => plugin.query_name(package_name, flags), - Plugin::Repository(plugin) => plugin.query_name(package_name, flags).await, + Plugin::Repository(plugin) => plugin.query_name(package_name, flags), #[cfg(test)] Plugin::Test(plugin) => plugin.query_name(package_name, flags), @@ -105,25 +99,10 @@ impl Plugin { Plugin::Test(plugin) => plugin.priority, } } - - /// Request that the item is fetched from its location into a storage - /// medium. 
- pub fn fetch_item(&self, id: &package::Id) -> Job { - match self { - Plugin::Active(_) => panic!("Active plugin queried for fetch"), - Plugin::Cobble(plugin) => plugin.fetch_item(id), - Plugin::Repository(plugin) => plugin.fetch_item(id), - - #[cfg(test)] - Plugin::Test(plugin) => plugin.fetch_item(id), - } - } } #[cfg(test)] pub mod test { - use std::path::PathBuf; - use super::*; #[derive(Debug, Clone, PartialEq, Eq)] @@ -164,16 +143,5 @@ pub mod test { .cloned() .collect() } - - pub fn fetch_item(&self, id: &package::Id) -> Job { - Job { - domain: crate::registry::job::Domain::Package(id.clone()), - origin: crate::registry::job::Origin::LocalFile(PathBuf::from( - "test/bash-completion-2.11-1-1-x86_64.stone", - )), - check: None, - size: 168864, - } - } } } diff --git a/moss/src/registry/plugin/repository.rs b/moss/src/registry/plugin/repository.rs index 458772e7f..89f015b13 100644 --- a/moss/src/registry/plugin/repository.rs +++ b/moss/src/registry/plugin/repository.rs @@ -7,7 +7,6 @@ use log::warn; use crate::{ db, package::{self, Package}, - registry::job::Job, repository, Provider, }; @@ -25,8 +24,8 @@ impl Repository { self.active.repository.priority.into() } - pub async fn package(&self, id: &package::Id) -> Option { - let result = self.active.db.get(id).await; + pub fn package(&self, id: &package::Id) -> Option { + let result = self.active.db.get(id); match result { Ok(meta) => Some(Package { @@ -50,10 +49,10 @@ impl Repository { } } - async fn query(&self, flags: package::Flags, filter: Option) -> Vec { + fn query(&self, flags: package::Flags, filter: Option) -> Vec { if flags.contains(package::Flags::AVAILABLE) || flags == package::Flags::NONE { // TODO: Error handling - let packages = match self.active.db.query(filter).await { + let packages = match self.active.db.query(filter) { Ok(packages) => packages, Err(error) => { warn!("failed to query repository packages: {error}"); @@ -74,23 +73,17 @@ impl Repository { } } - pub async fn list(&self, flags: package::Flags) -> Vec { - self.query(flags, None).await + pub fn list(&self, flags: package::Flags) -> Vec { + self.query(flags, None) } /// Query all packages that match the given provider identity - pub async fn query_provider(&self, provider: &Provider, flags: package::Flags) -> Vec { + pub fn query_provider(&self, provider: &Provider, flags: package::Flags) -> Vec { self.query(flags, Some(db::meta::Filter::Provider(provider.clone()))) - .await } - pub async fn query_name(&self, package_name: &package::Name, flags: package::Flags) -> Vec { + pub fn query_name(&self, package_name: &package::Name, flags: package::Flags) -> Vec { self.query(flags, Some(db::meta::Filter::Name(package_name.clone()))) - .await - } - - pub fn fetch_item(&self, id: &package::Id) -> Job { - todo!() } } diff --git a/moss/src/registry/transaction.rs b/moss/src/registry/transaction.rs index 47b966e7a..ca9843a27 100644 --- a/moss/src/registry/transaction.rs +++ b/moss/src/registry/transaction.rs @@ -3,7 +3,6 @@ // SPDX-License-Identifier: MPL-2.0 use dag::Dag; -use futures::{StreamExt, TryFutureExt}; use serde::{Deserialize, Serialize}; use thiserror::Error; @@ -53,23 +52,20 @@ pub(super) fn new(registry: &Registry) -> Result, Error> { } /// Populate the transaction on initialisation -pub(super) async fn new_with_installed( - registry: &Registry, - incoming: Vec, -) -> Result, Error> { +pub(super) fn new_with_installed(registry: &Registry, incoming: Vec) -> Result, Error> { let mut tx = new(registry)?; - tx.update(incoming, Lookup::InstalledOnly).await?; + 
tx.update(incoming, Lookup::InstalledOnly)?;
     Ok(tx)
 }
 
 impl<'a> Transaction<'a> {
     /// Add a package to this transaction
-    pub async fn add(&mut self, incoming: Vec<Package>) -> Result<(), Error> {
-        self.update(incoming, Lookup::Global).await
+    pub fn add(&mut self, incoming: Vec<Package>) -> Result<(), Error> {
+        self.update(incoming, Lookup::Global)
     }
 
     /// Remove a set of packages and their reverse dependencies
-    pub async fn remove(&mut self, packages: Vec<package::Id>) {
+    pub fn remove(&mut self, packages: Vec<package::Id>) {
         // Get transposed subgraph
         let transposed = self.packages.transpose();
         let subgraph = transposed.subgraph(&packages);
@@ -87,7 +83,7 @@ impl<'a> Transaction<'a> {
     }
 
     /// Update internal package graph with all incoming packages & their deps
-    async fn update(&mut self, incoming: Vec<Package>, lookup: Lookup) -> Result<(), Error> {
+    fn update(&mut self, incoming: Vec<Package>, lookup: Lookup) -> Result<(), Error> {
         let mut items = incoming;
 
         loop {
@@ -100,8 +96,11 @@ impl<'a> Transaction<'a> {
                 let check_node = self.packages.add_node_or_get_index(check_id.clone());
 
                 // Grab this package in question
-                let matches = self.registry.by_id(check_id).collect::<Vec<_>>().await;
-                let package = matches.first().ok_or(Error::NoCandidate(check_id.clone().into()))?;
+                let package = self
+                    .registry
+                    .by_id(check_id)
+                    .next()
+                    .ok_or(Error::NoCandidate(check_id.clone().into()))?;
 
                 for dependency in package.meta.dependencies.iter() {
                     let provider = Provider {
                         kind: dependency.kind,
@@ -110,8 +109,8 @@ impl<'a> Transaction<'a> {
 
                     // Now get it resolved
                     let search = match lookup {
-                        Lookup::Global => self.resolve_installation_provider(provider).await?,
-                        Lookup::InstalledOnly => self.resolve_provider(ProviderFilter::InstalledOnly(provider)).await?,
+                        Lookup::Global => self.resolve_installation_provider(provider)?,
+                        Lookup::InstalledOnly => self.resolve_provider(ProviderFilter::InstalledOnly(provider))?,
                     };
 
                     // Add dependency node
@@ -134,51 +133,39 @@ impl<'a> Transaction<'a> {
     }
 
     /// Attempt to resolve the filtered provider
-    async fn resolve_provider(&self, filter: ProviderFilter) -> Result<package::Id, Error> {
+    fn resolve_provider(&self, filter: ProviderFilter) -> Result<package::Id, Error> {
         match filter {
             ProviderFilter::All(provider) => self
                 .registry
                 .by_provider(&provider, package::Flags::AVAILABLE)
-                .boxed()
                 .next()
-                .await
-                .map(|p| p.id.clone())
+                .map(|p| p.id)
                 .ok_or(Error::NoCandidate(provider.to_string())),
             ProviderFilter::InstalledOnly(provider) => self
                 .registry
                 .by_provider(&provider, package::Flags::INSTALLED)
-                .boxed()
                 .next()
-                .await
-                .map(|p| p.id.clone())
+                .map(|p| p.id)
                 .ok_or(Error::NoCandidate(provider.to_string())),
             ProviderFilter::Selections(provider) => self
                 .registry
                 .by_provider(&provider, package::Flags::NONE)
-                .filter_map(|f| async {
-                    if self.packages.node_exists(&f.id) {
-                        Some(f)
+                .find_map(|p| {
+                    if self.packages.node_exists(&p.id) {
+                        Some(p.id)
                     } else {
                         None
                     }
                 })
-                .boxed()
-                .next()
-                .await
-                .map(|p| p.id.clone())
                 .ok_or(Error::NoCandidate(provider.to_string())),
         }
     }
 
     // Try all strategies to resolve a provider for installation
-    async fn resolve_installation_provider(&self, provider: Provider) -> Result<package::Id, Error> {
+    fn resolve_installation_provider(&self, provider: Provider) -> Result<package::Id, Error> {
         self.resolve_provider(ProviderFilter::Selections(provider.clone()))
-            .or_else(|_| async {
-                self.resolve_provider(ProviderFilter::InstalledOnly(provider.clone()))
-                    .await
-            })
-            .or_else(|_| async { self.resolve_provider(ProviderFilter::All(provider.clone())).await })
-            .await
+            .or_else(|_| self.resolve_provider(ProviderFilter::InstalledOnly(provider.clone())))
+            .or_else(|_| self.resolve_provider(ProviderFilter::All(provider)))
     }
 }
diff --git a/moss/src/repository/manager.rs b/moss/src/repository/manager.rs
index 8cbbf035f..153cd97fa 100644
--- a/moss/src/repository/manager.rs
+++ b/moss/src/repository/manager.rs
@@ -3,15 +3,17 @@
 // SPDX-License-Identifier: MPL-2.0
 
 use std::collections::HashMap;
-use std::path::PathBuf;
+use std::fs::{self, File};
+use std::io;
+use std::path::{Path, PathBuf};
 
-use futures::{future, stream, StreamExt, TryStreamExt};
+use futures::{stream, StreamExt, TryFutureExt, TryStreamExt};
+use itertools::Itertools;
 use thiserror::Error;
-use tokio::{fs, io};
 use xxhash_rust::xxh3::xxh3_64;
 
 use crate::db::meta;
-use crate::{environment, stone};
+use crate::{environment, runtime};
 use crate::{package, Installation};
 use crate::repository::{self, Repository};
 
@@ -43,14 +45,14 @@ impl Manager {
     }
 
     /// Create a [`Manager`] for the supplied [`Installation`] using system configurations
-    pub async fn system(config: config::Manager, installation: Installation) -> Result<Self, Error> {
-        Self::new(Source::System(config), installation).await
+    pub fn system(config: config::Manager, installation: Installation) -> Result<Self, Error> {
+        Self::new(Source::System(config), installation)
     }
 
     /// Create a [`Manager`] for the supplied [`Installation`] using the provided configurations
     ///
     /// [`Manager`] can't be used to `add` new repos in this mode
-    pub async fn explicit(
+    pub fn explicit(
         identifier: impl ToString,
         repos: repository::Map,
         installation: Installation,
@@ -62,17 +64,15 @@ impl Manager {
             },
             installation,
         )
-        .await
     }
 
-    async fn new(source: Source, installation: Installation) -> Result<Self, Error> {
+    fn new(source: Source, installation: Installation) -> Result<Self, Error> {
         let configs = match &source {
             Source::System(config) =>
             // Load all configs, default if none exist
             {
                 config
                     .load::<repository::Map>()
-                    .await
                     .into_iter()
                     .reduce(repository::Map::merge)
                     .unwrap_or_default()
@@ -81,14 +81,14 @@ impl Manager {
         };
 
         // Open all repo meta dbs and collect into hash map
-        let repositories = future::try_join_all(configs.into_iter().map(|(id, repository)| async {
-            let db = open_meta_db(source.identifier(), &repository, &installation).await?;
+        let repositories = configs
+            .into_iter()
+            .map(|(id, repository)| {
+                let db = open_meta_db(source.identifier(), &repository, &installation)?;
 
-            Ok::<_, Error>((id.clone(), repository::Active { id, repository, db }))
-        }))
-        .await?
-        .into_iter()
-        .collect();
+                Ok((id.clone(), repository::Active { id, repository, db }))
+            })
+            .collect::<Result<_, Error>>()?;
 
         Ok(Self {
             source,
@@ -98,7 +98,7 @@
     }
 
     /// Add a [`Repository`]
-    pub async fn add_repository(&mut self, id: repository::Id, repository: Repository) -> Result<(), Error> {
+    pub fn add_repository(&mut self, id: repository::Id, repository: Repository) -> Result<(), Error> {
         let Source::System(config) = &self.source else {
             return Err(Error::ExplicitUnsupported);
         };
@@ -108,11 +108,10 @@
         // multiple configuration files
         {
             let map = repository::Map::with([(id.clone(), repository.clone())]);
-
-            config.save(&id, &map).await.map_err(Error::SaveConfig)?;
+            config.save(&id, &map).map_err(Error::SaveConfig)?;
         }
 
-        let db = open_meta_db(self.source.identifier(), &repository, &self.installation).await?;
+        let db = open_meta_db(self.source.identifier(), &repository, &self.installation)?;
 
         self.repositories
             .insert(id.clone(), repository::Active { id, repository, db });
@@ -123,21 +122,30 @@
     /// Refresh all [`Repository`]s by fetching their latest index
     /// files and updating their associated meta databases
     pub async fn refresh_all(&mut self) -> Result<(), Error> {
-        // Fetch index file + add to meta_db
-        future::try_join_all(
-            self.repositories
-                .iter()
-                .map(|(id, state)| refresh_index(self.source.identifier(), state, &self.installation)),
-        )
-        .await?;
+        // Fetch index files asynchronously
+        let fetched = stream::iter(&self.repositories)
+            .map(|(_, state)| {
+                fetch_index(self.source.identifier(), state, &self.installation)
+                    .and_then(move |file| async move { Ok((state.clone(), file)) })
+            })
+            .buffer_unordered(environment::MAX_NETWORK_CONCURRENCY)
+            .try_collect::<Vec<_>>()
+            .await?;
+
+        // Add each file to its meta_db
+        for (state, file) in fetched {
+            runtime::unblock(move || update_meta_db(&state, &file)).await?;
+        }
 
         Ok(())
     }
 
     /// Refresh a [`Repository`] by Id
     pub async fn refresh(&mut self, id: &repository::Id) -> Result<(), Error> {
-        if let Some(repo) = self.repositories.get(id) {
-            refresh_index(self.source.identifier(), repo, &self.installation).await
+        if let Some(repo) = self.repositories.get(id).cloned() {
+            let file = fetch_index(self.source.identifier(), &repo, &self.installation).await?;
+            runtime::unblock(move || update_meta_db(&repo, &file)).await?;
+            Ok(())
         } else {
             Err(Error::UnknownRepo(id.clone()))
        }
@@ -149,24 +157,29 @@
     /// This is useful to call when initializing the moss client in case users added configs
     /// manually outside the CLI
     pub async fn ensure_all_initialized(&mut self) -> Result<(), Error> {
-        let initialized = stream::iter(&self.repositories)
-            .filter(|(id, state)| async {
-                let index_file =
-                    cache_dir(self.source.identifier(), &state.repository, &self.installation).join("stone.index");
-
-                !index_file.exists()
-            })
-            .map(|(id, state)| async {
-                println!("Initializing repo {}...", *id);
+        let uninitialized = self.repositories.iter().filter_map(|(id, state)| {
+            let index_file =
+                cache_dir(self.source.identifier(), &state.repository, &self.installation).join("stone.index");
+
+            if !index_file.exists() {
+                Some(state)
+            } else {
+                None
+            }
+        });
 
-                refresh_index(self.source.identifier(), state, &self.installation).await
+        let fetched = stream::iter(uninitialized)
+            .map(|state| {
+                fetch_index(self.source.identifier(), state, &self.installation)
+                    .and_then(move |file| async move { Ok((state.clone(), file)) })
             })
             .buffer_unordered(environment::MAX_NETWORK_CONCURRENCY)
             .try_collect::<Vec<_>>()
             .await?;
 
-        if !initialized.is_empty() {
-            println!();
+        // Add each file to its meta_db
+        for (state, file) in fetched {
+            runtime::unblock(move || update_meta_db(&state, &file)).await?;
         }
 
         Ok(())
     }
@@ -178,7 +191,7 @@
     }
 
     /// Remove a repository, deleting any related config & cached data
-    pub async fn remove(&mut self, id: impl Into<repository::Id>) -> Result<Removal, Error> {
+    pub fn remove(&mut self, id: impl Into<repository::Id>) -> Result<Removal, Error> {
         // Only allow removal for system repo manager
         let Source::System(config) = &self.source else {
            return Err(Error::ExplicitUnsupported);
        };
@@ -193,12 +206,12 @@
 
         // Remove cache
         if cache_dir.exists() {
-            fs::remove_dir_all(&cache_dir).await.map_err(Error::RemoveDir)?;
+            fs::remove_dir_all(&cache_dir).map_err(Error::RemoveDir)?;
         }
 
         // Delete config, only succeeds for configs that live in their
         // own config file w/ matching repo name
-        if config.delete::<repository::Map>(&repo.id).await.is_err() {
+        if config.delete::<repository::Map>(&repo.id).is_err() {
             return Ok(Removal::ConfigDeleted(false));
         }
 
@@ -219,49 +232,57 @@ fn cache_dir(identifier: &str, repo: &Repository, installation: &Installation) -
 
 /// Open the meta db file, ensuring its
 /// directory exists
-async fn open_meta_db(
-    identifier: &str,
-    repo: &Repository,
-    installation: &Installation,
-) -> Result<meta::Database, Error> {
+fn open_meta_db(identifier: &str, repo: &Repository, installation: &Installation) -> Result<meta::Database, Error> {
     let dir = cache_dir(identifier, repo, installation);
 
-    fs::create_dir_all(&dir).await.map_err(Error::CreateDir)?;
+    fs::create_dir_all(&dir).map_err(Error::CreateDir)?;
 
-    let db = meta::Database::new(dir.join("db"), installation.read_only()).await?;
+    let db = meta::Database::new(dir.join("db"), installation.read_only())?;
 
     Ok(db)
 }
 
-/// Fetches a stone index file from the repository URL,
-/// saves it to the repo installation path, then
-/// loads its metadata into the meta db
-async fn refresh_index(identifier: &str, state: &repository::Active, installation: &Installation) -> Result<(), Error> {
+/// Fetches a stone index file from the repository URL
+/// and saves it to the repo installation path
+async fn fetch_index(
+    identifier: &str,
+    state: &repository::Active,
+    installation: &Installation,
+) -> Result<PathBuf, Error> {
     let out_dir = cache_dir(identifier, &state.repository, installation);
 
-    fs::create_dir_all(&out_dir).await.map_err(Error::CreateDir)?;
+    tokio::fs::create_dir_all(&out_dir).await.map_err(Error::CreateDir)?;
 
     let out_path = out_dir.join("stone.index");
 
     // Fetch index & write to `out_path`
     repository::fetch_index(state.repository.uri.clone(), &out_path).await?;
 
+    Ok(out_path)
+}
+
+/// Updates a stone's metadata in the meta db
+fn update_meta_db(state: &repository::Active, index_path: &Path) -> Result<(), Error> {
     // Wipe db since we're refreshing from a new index file
-    state.db.wipe().await?;
+    state.db.wipe()?;
 
     // Get a stream of payloads
-    let (_, payloads) = stone::stream_payloads(&out_path).await?;
+    let mut file = File::open(index_path).map_err(Error::OpenIndex)?;
+    let mut reader = stone::read(&mut file)?;
+    let payloads = reader.payloads()?;
 
     // Update each payload into the meta db
     payloads
-        .map_err(Error::ReadStone)
         // Batch up to `DB_BATCH_SIZE` payloads
         .chunks(environment::DB_BATCH_SIZE)
+        .into_iter()
        // Transpose error for early bail
-        .map(|results| results.into_iter().collect::<Result<Vec<_>, _>>())
-        .try_for_each(|payloads| async {
+        .map(|chunk| chunk.into_iter().collect::<Result<Vec<_>, _>>())
+        .try_for_each(|result| {
+            let chunk = result?;
+
             // Construct Meta for each payload
-            let packages = payloads
+            let packages = chunk
                 .into_iter()
                 .filter_map(|payload| {
                     if let stone::read::PayloadKind::Meta(meta) = payload {
@@ -290,9 +311,8 @@ async fn refresh_index(identifier: &str, state: &repository::Active, installatio
             // package has 13 binds x 1k batch size = 13k. This leaves us
             // overhead to add more binds in the future, otherwise we can
             // lower the `DB_BATCH_SIZE`.
-            state.db.batch_add(packages).await.map_err(Error::Database)
-        })
-        .await?;
+            state.db.batch_add(packages).map_err(Error::Database)
+        })?;
 
     Ok(())
 }
@@ -309,6 +329,8 @@ pub enum Error {
     RemoveDir(#[source] io::Error),
     #[error("fetch index file")]
     FetchIndex(#[from] repository::FetchError),
+    #[error("open index file")]
+    OpenIndex(#[source] io::Error),
     #[error("read index file")]
     ReadStone(#[from] stone::read::Error),
     #[error("meta db")]
diff --git a/moss/src/runtime.rs b/moss/src/runtime.rs
new file mode 100644
index 000000000..e2a807afe
--- /dev/null
+++ b/moss/src/runtime.rs
@@ -0,0 +1,69 @@
+// SPDX-FileCopyrightText: Copyright © 2020-2024 Serpent OS Developers
+//
+// SPDX-License-Identifier: MPL-2.0
+
+use std::{
+    future::Future,
+    io,
+    sync::{OnceLock, RwLock},
+};
+
+use tokio::runtime::{self, Handle};
+
+static RUNTIME: OnceLock<RwLock<Option<Runtime>>> = OnceLock::new();
+
+pub fn init() -> Guard {
+    let lock = RUNTIME.get_or_init(Default::default);
+    *lock.write().unwrap() = Some(Runtime::new().expect("build runtime"));
+    Guard
+}
+
+fn destroy() {
+    let rt = RUNTIME
+        .get()
+        .unwrap()
+        .write()
+        .unwrap()
+        .take()
+        .expect("runtime initialized");
+    drop(rt);
+}
+
+#[must_use = "runtime is dropped with guard"]
+pub struct Guard;
+
+impl Guard {
+    pub fn destroy(self) {
+        destroy();
+    }
+}
+
+impl Drop for Guard {
+    fn drop(&mut self) {
+        destroy();
+    }
+}
+
+struct Runtime(runtime::Runtime);
+
+impl Runtime {
+    fn new() -> io::Result<Self> {
+        Ok(Self(runtime::Builder::new_current_thread().enable_all().build()?))
+    }
+}
+
+/// Run the provided future on the current runtime.
+pub fn block_on<T, F>(task: F) -> T
+where
+    F: Future<Output = T>,
+{
+    let _guard = RUNTIME.get().unwrap().read().unwrap();
+    let rt = _guard.as_ref().expect("runtime initialized");
+    rt.0.block_on(task)
+}
+
+/// Runs the provided function on an executor dedicated to blocking.
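+///
+/// A minimal usage sketch (hypothetical, not part of this patch; it assumes
+/// `runtime::init` has been called and the returned guard is still alive so
+/// `block_on` can enter the runtime):
+///
+/// ```ignore
+/// let release = runtime::block_on(async {
+///     // Hop off the async executor for blocking filesystem IO
+///     runtime::unblock(|| std::fs::read_to_string("/etc/os-release")).await
+/// });
+/// ```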
+pub async fn unblock(f: impl FnOnce() -> T + Send + 'static) -> T { + let handle = Handle::current(); + handle.spawn_blocking(f).await.expect("spawn blocking") +} diff --git a/moss/src/stone.rs b/moss/src/stone.rs deleted file mode 100644 index 7822045ac..000000000 --- a/moss/src/stone.rs +++ /dev/null @@ -1,67 +0,0 @@ -// SPDX-FileCopyrightText: Copyright © 2020-2024 Serpent OS Developers -// -// SPDX-License-Identifier: MPL-2.0 - -pub use stone::header; -pub use stone::payload; -pub use stone::write; - -pub use self::read::stream_payloads; -pub use self::write::Writer; - -pub mod read { - use std::{fs::File, path::PathBuf}; - - use futures::Stream; - pub use stone::read::{Error, PayloadKind}; - use stone::Header; - use tokio::{sync::mpsc, task}; - use tokio_stream::wrappers::ReceiverStream; - - pub async fn stream_payloads( - path: impl Into, - ) -> Result<(Header, impl Stream>), Error> { - // Receive potential error when reading before payloads - let (setup_sender, mut setup_receiver) = mpsc::channel(1); - // Receive payloads - let (payload_sender, payload_receiver) = mpsc::channel(1); - - let path = path.into(); - - // Read payloads in blocking context and send them over channel - task::spawn_blocking(move || { - let setup = || { - let file = File::open(path)?; - stone::read(file) - }; - - match setup() { - Err(error) => { - let _ = setup_sender.blocking_send(Err(error)); - } - Ok(mut stone) => { - let header = stone.header; - - match stone.payloads() { - Err(error) => { - let _ = setup_sender.blocking_send(Err(error)); - } - Ok(payloads) => { - let _ = setup_sender.blocking_send(Ok(header)); - - for result in payloads { - let _ = payload_sender.blocking_send(result); - } - } - } - } - } - }); - - match setup_receiver.recv().await.unwrap() { - // Receive each payload in streaming fashion - Ok(header) => Ok((header, ReceiverStream::new(payload_receiver))), - Err(error) => Err(error), - } - } -} From d6473bd21024a1d69a822a754a29dbaaca3e8990 Mon Sep 17 00:00:00 2001 From: Ikey Doherty Date: Sun, 3 Mar 2024 01:16:02 +0000 Subject: [PATCH 21/26] boulder/build: Fix runtime drops (double drop) Signed-off-by: Ikey Doherty --- boulder/src/build.rs | 2 +- moss/src/runtime.rs | 9 ++------- 2 files changed, 3 insertions(+), 8 deletions(-) diff --git a/boulder/src/build.rs b/boulder/src/build.rs index 199274ed8..5d53420c0 100644 --- a/boulder/src/build.rs +++ b/boulder/src/build.rs @@ -113,7 +113,7 @@ impl Builder { root::populate(self, repos)?; upstream::sync(&self.recipe, &self.paths)?; - rt.destroy(); + drop(rt); // We want to ensure no threads exist before // cloning into container. Sometimes a deadlock // occurs which appears related to a race condition diff --git a/moss/src/runtime.rs b/moss/src/runtime.rs index e2a807afe..fd876a202 100644 --- a/moss/src/runtime.rs +++ b/moss/src/runtime.rs @@ -29,18 +29,13 @@ fn destroy() { drop(rt); } +/// Drop the Guard to drop the runtime! #[must_use = "runtime is dropped with guard"] pub struct Guard; -impl Guard { - pub fn destroy(self) { - destroy(); - } -} - impl Drop for Guard { fn drop(&mut self) { - destroy(); + destroy() } } From fd984f02c88527152f3bd59be94304c83cb3535f Mon Sep 17 00:00:00 2001 From: Ikey Doherty Date: Sun, 3 Mar 2024 01:16:44 +0000 Subject: [PATCH 22/26] client/postblit: Rework trigger scopes to allow ephemeral roots We rejigger the scope to incorporate the client scope and tightly control the logic behind the bind paths, ensuring all possible paths for trigger isolation and execution are properly accounted for. 
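
For illustration, every bind path is now derived from the client scope, so
an ephemeral root can never leak host paths into the sandbox. A rough
sketch of the resolution idea (simplified; the real TriggerScope below also
carries the Installation and keeps transaction and system triggers
distinct):

    use std::path::{Path, PathBuf};

    enum Scope {
        Stateful,
        Ephemeral { blit_root: PathBuf },
    }

    // Resolve a path against whichever root the scope dictates; the
    // `system_root` parameter here stands in for Installation::root.
    fn resolve(scope: &Scope, system_root: &Path, path: &Path) -> PathBuf {
        match scope {
            Scope::Stateful => system_root.join(path),
            Scope::Ephemeral { blit_root } => blit_root.join(path),
        }
    }
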
As a result we can now take advantage of system + transaction triggers from within ephemeral roots, powering the new boulder port. Signed-off-by: Ikey Doherty --- moss/src/client/mod.rs | 31 ++++++++++++-- moss/src/client/postblit.rs | 80 ++++++++++++++++++++++++++++++------- 2 files changed, 93 insertions(+), 18 deletions(-) diff --git a/moss/src/client/mod.rs b/moss/src/client/mod.rs index 13c8e3736..08f7f10b0 100644 --- a/moss/src/client/mod.rs +++ b/moss/src/client/mod.rs @@ -3,7 +3,8 @@ // SPDX-License-Identifier: MPL-2.0 use std::{ - fs, io, + fs::{self, create_dir_all}, + io, os::{fd::RawFd, unix::fs::symlink}, path::{Path, PathBuf}, time::Duration, @@ -219,7 +220,10 @@ impl Client { record_os_release(&self.installation.staging_dir(), Some(state.id))?; // Run all of the transaction triggers - let triggers = postblit::triggers(postblit::TriggerScope::Transaction(&self.installation), &fstree)?; + let triggers = postblit::triggers( + postblit::TriggerScope::Transaction(&self.installation, &self.scope), + &fstree, + )?; create_root_links(&self.installation.isolation_dir())?; for trigger in triggers { trigger.execute()?; @@ -235,7 +239,8 @@ impl Client { } // At this point we're allowed to run system triggers - let sys_triggers = postblit::triggers(postblit::TriggerScope::System(&self.installation), &fstree)?; + let sys_triggers = + postblit::triggers(postblit::TriggerScope::System(&self.installation, &self.scope), &fstree)?; for trigger in sys_triggers { trigger.execute()?; } @@ -245,6 +250,25 @@ impl Client { Scope::Ephemeral { blit_root } => { record_os_release(blit_root, None)?; create_root_links(blit_root)?; + create_root_links(&self.installation.isolation_dir())?; + + let etc = blit_root.join("etc"); + create_dir_all(etc)?; + + // ephemeral tx triggers + let triggers = postblit::triggers( + postblit::TriggerScope::Transaction(&self.installation, &self.scope), + &fstree, + )?; + for trigger in triggers { + trigger.execute()?; + } + // ephemeral system triggers + let sys_triggers = + postblit::triggers(postblit::TriggerScope::System(&self.installation, &self.scope), &fstree)?; + for trigger in sys_triggers { + trigger.execute()?; + } Ok(None) } } @@ -629,6 +653,7 @@ BUG_REPORT_URL="https://github.com/serpent-os""#, Ok(()) } +#[derive(Clone, Debug)] enum Scope { Stateful, Ephemeral { blit_root: PathBuf }, diff --git a/moss/src/client/postblit.rs b/moss/src/client/postblit.rs index d2f8aaafd..4ed11e9a0 100644 --- a/moss/src/client/postblit.rs +++ b/moss/src/client/postblit.rs @@ -8,7 +8,10 @@ //! //! Note that currently we only load from `/usr/share/moss/triggers/{tx,sys.d}/*.yaml` //! 
and do not yet support local triggers -use std::{path::Path, process}; +use std::{ + path::{Path, PathBuf}, + process, +}; use container::Container; use itertools::Itertools; @@ -46,8 +49,52 @@ impl config::Config for SystemTrigger { /// Defines the scope of triggers #[derive(Clone, Copy, Debug)] pub(super) enum TriggerScope<'a> { - Transaction(&'a Installation), - System(&'a Installation), + Transaction(&'a Installation, &'a super::Scope), + System(&'a Installation, &'a super::Scope), +} + +impl<'a> TriggerScope<'a> { + // Determine the correct root directory + fn root_dir(&self) -> PathBuf { + match self { + TriggerScope::Transaction(install, scope) => match scope { + super::Scope::Stateful => install.staging_dir().clone(), + super::Scope::Ephemeral { blit_root } => blit_root.clone(), + }, + TriggerScope::System(install, scope) => match scope { + super::Scope::Stateful => install.root.clone(), + super::Scope::Ephemeral { blit_root } => blit_root.clone(), + }, + } + } + + /// Join "host" paths, outside the staging filesystem. Ensure no sandbox break for ephemeral + fn host_path(&self, path: impl AsRef) -> PathBuf { + match self { + TriggerScope::Transaction(install, scope) => match scope { + super::Scope::Stateful => install.root.join(path), + super::Scope::Ephemeral { blit_root } => blit_root.join(path), + }, + TriggerScope::System(install, scope) => match scope { + super::Scope::Stateful => install.root.join(path), + super::Scope::Ephemeral { blit_root } => blit_root.join(path), + }, + } + } + + /// Join guest paths, inside the staging filesystem. Ensure no sandbox break for ephemeral + fn guest_path(&self, path: impl AsRef) -> PathBuf { + match self { + TriggerScope::Transaction(install, scope) => match scope { + super::Scope::Stateful => install.staging_path(path), + super::Scope::Ephemeral { blit_root } => blit_root.join(path), + }, + TriggerScope::System(install, scope) => match scope { + super::Scope::Stateful => install.root.join(path), + super::Scope::Ephemeral { blit_root } => blit_root.join(path), + }, + } + } } #[derive(Debug)] @@ -66,12 +113,14 @@ pub(super) fn triggers<'a>( // Load appropriate triggers from their locations and convert back to a vec of Trigger let triggers = match scope { - TriggerScope::Transaction(install) => config::Manager::custom(install.staging_dir().join(trigger_root)) - .load::() - .into_iter() - .map(|t| t.0) - .collect_vec(), - TriggerScope::System(install) => config::Manager::custom(install.root.join(trigger_root)) + TriggerScope::Transaction(install, client_scope) => { + config::Manager::custom(scope.root_dir().join(trigger_root)) + .load::() + .into_iter() + .map(|t| t.0) + .collect_vec() + } + TriggerScope::System(install, client_scope) => config::Manager::custom(scope.root_dir().join(trigger_root)) .load::() .into_iter() .map(|t| t.0) @@ -92,18 +141,18 @@ pub(super) fn triggers<'a>( impl<'a> TriggerRunner<'a> { pub fn execute(&self) -> Result<(), Error> { match self.scope { - TriggerScope::Transaction(install) => { + TriggerScope::Transaction(install, client_scope) => { // TODO: Add caching support via /var/ let isolation = Container::new(install.isolation_dir()) .networking(false) .override_accounts(false) - .bind_ro(install.root.join("etc"), "/etc") - .bind_rw(install.staging_path("usr"), "/usr") + .bind_ro(self.scope.host_path("etc"), "/etc") + .bind_rw(self.scope.guest_path("usr"), "/usr") .work_dir("/"); Ok(isolation.run(|| execute_trigger_directly(&self.trigger))?) 
} - TriggerScope::System(install) => { + TriggerScope::System(install, client_scope) => { // OK, if the root == `/` then we can run directly, otherwise we need to containerise with RW. if install.root.to_string_lossy() == "/" { Ok(execute_trigger_directly(&self.trigger)?) @@ -111,9 +160,10 @@ impl<'a> TriggerRunner<'a> { let isolation = Container::new(install.isolation_dir()) .networking(false) .override_accounts(false) - .bind_rw(install.root.join("etc"), "/etc") - .bind_rw(install.root.join("usr"), "/usr") + .bind_rw(self.scope.host_path("etc"), "/etc") + .bind_rw(self.scope.guest_path("usr"), "/usr") .work_dir("/"); + Ok(isolation.run(|| execute_trigger_directly(&self.trigger))?) } } From 06807a76b687bd292e48b169d36369a88d4417d0 Mon Sep 17 00:00:00 2001 From: Cory Forsstrom Date: Sun, 3 Mar 2024 07:46:57 -0800 Subject: [PATCH 23/26] Use mutex prior to blocking on async for DB --- moss/src/db/mod.rs | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/moss/src/db/mod.rs b/moss/src/db/mod.rs index 9f8d47a2d..ad8bc9593 100644 --- a/moss/src/db/mod.rs +++ b/moss/src/db/mod.rs @@ -2,10 +2,12 @@ // // SPDX-License-Identifier: MPL-2.0 -use std::{future::Future, sync::Arc}; +use std::{ + future::Future, + sync::{Arc, Mutex}, +}; use sqlx::Sqlite; -use tokio::sync::Mutex; use crate::runtime; @@ -25,9 +27,8 @@ impl Pool { where F: Future, { - runtime::block_on(async { - let pool = self.0.lock().await.clone(); - f(pool).await - }) + let _guard = self.0.lock().expect("mutex guard"); + let pool = _guard.clone(); + runtime::block_on(f(pool)) } } From f4527826cbfd4efc831dfae4d946a655827b2c94 Mon Sep 17 00:00:00 2001 From: Cory Forsstrom Date: Fri, 1 Mar 2024 13:25:55 -0800 Subject: [PATCH 24/26] Add timings to boulder output --- Cargo.lock | 5 +- boulder/Cargo.toml | 1 + boulder/src/architecture.rs | 4 +- boulder/src/build.rs | 272 ++++++++++++++++++------------------ boulder/src/cli/build.rs | 40 +++++- boulder/src/lib.rs | 2 + boulder/src/package.rs | 113 ++++++++------- boulder/src/package/emit.rs | 4 +- boulder/src/timing.rs | 207 +++++++++++++++++++++++++++ 9 files changed, 444 insertions(+), 204 deletions(-) create mode 100644 boulder/src/timing.rs diff --git a/Cargo.lock b/Cargo.lock index ac7e53487..26d513c42 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -194,6 +194,7 @@ dependencies = [ name = "boulder" version = "0.1.0" dependencies = [ + "chrono", "clap", "config", "container", @@ -1079,9 +1080,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.1.0" +version = "2.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d530e1a18b1cb4c484e6e34556a0d948706958449fca0cab753d649f2bce3d1f" +checksum = "7b0b929d511467233429c45a44ac1dcaa21ba0f5ba11e4879e6ed28ddb4f9df4" dependencies = [ "equivalent", "hashbrown", diff --git a/boulder/Cargo.toml b/boulder/Cargo.toml index 49f9e30c8..3aa901922 100644 --- a/boulder/Cargo.toml +++ b/boulder/Cargo.toml @@ -14,6 +14,7 @@ stone_recipe = { path = "../crates/stone_recipe" } tui = { path = "../crates/tui" } yaml = { path = "../crates/yaml" } +chrono.workspace = true clap.workspace = true derive_more.workspace = true dirs.workspace = true diff --git a/boulder/src/architecture.rs b/boulder/src/architecture.rs index 9b604b9cc..0a5e43115 100644 --- a/boulder/src/architecture.rs +++ b/boulder/src/architecture.rs @@ -19,7 +19,7 @@ pub const fn host() -> Architecture { } } -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, strum::Display)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, 
Hash, strum::Display)] #[strum(serialize_all = "lowercase")] pub enum Architecture { X86_64, @@ -37,7 +37,7 @@ impl Architecture { } } -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Display)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord, Display)] pub enum BuildTarget { #[display(fmt = "{_0}")] Native(Architecture), diff --git a/boulder/src/build.rs b/boulder/src/build.rs index 5d53420c0..961c99d5d 100644 --- a/boulder/src/build.rs +++ b/boulder/src/build.rs @@ -24,16 +24,13 @@ use thiserror::Error; use tui::Stylize; pub mod job; -mod pgo; +pub mod pgo; mod root; mod upstream; use self::job::Job; use crate::{ - architecture::BuildTarget, - container, macros, - package::{self, Packager}, - profile, recipe, util, Env, Macros, Paths, Recipe, + architecture::BuildTarget, container, macros, profile, recipe, timing, util, Env, Macros, Paths, Recipe, Timing, }; pub struct Builder { @@ -103,6 +100,9 @@ impl Builder { } pub fn setup(&self) -> Result<(), Error> { + // Remove old artifacts + util::recreate_dir(&self.paths.artefacts().host).map_err(Error::RecreateArtefactsDir)?; + root::clean(self)?; let rt = runtime::init(); @@ -128,128 +128,115 @@ impl Builder { Ok(()) } - pub fn build(self) -> Result { - container::exec(&self.paths, self.recipe.parsed.options.networking, || { - // We're now in the container =) - - // Set ourselves into our own process group - // and set it as fg term - // - // This is so we can restore this process back as - // the fg term after using `bash` for chroot below - // so we can reestablish SIGINT forwarding to scripts - setpgid(Pid::from_raw(0), Pid::from_raw(0))?; - let pgid = getpgrp(); - ::container::set_term_fg(pgid)?; - - for (i, target) in self.targets.iter().enumerate() { - if i > 0 { - println!(); + pub fn build(&self, timing: &mut Timing) -> Result<(), Error> { + // Set ourselves into our own process group + // and set it as fg term + // + // This is so we can restore this process back as + // the fg term after using `bash` for chroot below + // so we can reestablish SIGINT forwarding to scripts + setpgid(Pid::from_raw(0), Pid::from_raw(0))?; + let pgid = getpgrp(); + ::container::set_term_fg(pgid)?; + + for (i, target) in self.targets.iter().enumerate() { + println!("{}", build_target_prefix(target.build_target, i)); + + for (i, job) in target.jobs.iter().enumerate() { + let is_pgo = job.pgo_stage.is_some(); + + // Recreate work dir for each job + util::recreate_dir(&job.work_dir)?; + // Ensure pgo dir exists + if is_pgo { + let pgo_dir = PathBuf::from(format!("{}-pgo", job.build_dir.display())); + util::ensure_dir_exists(&pgo_dir)?; } - println!("{}", target.build_target.to_string().dim()); - - for (i, job) in target.jobs.iter().enumerate() { - let is_pgo = job.pgo_stage.is_some(); - - // Recreate work dir for each job - util::recreate_dir(&job.work_dir)?; - // Ensure pgo dir exists - if is_pgo { - let pgo_dir = PathBuf::from(format!("{}-pgo", job.build_dir.display())); - util::ensure_dir_exists(&pgo_dir)?; - } - - if let Some(stage) = job.pgo_stage { - if i > 0 { - println!("{}", "│".dim()); - } - println!("{}", format!("│pgo-{stage}").dim()); - } - for (i, (phase, script)) in job.phases.iter().enumerate() { - let pipes = if job.pgo_stage.is_some() { - "││".dim() - } else { - "│".dim() - }; + if let Some(stage) = job.pgo_stage { + println!("{}", pgo_stage_prefix(stage, i)); + } - if i > 0 { - println!("{pipes}"); - } - println!("{pipes}{}", phase.styled(format!("{phase}"))); - - let build_dir = &job.build_dir; - let work_dir = 
&job.work_dir; - let current_dir = if work_dir.exists() { &work_dir } else { &build_dir }; - - for command in &script.commands { - match command { - script::Command::Break(breakpoint) => { - let line_num = breakpoint_line(breakpoint, &self.recipe, job.target, *phase) - .map(|line_num| format!(" at line {line_num}")) - .unwrap_or_default(); - - println!( - "\n{}{} {}", - "Breakpoint".bold(), - line_num, - if breakpoint.exit { - "(exit)".dim() - } else { - "(continue)".dim() - }, - ); - - // Write env to $HOME/.profile - std::fs::write(build_dir.join(".profile"), format_profile(script))?; - - let mut command = process::Command::new("/bin/bash") - .arg("--login") + for (i, (phase, script)) in job.phases.iter().enumerate() { + println!("{}", phase_prefix(*phase, is_pgo, i)); + + let build_dir = &job.build_dir; + let work_dir = &job.work_dir; + let current_dir = if work_dir.exists() { &work_dir } else { &build_dir }; + + let timer = timing.begin(timing::Kind::Build(timing::Build { + target: job.target, + pgo_stage: job.pgo_stage, + phase: *phase, + })); + + for command in &script.commands { + match command { + script::Command::Break(breakpoint) => { + let line_num = breakpoint_line(breakpoint, &self.recipe, job.target, *phase) + .map(|line_num| format!(" at line {line_num}")) + .unwrap_or_default(); + + println!( + "\n{}{} {}", + "Breakpoint".bold(), + line_num, + if breakpoint.exit { + "(exit)".dim() + } else { + "(continue)".dim() + }, + ); + + // Write env to $HOME/.profile + std::fs::write(build_dir.join(".profile"), format_profile(script))?; + + let mut command = process::Command::new("/bin/bash") + .arg("--login") + .env_clear() + .env("HOME", build_dir) + .env("PATH", "/usr/bin:/usr/sbin") + .env("TERM", "xterm-256color") + .current_dir(current_dir) + .spawn()?; + + command.wait()?; + + // Restore ourselves as fg term since bash steals it + ::container::set_term_fg(pgid)?; + + if breakpoint.exit { + return Ok(()); + } + } + script::Command::Content(content) => { + // TODO: Proper temp file + let script_path = "/tmp/script"; + std::fs::write(script_path, content).unwrap(); + + let result = logged(*phase, is_pgo, "/bin/sh", |command| { + command + .arg(script_path) .env_clear() .env("HOME", build_dir) .env("PATH", "/usr/bin:/usr/sbin") - .env("TERM", "xterm-256color") .current_dir(current_dir) - .spawn()?; - - command.wait()?; + })?; - // Restore ourselves as fg term since bash steals it - ::container::set_term_fg(pgid)?; - - if breakpoint.exit { - return Ok(()); - } - } - script::Command::Content(content) => { - // TODO: Proper temp file - let script_path = "/tmp/script"; - std::fs::write(script_path, content).unwrap(); - - let result = logged(*phase, is_pgo, "/bin/sh", |command| { - command - .arg(script_path) - .env_clear() - .env("HOME", build_dir) - .env("PATH", "/usr/bin:/usr/sbin") - .current_dir(current_dir) - })?; - - if !result.success() { - match result.code() { - Some(code) => { - return Err(ExecError::Code(code)); - } - None => { - if let Some(signal) = result - .signal() - .or_else(|| result.stopped_signal()) - .and_then(|i| Signal::try_from(i).ok()) - { - return Err(ExecError::Signal(signal)); - } else { - return Err(ExecError::UnknownSignal); - } + if !result.success() { + match result.code() { + Some(code) => { + return Err(Error::Code(code)); + } + None => { + if let Some(signal) = result + .signal() + .or_else(|| result.stopped_signal()) + .and_then(|i| Signal::try_from(i).ok()) + { + return Err(Error::Signal(signal)); + } else { + return Err(Error::UnknownSignal); } } } 
@@ -257,18 +244,39 @@ impl Builder { } } } + + timing.finish(timer); } } + } + + println!(); + + Ok(()) + } +} - println!(); +pub fn build_target_prefix(target: BuildTarget, i: usize) -> String { + let newline = if i > 0 { "\n".into() } else { String::default() }; - Ok(()) - })?; + format!("{}{}", newline, target.to_string().dim()) +} - let packager = Packager::new(self.paths, self.recipe, self.macros, self.targets)?; +pub fn pgo_stage_prefix(stage: pgo::Stage, i: usize) -> String { + let newline = if i > 0 { + format!("{}\n", "│".dim()) + } else { + String::default() + }; - Ok(packager) - } + format!("{}{}", newline, format!("│pgo-{stage}").dim()) +} + +pub fn phase_prefix(phase: job::Phase, is_pgo: bool, i: usize) -> String { + let pipes = if is_pgo { "││".dim() } else { "│".dim() }; + let newline = if i > 0 { format!("{pipes}\n") } else { String::default() }; + + format!("{}{pipes}{}", newline, phase.styled(phase)) } fn logged( @@ -429,22 +437,16 @@ pub enum Error { Container(#[from] container::Error), #[error("recipe")] Recipe(#[from] recipe::Error), - #[error("create packager")] - Package(#[from] package::Error), - #[error("io")] - Io(#[from] io::Error), -} - -#[derive(Debug, Error)] -pub enum ExecError { #[error("failed with status code {0}")] Code(i32), #[error("stopped by signal {}", .0.as_str())] Signal(Signal), #[error("stopped by unknown signal")] UnknownSignal, - #[error(transparent)] + #[error("nix")] Nix(#[from] nix::Error), - #[error(transparent)] + #[error("io")] Io(#[from] io::Error), + #[error("recreate artefacts dir")] + RecreateArtefactsDir(#[source] io::Error), } diff --git a/boulder/src/cli/build.rs b/boulder/src/cli/build.rs index 89ff8e4be..4f8fdc3f4 100644 --- a/boulder/src/cli/build.rs +++ b/boulder/src/cli/build.rs @@ -6,7 +6,9 @@ use std::io; use std::path::PathBuf; use boulder::build::{self, Builder}; -use boulder::{package, profile, Env}; +use boulder::package::Packager; +use boulder::{container, package, profile, timing, Env, Timing}; +use chrono::Local; use clap::Parser; use thiserror::Error; @@ -43,13 +45,37 @@ pub fn handle(command: Command, env: Env) -> Result<(), Error> { return Err(Error::MissingRecipe(recipe)); } - let builder = Builder::new(&recipe, env, profile, ccache)?; + let mut timing = Timing::default(); + + let timer = timing.begin(timing::Kind::Startup); + let builder = Builder::new(&recipe, env, profile, ccache)?; builder.setup()?; - let packager = builder.build()?; + timing.finish(timer); + + let paths = &builder.paths; + let networking = builder.recipe.parsed.options.networking; + + // Build & package from within container + container::exec::(paths, networking, || { + builder.build(&mut timing)?; + + let packager = Packager::new(&builder.paths, &builder.recipe, &builder.macros, &builder.targets)?; + packager.package(&mut timing)?; + + timing.print_table(); + + Ok(()) + })?; + + // Copy artefacts to host recipe dir + package::sync_artefacts(paths).map_err(Error::SyncArtefacts)?; - packager.package()?; + println!( + "Build finished successfully at {}", + Local::now().to_rfc3339_opts(chrono::SecondsFormat::Secs, true) + ); Ok(()) } @@ -64,6 +90,8 @@ pub enum Error { Build(#[from] build::Error), #[error("package artifacts")] Package(#[from] package::Error), - #[error("io")] - Io(#[from] io::Error), + #[error("sync artefacts")] + SyncArtefacts(#[source] io::Error), + #[error("container")] + Container(#[from] container::Error), } diff --git a/boulder/src/lib.rs b/boulder/src/lib.rs index c4703347c..afe90a15f 100644 --- a/boulder/src/lib.rs +++ 
b/boulder/src/lib.rs
@@ -7,6 +7,7 @@ pub use self::macros::Macros;
 pub use self::paths::Paths;
 pub use self::profile::Profile;
 pub use self::recipe::Recipe;
+pub use self::timing::Timing;
 
 pub mod architecture;
 pub mod build;
@@ -17,4 +18,5 @@ pub mod package;
 pub mod paths;
 pub mod profile;
 pub mod recipe;
+pub mod timing;
 pub mod util;
diff --git a/boulder/src/package.rs b/boulder/src/package.rs
index c9eca2783..973b73866 100644
--- a/boulder/src/package.rs
+++ b/boulder/src/package.rs
@@ -11,7 +11,7 @@ use stone::write::digest;
 use stone_recipe::{script, Package};
 use thiserror::Error;
 
-use crate::{build, container, util, Macros, Paths, Recipe};
+use crate::{build, container, timing, util, Macros, Paths, Recipe, Timing};
 
 use self::collect::Collector;
 use self::emit::emit;
@@ -20,15 +20,20 @@ mod analysis;
 mod collect;
 mod emit;
 
-pub struct Packager {
-    paths: Paths,
-    recipe: Recipe,
+pub struct Packager<'a> {
+    paths: &'a Paths,
+    recipe: &'a Recipe,
     packages: HashMap<String, Package>,
     collector: Collector,
 }
 
-impl Packager {
-    pub fn new(paths: Paths, recipe: Recipe, macros: Macros, targets: Vec<build::Target>) -> Result<Self, Error> {
+impl<'a> Packager<'a> {
+    pub fn new(
+        paths: &'a Paths,
+        recipe: &'a Recipe,
+        macros: &'a Macros,
+        targets: &'a [build::Target],
+    ) -> Result<Self, Error> {
         let mut collector = Collector::new(paths.install().guest);
 
         // Arch names used to parse [`Macros`] for package templates
 
         // We always use "base" plus whatever build targets we've built
         let arches = Some("base".to_string())
             .into_iter()
-            .chain(targets.into_iter().map(|target| target.build_target.to_string()));
+            .chain(targets.iter().map(|target| target.build_target.to_string()));
 
         // Resolves all package templates from arch macros + recipe file. Also adds
         // package paths to [`Collector`]
-        let packages = resolve_packages(arches, &macros, &recipe, &mut collector)?;
+        let packages = resolve_packages(arches, macros, recipe, &mut collector)?;
 
         Ok(Self {
             paths,
@@ -50,51 +55,47 @@
     }
 
-    pub fn package(self) -> Result<(), Error> {
-        // Remove old artifacts
-        util::recreate_dir(&self.paths.artefacts().host).map_err(Error::RecreateArtefactsDir)?;
-
-        // Executed in guest container since file permissions may be borked
-        // for host if run rootless
-        container::exec(&self.paths, false, || {
-            // Hasher used for calculating file digests
-            let mut hasher = digest::Hasher::new();
-
-            // Collect all paths under install root
-            let paths = self
-                .collector
-                .enumerate_paths(None, &mut hasher)
-                .map_err(Error::CollectPaths)?;
-
-            // Process all paths with the analysis chain
-            // This will determine which files get included
-            // and what deps / provides they produce
-            let mut analysis = analysis::Chain::new(&self.paths, &self.recipe, &self.collector, &mut hasher);
-            analysis.process(paths).map_err(Error::Analysis)?;
-
-            // Combine the package definition with the analysis results
-            // for that package. We will use this to emit the package stones & manifests.
- // - // If no bucket exists, that means no paths matched this package so we can - // safely filter it out - let packages = self - .packages - .iter() - .filter_map(|(name, package)| { - let bucket = analysis.buckets.remove(name)?; - - Some(emit::Package::new(name, &self.recipe.parsed.source, package, bucket)) - }) - .collect::>(); - - // Emit package stones and manifest files to artefact directory - emit(&self.paths, &self.recipe, &packages).map_err(Error::Emit)?; - - Ok(()) as Result<(), Error> - })?; - - // We've exited container, sync artefacts to host - sync_artefacts(&self.paths).map_err(Error::SyncArtefacts)?; + pub fn package(&self, timing: &mut Timing) -> Result<(), Error> { + // Hasher used for calculating file digests + let mut hasher = digest::Hasher::new(); + + let timer = timing.begin(timing::Kind::Analysis); + + // Collect all paths under install root + let paths = self + .collector + .enumerate_paths(None, &mut hasher) + .map_err(Error::CollectPaths)?; + + // Process all paths with the analysis chain + // This will determine which files get included + // and what deps / provides they produce + let mut analysis = analysis::Chain::new(self.paths, self.recipe, &self.collector, &mut hasher); + analysis.process(paths).map_err(Error::Analysis)?; + + timing.finish(timer); + + let timer = timing.begin(timing::Kind::Packaging); + + // Combine the package definition with the analysis results + // for that package. We will use this to emit the package stones & manifests. + // + // If no bucket exists, that means no paths matched this package so we can + // safely filter it out + let packages = self + .packages + .iter() + .filter_map(|(name, package)| { + let bucket = analysis.buckets.remove(name)?; + + Some(emit::Package::new(name, &self.recipe.parsed.source, package, bucket)) + }) + .collect::>(); + + // Emit package stones and manifest files to artefact directory + emit(self.paths, self.recipe, &packages).map_err(Error::Emit)?; + + timing.finish(timer); Ok(()) } @@ -197,7 +198,7 @@ fn resolve_packages( Ok(packages) } -fn sync_artefacts(paths: &Paths) -> Result<(), io::Error> { +pub fn sync_artefacts(paths: &Paths) -> Result<(), io::Error> { for path in util::enumerate_files(&paths.artefacts().host, |_| true)? 
{ let filename = path.file_name().and_then(|p| p.to_str()).unwrap_or_default(); @@ -218,10 +219,6 @@ pub enum Error { Script(#[from] script::Error), #[error("collect install paths")] CollectPaths(#[source] collect::Error), - #[error("recreate artefacts dir")] - RecreateArtefactsDir(#[source] io::Error), - #[error("sync artefacts")] - SyncArtefacts(#[source] io::Error), #[error("analyzing paths")] Analysis(#[source] analysis::BoxError), #[error("emit packages")] diff --git a/boulder/src/package/emit.rs b/boulder/src/package/emit.rs index ec17b689c..85c7563de 100644 --- a/boulder/src/package/emit.rs +++ b/boulder/src/package/emit.rs @@ -90,7 +90,7 @@ impl<'a> Package<'a> { pub fn emit(paths: &Paths, recipe: &Recipe, packages: &[Package]) -> Result<(), Error> { let mut manifest = Manifest::new(paths, recipe, architecture::host()); - println!("Emitting packages\n"); + println!("Packaging"); for package in packages { if !package.is_dbginfo() { @@ -103,6 +103,8 @@ pub fn emit(paths: &Paths, recipe: &Recipe, packages: &[Package]) -> Result<(), manifest.write_binary()?; manifest.write_json()?; + println!(); + Ok(()) } diff --git a/boulder/src/timing.rs b/boulder/src/timing.rs new file mode 100644 index 000000000..efa519031 --- /dev/null +++ b/boulder/src/timing.rs @@ -0,0 +1,207 @@ +use std::{ + collections::BTreeMap, + time::{Duration, Instant}, +}; + +use crate::{architecture::BuildTarget, build}; + +const PROGRESS_WIDTH: usize = 6; +const ELAPSED_WIDTH: usize = 13; + +#[derive(Default)] +pub struct Timing { + startup: Duration, + build: BTreeMap, BTreeMap>>, + analysis: Duration, + packaging: Duration, +} + +impl Timing { + pub fn begin(&mut self, kind: Kind) -> Timer { + Timer(kind, Instant::now()) + } + + pub fn finish(&mut self, timer: Timer) { + let elapsed = timer.1.elapsed(); + + match timer.0 { + Kind::Build( + build @ Build { + target, + pgo_stage, + phase, + }, + ) => { + self.build + .entry(target) + .or_default() + .entry(pgo_stage) + .or_default() + .insert(phase, BuildEntry { build, elapsed }); + } + Kind::Startup => self.startup = elapsed, + Kind::Analysis => self.analysis = elapsed, + Kind::Packaging => self.packaging = elapsed, + } + } + + pub fn print_table(&self) { + let max_prefix_length = self + .build + .values() + .flat_map(|stages| { + stages + .values() + .flat_map(|phases| phases.values().map(BuildEntry::max_prefix_length)) + }) + .max() + .unwrap_or_default() + // No-op (less than "Packaging") + // .max("Startup".len()) + // .max("Analysis".len()) + .max("Packaging".len()); + let total_elapsed = self + .build + .values() + .flat_map(|stages| stages.values().flat_map(|phases| phases.values().map(|e| e.elapsed))) + .sum::() + + self.startup + + self.analysis + + self.packaging; + + println!( + "P{:ELAPSED_WIDTH$} {:>PROGRESS_WIDTH$}", + "hases", "Elapsed", "%", + ); + println!( + "│{:, + pub phase: build::job::Phase, +} + +struct BuildEntry { + build: Build, + elapsed: Duration, +} + +impl BuildEntry { + pub fn max_prefix_length(&self) -> usize { + self.build + .target + .to_string() + .len() + .max( + self.build + .pgo_stage + .map(|stage| stage.to_string().len() + 1) + .unwrap_or_default(), + ) + .max(self.build.phase.to_string().len() + if self.build.pgo_stage.is_some() { 2 } else { 1 }) + } +} + +/// Format a template of `000h00m00.00s`, removing +/// leading zeros for spaces if the duration is +/// too small +fn fmt_elapsed(duration: Duration) -> String { + let total_seconds = duration.as_secs_f32(); + let total_minutes = total_seconds as u64 / 60; + let total_hours 
= total_minutes / 60; + + // Only pad zeros if next unit exists + let seconds = if total_minutes >= 1 { + format!("{:0>5.2}s", total_seconds % 60.0) + } else { + format!("{:>5.2}s", total_seconds % 60.0) + }; + + let minutes = if total_minutes >= 1 { + // Only pad zeros if next unit exists + if total_hours >= 1 { + format!("{total_minutes:0>2}m") + } else { + format!("{total_minutes:>2}m") + } + } else { + " ".repeat(3) + }; + + let hours = if total_hours >= 1 { + format!("{total_hours:>3}h") + } else { + " ".repeat(4) + }; + + format!("{hours}{minutes}{seconds}") +} + +fn fmt_progress(elapsed: Duration, total: Duration) -> String { + let pct = elapsed.as_secs_f32() / total.as_secs_f32() * 100.0; + + format!("{pct:>5.1}%") +} From 6879ac99cd6d17892c2aaf38fc36afee3484fc6e Mon Sep 17 00:00:00 2001 From: Cory Forsstrom Date: Fri, 1 Mar 2024 14:01:17 -0800 Subject: [PATCH 25/26] Rename collections -> repositories --- boulder/data/profile.d/default-x86_64.yaml | 28 +++++++++++----------- boulder/src/cli/profile.rs | 4 ++-- boulder/src/profile.rs | 4 ++-- 3 files changed, 18 insertions(+), 18 deletions(-) diff --git a/boulder/data/profile.d/default-x86_64.yaml b/boulder/data/profile.d/default-x86_64.yaml index 3d7b6bdc8..fe519ec50 100644 --- a/boulder/data/profile.d/default-x86_64.yaml +++ b/boulder/data/profile.d/default-x86_64.yaml @@ -1,16 +1,16 @@ default-x86_64: - collections: - volatile: - uri: "https://dev.serpentos.com/volatile/x86_64/stone.index" - description: "Volatile moss repo" - priority: 0 + repositories: + volatile: + uri: "https://dev.serpentos.com/volatile/x86_64/stone.index" + description: "Volatile moss repo" + priority: 0 local-x86_64: - collections: - volatile: - uri: "https://dev.serpentos.com/volatile/x86_64/stone.index" - description: "Volatile moss repo" - priority: 0 - local: - uri: "file:///var/cache/boulder/repos/local-x86_64/stone.index" - description: "Local development moss repo" - priority: 10 + repositories: + volatile: + uri: "https://dev.serpentos.com/volatile/x86_64/stone.index" + description: "Volatile moss repo" + priority: 0 + local: + uri: "file:///var/cache/boulder/repos/local-x86_64/stone.index" + description: "Local development moss repo" + priority: 10 diff --git a/boulder/src/cli/profile.rs b/boulder/src/cli/profile.rs index 74b6c4e5e..eb903c152 100644 --- a/boulder/src/cli/profile.rs +++ b/boulder/src/cli/profile.rs @@ -96,7 +96,7 @@ pub fn list(manager: profile::Manager) -> Result<(), Error> { println!("{id}:"); for (id, repo) in profile - .collections + .repositories .iter() .sorted_by(|(_, a), (_, b)| a.priority.cmp(&b.priority).reverse()) { @@ -118,7 +118,7 @@ pub fn add<'a>( manager.save_profile( id.clone(), Profile { - collections: repository::Map::with(repos), + repositories: repository::Map::with(repos), }, )?; diff --git a/boulder/src/profile.rs b/boulder/src/profile.rs index 6eacb8a96..3e4b3c7a6 100644 --- a/boulder/src/profile.rs +++ b/boulder/src/profile.rs @@ -38,7 +38,7 @@ impl From for Id { /// Profile configuration data #[derive(Debug, Clone, Serialize, Deserialize)] pub struct Profile { - pub collections: repository::Map, + pub repositories: repository::Map, } /// A map of profiles @@ -106,7 +106,7 @@ impl<'a> Manager<'a> { pub fn repositories(&self, profile: &Id) -> Result<&repository::Map, Error> { self.profiles .get(profile) - .map(|profile| &profile.collections) + .map(|profile| &profile.repositories) .ok_or_else(|| Error::MissingProfile(profile.clone())) } From 3ff4f86c1fcb25355e8a181dcfcec6abcd7337e7 Mon Sep 17 00:00:00 2001 
From: Cory Forsstrom
Date: Fri, 1 Mar 2024 15:18:56 -0800
Subject: [PATCH 26/26] Disable styling on no-tty

---
 boulder/src/build.rs            |  2 +-
 boulder/src/build/job/phase.rs  |  2 +-
 boulder/src/build/upstream.rs   |  2 +-
 boulder/src/main.rs             |  2 +-
 boulder/src/package/analysis.rs |  2 +-
 boulder/src/package/emit.rs     |  2 +-
 crates/tui/src/lib.rs           |  3 ++-
 crates/tui/src/styled.rs        | 48 +++++++++++++++++++++++++++++++++
 moss/src/cli/index.rs           |  2 +-
 moss/src/cli/info.rs            |  2 +-
 moss/src/cli/list.rs            |  2 +-
 moss/src/cli/remove.rs          |  2 +-
 moss/src/cli/state.rs           |  2 +-
 moss/src/client/mod.rs          |  2 +-
 moss/src/main.rs                |  2 +-
 moss/src/package/render.rs      |  2 +-
 moss/src/state.rs               |  2 +-
 17 files changed, 65 insertions(+), 16 deletions(-)
 create mode 100644 crates/tui/src/styled.rs

diff --git a/boulder/src/build.rs b/boulder/src/build.rs
index 5d53420c0..4f400c840 100644
--- a/boulder/src/build.rs
+++ b/boulder/src/build.rs
@@ -21,7 +21,7 @@ use stone_recipe::{
     Script,
 };
 use thiserror::Error;
-use tui::Stylize;
+use tui::Styled;
 
 pub mod job;
 mod pgo;
diff --git a/boulder/src/build/job/phase.rs b/boulder/src/build/job/phase.rs
index 011f610b7..49aa65953 100644
--- a/boulder/src/build/job/phase.rs
+++ b/boulder/src/build/job/phase.rs
@@ -11,7 +11,7 @@ use stone_recipe::{
     Script,
 };
 
-use tui::Stylize;
+use tui::Styled;
 
 use super::{work_dir, Error};
 use crate::build::pgo;
diff --git a/boulder/src/build/upstream.rs b/boulder/src/build/upstream.rs
index 7a5a13d95..b63655495 100644
--- a/boulder/src/build/upstream.rs
+++ b/boulder/src/build/upstream.rs
@@ -15,7 +15,7 @@ use nix::unistd::{linkat, LinkatFlags};
 use sha2::{Digest, Sha256};
 use thiserror::Error;
 use tokio::io::AsyncWriteExt;
-use tui::{MultiProgress, ProgressBar, ProgressStyle, Stylize};
+use tui::{MultiProgress, ProgressBar, ProgressStyle, Styled};
 use url::Url;
 
 use crate::{util, Paths, Recipe};
diff --git a/boulder/src/main.rs b/boulder/src/main.rs
index 6d58dd8bd..844f05187 100644
--- a/boulder/src/main.rs
+++ b/boulder/src/main.rs
@@ -4,7 +4,7 @@
 
 use std::error::Error;
 
-use tui::Stylize;
+use tui::Styled;
 
 mod cli;
diff --git a/boulder/src/package/analysis.rs b/boulder/src/package/analysis.rs
index a1aac358c..5cffc2d7b 100644
--- a/boulder/src/package/analysis.rs
+++ b/boulder/src/package/analysis.rs
@@ -9,7 +9,7 @@ use std::{
 
 use moss::{Dependency, Provider};
 use stone::write::digest;
-use tui::{ProgressBar, ProgressStyle, Stylize};
+use tui::{ProgressBar, ProgressStyle, Styled};
 
 use super::collect::{Collector, PathInfo};
 use crate::{Paths, Recipe};
diff --git a/boulder/src/package/emit.rs b/boulder/src/package/emit.rs
index ec17b689c..14fd5c478 100644
--- a/boulder/src/package/emit.rs
+++ b/boulder/src/package/emit.rs
@@ -10,7 +10,7 @@ use std::{
 use itertools::Itertools;
 use moss::{package::Meta, Dependency};
 use thiserror::Error;
-use tui::{ProgressBar, ProgressReader, ProgressStyle, Stylize};
+use tui::{ProgressBar, ProgressReader, ProgressStyle, Styled};
 
 use self::manifest::Manifest;
 use super::analysis;
diff --git a/crates/tui/src/lib.rs b/crates/tui/src/lib.rs
index d5f761d9e..f9220f21c 100644
--- a/crates/tui/src/lib.rs
+++ b/crates/tui/src/lib.rs
@@ -5,8 +5,10 @@
 use std::io::{Read, Write};
 
 pub use self::reexport::*;
+pub use self::styled::Styled;
 
 pub mod pretty;
+mod styled;
 
 const DEFAULT_TERM_SIZE: (u16, u16) = (80, 24);
@@ -102,7 +104,6 @@ impl Read for ProgressReader {
 
 /// Provide a standard approach to ratatui based TUI in moss
 mod reexport {
-    pub use crossterm::style::Stylize;
     pub use dialoguer;
     pub use indicatif::*;
 }
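
The new `styled` module (added in the next hunk) routes every `Stylize` method through a TTY check. As a sketch of what `impl_method!(green)` expands to (hand-expanded here, not compiler output):

    fn green(self) -> <Self as Stylize>::Styled {
        if stdout().is_tty() {
            // Attached to a terminal: apply the real crossterm styling.
            <Self as Stylize>::green(self)
        } else {
            // Piped or redirected: wrap the content without any attributes.
            self.stylize()
        }
    }

diff --git a/crates/tui/src/styled.rs b/crates/tui/src/styled.rs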
new file mode 100644
index 000000000..67dd30842
--- /dev/null
+++ b/crates/tui/src/styled.rs
@@ -0,0 +1,48 @@
+use std::io::stdout;
+
+use crossterm::{style::Stylize, tty::IsTty};
+
+macro_rules! impl_method {
+    ($method:ident) => {
+        fn $method(self) -> <Self as Stylize>::Styled {
+            if stdout().is_tty() {
+                <Self as Stylize>::$method(self)
+            } else {
+                self.stylize()
+            }
+        }
+    };
+}
+
+/// Wrapper around `Stylize` which does nothing if not a TTY
+pub trait Styled: Stylize {
+    impl_method!(reset);
+    impl_method!(bold);
+    impl_method!(underlined);
+    impl_method!(reverse);
+    impl_method!(dim);
+    impl_method!(italic);
+    impl_method!(negative);
+    impl_method!(slow_blink);
+    impl_method!(rapid_blink);
+    impl_method!(hidden);
+    impl_method!(crossed_out);
+    impl_method!(black);
+    impl_method!(dark_grey);
+    impl_method!(red);
+    impl_method!(dark_red);
+    impl_method!(green);
+    impl_method!(dark_green);
+    impl_method!(yellow);
+    impl_method!(dark_yellow);
+    impl_method!(blue);
+    impl_method!(dark_blue);
+    impl_method!(magenta);
+    impl_method!(dark_magenta);
+    impl_method!(cyan);
+    impl_method!(dark_cyan);
+    impl_method!(white);
+    impl_method!(grey);
+}
+
+impl<T> Styled for T where T: Stylize {}
diff --git a/moss/src/cli/index.rs b/moss/src/cli/index.rs
index 588ab7258..ef5c27f10 100644
--- a/moss/src/cli/index.rs
+++ b/moss/src/cli/index.rs
@@ -16,7 +16,7 @@ use moss::{
 use rayon::iter::{IntoParallelRefIterator, ParallelIterator};
 use sha2::{Digest, Sha256};
 use thiserror::Error;
-use tui::{MultiProgress, ProgressBar, ProgressStyle, Stylize};
+use tui::{MultiProgress, ProgressBar, ProgressStyle, Styled};
 
 pub fn command() -> Command {
     Command::new("index")
diff --git a/moss/src/cli/info.rs b/moss/src/cli/info.rs
index b289cad8c..e10911c33 100644
--- a/moss/src/cli/info.rs
+++ b/moss/src/cli/info.rs
@@ -14,7 +14,7 @@ use moss::{
 };
 use stone::payload::layout;
 use thiserror::Error;
-use tui::Stylize;
+use tui::Styled;
 use vfs::tree::BlitFile;
 
 const COLUMN_WIDTH: usize = 20;
diff --git a/moss/src/cli/list.rs b/moss/src/cli/list.rs
index 59ff5cc24..197bbf5c0 100644
--- a/moss/src/cli/list.rs
+++ b/moss/src/cli/list.rs
@@ -13,7 +13,7 @@ use moss::{
     environment,
     package::Flags,
 };
-use tui::Stylize;
+use tui::Styled;
 
 pub fn command() -> Command {
     Command::new("list")
diff --git a/moss/src/cli/remove.rs b/moss/src/cli/remove.rs
index a52055ac1..abaa2d176 100644
--- a/moss/src/cli/remove.rs
+++ b/moss/src/cli/remove.rs
@@ -18,7 +18,7 @@ use thiserror::Error;
 use tui::{
     dialoguer::{theme::ColorfulTheme, Confirm},
     pretty::print_to_columns,
-    Stylize,
+    Styled,
 };
 
 pub fn command() -> Command {
diff --git a/moss/src/cli/state.rs b/moss/src/cli/state.rs
index 0d23e4260..1e1ec23c2 100644
--- a/moss/src/cli/state.rs
+++ b/moss/src/cli/state.rs
@@ -10,7 +10,7 @@ use moss::{
     environment, state,
 };
 use thiserror::Error;
-use tui::Stylize;
+use tui::Styled;
 
 pub fn command() -> Command {
     Command::new("state")
diff --git a/moss/src/client/mod.rs b/moss/src/client/mod.rs
index 08f7f10b0..1ebd40214 100644
--- a/moss/src/client/mod.rs
+++ b/moss/src/client/mod.rs
@@ -21,7 +21,7 @@ use nix::{
 };
 use stone::{payload::layout, read::PayloadKind};
 use thiserror::Error;
-use tui::{MultiProgress, ProgressBar, ProgressStyle, Stylize};
+use tui::{MultiProgress, ProgressBar, ProgressStyle, Styled};
 use vfs::tree::{builder::TreeBuilder, BlitFile, Element};
 
 use self::install::install;
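
Call sites stay untouched apart from the import swaps below: with `Styled` in scope, the same method names resolve through the TTY-aware wrapper. A small usage sketch within this workspace (relying on crossterm's blanket `Stylize` impl for `&str`, as the trait above does):

    use tui::Styled;

    fn main() {
        // Colored when stdout is a terminal; plain text when piped,
        // e.g. `moss list installed | less`.
        println!("{}", "Build complete".green().bold());
    }

diff --git a/moss/src/main.rs b/moss/src/main.rs
index 6ce87c6c1..0e09ce2bf 100644
--- a/moss/src/main.rs
+++ b/moss/src/main.rs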
@@ -4,7 +4,7 @@
 
 use std::error::Error;
 
-use tui::Stylize;
+use tui::Styled;
 
 mod cli;
diff --git a/moss/src/package/render.rs b/moss/src/package/render.rs
index e3d44428d..8904f444c 100644
--- a/moss/src/package/render.rs
+++ b/moss/src/package/render.rs
@@ -6,7 +6,7 @@ use std::io::Write;
 
 use tui::{
     pretty::{Column, ColumnDisplay},
-    Stylize,
+    Styled,
 };
 
 use crate::Package;
diff --git a/moss/src/state.rs b/moss/src/state.rs
index bc0aab526..8ae1842bb 100644
--- a/moss/src/state.rs
+++ b/moss/src/state.rs
@@ -6,7 +6,7 @@ use std::io::Write;
 
 use chrono::{DateTime, Utc};
 use derive_more::{Display, From, Into};
-use tui::{pretty, Stylize};
+use tui::{pretty, Styled};
 
 use crate::package;
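
A closing note on the timing table from patch 24: `fmt_elapsed` always renders a 13-column `000h00m00.00s` template (matching `ELAPSED_WIDTH`), zero-padding a unit only when the next-larger unit is present. A standalone sketch of those padding rules, with a made-up duration:

    use std::time::Duration;

    fn main() {
        // 1h 2m 5s, mirroring fmt_elapsed's width rules.
        let d = Duration::from_secs(3725);
        let total_seconds = d.as_secs_f32();
        let total_minutes = d.as_secs() / 60;
        let total_hours = total_minutes / 60;

        // Hours are present, so minutes and seconds are zero-padded.
        let s = format!(
            "{:>3}h{:0>2}m{:0>5.2}s",
            total_hours,
            total_minutes % 60,
            total_seconds % 60.0
        );
        assert_eq!(s, "  1h02m05.00s");
        println!("{s}"); // 13 columns, matching ELAPSED_WIDTH
    }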