diff --git a/.cargo/config.toml b/.cargo/config.toml new file mode 100644 index 0000000..8f1c029 --- /dev/null +++ b/.cargo/config.toml @@ -0,0 +1,11 @@ +# Workspace-level cargo config so wasm32-wasip1 tests run from the workspace +# root via `-p `. Per-package `.cargo/config.toml` files only apply +# when cwd is inside the package directory; this file makes the runners +# discoverable from anywhere in the tree. +# +# Cargo invokes the runner with cwd set to the package's manifest directory +# (e.g. `crates/edgezero-adapter-fastly/`), so the `-C` argument is relative +# to that — `../../examples/...` resolves to the same fastly.toml regardless +# of which adapter package is being tested. +[target.wasm32-wasip1] +runner = "viceroy run -C ../../examples/app-demo/crates/app-demo-adapter-fastly/fastly.toml -- " diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index c23f862..eb8d5c3 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -59,7 +59,7 @@ jobs: # your codebase is analyzed, see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/codeql-code-scanning-for-compiled-languages steps: - name: Checkout repository - uses: actions/checkout@v4 + uses: actions/checkout@v5 # Add any setup steps before running the `github/codeql-action/init` action. 
# This includes steps like installing compilers or runtimes (`actions/setup-node` diff --git a/.github/workflows/deploy-docs.yml b/.github/workflows/deploy-docs.yml index ab71a3c..1c2d322 100644 --- a/.github/workflows/deploy-docs.yml +++ b/.github/workflows/deploy-docs.yml @@ -24,7 +24,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@v5 with: fetch-depth: 0 # For lastUpdated feature @@ -38,7 +38,7 @@ jobs: fi - name: Setup Node.js - uses: actions/setup-node@v4 + uses: actions/setup-node@v5 with: node-version: ${{ steps.node-version.outputs.node-version }} cache: "npm" diff --git a/.github/workflows/format.yml b/.github/workflows/format.yml index be42f98..90d138f 100644 --- a/.github/workflows/format.yml +++ b/.github/workflows/format.yml @@ -18,10 +18,10 @@ jobs: name: cargo fmt runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 - name: Cache cargo dependencies - uses: actions/cache@v4 + uses: actions/cache@v5 with: path: | ~/.cargo/bin/ @@ -60,7 +60,7 @@ jobs: working-directory: docs steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 - name: Retrieve Node.js version id: node-version @@ -69,7 +69,7 @@ jobs: shell: bash - name: Use Node.js - uses: actions/setup-node@v4 + uses: actions/setup-node@v5 with: node-version: ${{ steps.node-version.outputs.node-version }} cache: "npm" diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 2c87283..8b58723 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -18,10 +18,10 @@ jobs: name: cargo test runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 - name: Cache Cargo dependencies - uses: actions/cache@v4 + uses: actions/cache@v5 with: path: | ~/.cargo/bin/ @@ -46,12 +46,6 @@ jobs: - name: Add wasm targets run: rustup target add wasm32-wasip1 wasm32-unknown-unknown - - name: Setup Viceroy - run: | - if ! 
command -v viceroy &>/dev/null; then - cargo install viceroy --locked - fi - - name: Fetch dependencies (locked) run: cargo fetch --locked @@ -61,17 +55,30 @@ jobs: - name: Check feature compilation run: cargo check --workspace --all-targets --features "fastly cloudflare spin" - - name: Check Spin wasm32 compilation - run: cargo check -p edgezero-adapter-spin --target wasm32-wasip1 --features spin - - cloudflare-wasm-tests: - name: cloudflare wasm tests + adapter-wasm-tests: + name: ${{ matrix.adapter }} wasm tests runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + include: + - adapter: cloudflare + target: wasm32-unknown-unknown + runner_env: CARGO_TARGET_WASM32_UNKNOWN_UNKNOWN_RUNNER + runner_value: wasm-bindgen-test-runner + - adapter: fastly + target: wasm32-wasip1 + runner_env: CARGO_TARGET_WASM32_WASIP1_RUNNER + runner_value: viceroy run + - adapter: spin + target: wasm32-wasip1 + runner_env: CARGO_TARGET_WASM32_WASIP1_RUNNER + runner_value: wasmtime run steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 - name: Cache Cargo dependencies - uses: actions/cache@v4 + uses: actions/cache@v5 with: path: | ~/.cargo/bin/ @@ -79,24 +86,25 @@ jobs: ~/.cargo/registry/cache/ ~/.cargo/git/db/ target/ - key: ${{ runner.os }}-cargo-cloudflare-${{ hashFiles('**/Cargo.lock') }} + key: ${{ runner.os }}-cargo-${{ matrix.adapter }}-${{ hashFiles('**/Cargo.lock') }} restore-keys: | - ${{ runner.os }}-cargo-cloudflare- + ${{ runner.os }}-cargo-${{ matrix.adapter }}- - name: Retrieve Rust version - id: rust-version-cloudflare + id: rust-version run: echo "rust-version=$(grep '^rust ' .tool-versions | awk '{print $2}')" >> $GITHUB_OUTPUT shell: bash - name: Set up Rust tool chain uses: actions-rust-lang/setup-rust-toolchain@v1 with: - toolchain: ${{ steps.rust-version-cloudflare.outputs.rust-version }} + toolchain: ${{ steps.rust-version.outputs.rust-version }} - - name: Add wasm32 target - run: rustup target add wasm32-unknown-unknown + - name: Add 
wasm target + run: rustup target add ${{ matrix.target }} - name: Resolve wasm-bindgen CLI version + if: matrix.adapter == 'cloudflare' id: wasm-bindgen-version shell: bash run: | @@ -113,62 +121,41 @@ jobs: test -n "$version" echo "version=$version" >> "$GITHUB_OUTPUT" + # `--force` is required because the cargo cache may restore an existing + # `~/.cargo/bin/` from a prior run, which `cargo install` rejects + # by default. Force-overwriting is safe — `--locked` pins the version. - name: Install wasm-bindgen test runner - run: cargo install wasm-bindgen-cli --version "${{ steps.wasm-bindgen-version.outputs.version }}" --locked - - - name: Fetch dependencies (locked) - run: cargo fetch --locked - - - name: Run Cloudflare wasm tests - env: - CARGO_TARGET_WASM32_UNKNOWN_UNKNOWN_RUNNER: wasm-bindgen-test-runner - run: cargo test -p edgezero-adapter-cloudflare --features cloudflare --target wasm32-unknown-unknown --test contract - - - name: Check Cloudflare wasm target - run: cargo check -p edgezero-adapter-cloudflare --features cloudflare --target wasm32-unknown-unknown - - fastly-wasm-tests: - name: fastly wasm tests - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - - name: Cache Cargo dependencies - uses: actions/cache@v4 - with: - path: | - ~/.cargo/bin/ - ~/.cargo/registry/index/ - ~/.cargo/registry/cache/ - ~/.cargo/git/db/ - target/ - key: ${{ runner.os }}-cargo-fastly-${{ hashFiles('**/Cargo.lock') }} - restore-keys: | - ${{ runner.os }}-cargo-fastly- + if: matrix.adapter == 'cloudflare' + run: cargo install wasm-bindgen-cli --version "${{ steps.wasm-bindgen-version.outputs.version }}" --locked --force - - name: Retrieve Rust version - id: rust-version-fastly - run: echo "rust-version=$(grep '^rust ' .tool-versions | awk '{print $2}')" >> $GITHUB_OUTPUT + - name: Resolve Viceroy version + if: matrix.adapter == 'fastly' + id: viceroy-version shell: bash - - - name: Set up Rust tool chain - uses: actions-rust-lang/setup-rust-toolchain@v1 - with: 
- toolchain: ${{ steps.rust-version-fastly.outputs.rust-version }} - - - name: Add wasm targets - run: rustup target add wasm32-wasip1 + run: echo "version=$(grep '^viceroy ' .tool-versions | awk '{print $2}')" >> "$GITHUB_OUTPUT" - name: Setup Viceroy - run: cargo install viceroy --locked + if: matrix.adapter == 'fastly' + # Version comes from .tool-versions (single source of truth shared with + # local dev). viceroy 0.17 raises MSRV to rustc 1.95; we ship 1.91, so + # the .tool-versions entry pins us to a 0.16.x build. + run: cargo install viceroy --version "${{ steps.viceroy-version.outputs.version }}" --locked --force + + - name: Setup Wasmtime + if: matrix.adapter == 'spin' + run: | + if ! command -v wasmtime &>/dev/null; then + curl https://wasmtime.dev/install.sh -sSf | bash + echo "$HOME/.wasmtime/bin" >> "$GITHUB_PATH" + fi - name: Fetch dependencies (locked) run: cargo fetch --locked - - name: Run Fastly wasm tests + - name: Run ${{ matrix.adapter }} wasm tests env: - CARGO_TARGET_WASM32_WASIP1_RUNNER: "viceroy run" - run: cargo test -p edgezero-adapter-fastly --features fastly --target wasm32-wasip1 --test contract + ${{ matrix.runner_env }}: ${{ matrix.runner_value }} + run: cargo test -p edgezero-adapter-${{ matrix.adapter }} --features ${{ matrix.adapter }} --target ${{ matrix.target }} --test contract - - name: Check Fastly wasm target - run: cargo check -p edgezero-adapter-fastly --features fastly --target wasm32-wasip1 + - name: Check ${{ matrix.adapter }} wasm target + run: cargo check -p edgezero-adapter-${{ matrix.adapter }} --features ${{ matrix.adapter }} --target ${{ matrix.target }} diff --git a/.gitignore b/.gitignore index 48a5ede..e25d20e 100644 --- a/.gitignore +++ b/.gitignore @@ -32,3 +32,4 @@ docs/superpowers/ !.vscode/tasks.json !.vscode/launch.json !.vscode/extensions.json +*.rlib diff --git a/.tool-versions b/.tool-versions index 9934717..3b4dbef 100644 --- a/.tool-versions +++ b/.tool-versions @@ -1,3 +1,4 @@ fasltly v13.0.0 
nodejs 24.12.0 rust 1.91.1 +viceroy 0.16.4 diff --git a/Cargo.lock b/Cargo.lock index 29d7c26..a4c1573 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -660,7 +660,6 @@ checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813" name = "edgezero-adapter" version = "0.1.0" dependencies = [ - "once_cell", "tempfile", "toml", ] @@ -736,6 +735,7 @@ dependencies = [ "log", "log-fastly", "tempfile", + "thiserror 2.0.18", "walkdir", ] @@ -776,7 +776,9 @@ dependencies = [ "log", "serde", "serde_json", + "simple_logger", "tempfile", + "thiserror 2.0.18", "toml", ] diff --git a/Cargo.toml b/Cargo.toml index caa1c80..e8f0486 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -69,3 +69,72 @@ validator = { version = "0.20", features = ["derive"] } walkdir = { version = "2" } web-time = "1" worker = { version = "0.8", features = ["http"] } + +[workspace.lints.clippy] +# Same strict gate as the demo workspace. Allow-list mirrors the demo's +# slim set; every additional exception lives at the call site as a +# documented `#[allow]` or `#[expect]` rather than a workspace allow. +pedantic = { level = "warn", priority = -1 } +restriction = { level = "deny", priority = -1 } + +# Meta — required when enabling `restriction` as a group. +blanket_clippy_restriction_lints = "allow" + +# Documentation — private items don't need full docs. +missing_docs_in_private_items = "allow" + +# Style / formatting — match idiomatic Rust conventions. +implicit_return = "allow" +question_mark_used = "allow" +single_call_fn = "allow" +separated_literal_suffix = "allow" +# `pub_with_shorthand` wants `pub(in crate)` but rustfmt unconditionally +# rewrites that to `pub(crate)`. Five legitimate cross-file `pub(crate)` +# items remain (dispatch_raw, dispatch_with_store_names, parse_uri, +# parse_client_addr, decompress_body) — they need at least crate visibility, +# and there is no spelling that satisfies both the lint and rustfmt. 
+pub_with_shorthand = "allow" +# `module_name_repetitions` was attempted: 39 sites in edgezero-core, +# centred on three concrete blockers that surfaced during the rename: +# 1. `proxy::Request`/`proxy::Response` would collide with the +# `http::Request`/`http::Response` already imported by every +# consumer; the only viable alternative names (`OutboundRequest`, +# `Outbound`) are strictly more verbose than `ProxyRequest`. +# 2. `manifest.rs` has 17 `Manifest*` types; consumers in adapters, +# cli, demos, scaffold templates, and the macro-generated app +# code use these names directly. Stripping the prefix would force +# every site to write `use edgezero_core::manifest::Spec as Manifest` +# etc. — pure churn for no readability gain since `manifest::Spec` +# reads worse than `Manifest`. +# 3. The macro `#[app]` emits code that references these names by +# their current spelling; renaming requires regenerating every +# generated app with new types and updating CLAUDE.md examples. +# Net: the lint's intent (Rust ecosystem `module::Type` idiom) is +# real, but it conflicts with our flat re-export surface and several +# names cannot be deprefixed without losing meaning. +module_name_repetitions = "allow" + +# `pattern_type_mismatch` and `ref_patterns` are mutually exclusive in modern +# Rust — every `if let Some(x) = &foo` flags the first, every +# `*foo { Variant(ref x) => ... }` flags the second. We pick match-ergonomics. +pattern_type_mismatch = "allow" + +# API design — `exhaustive_structs` fires on the unit struct generated by +# `edgezero_core::app!`. +exhaustive_structs = "allow" +# Only one site triggers `exhaustive_enums` workspace-wide: `Body { Once, +# Stream }`. Marking it `#[non_exhaustive]` would force a wildcard arm +# (`_ => unreachable!()`) at every external `match` site — 37 of them +# across the four adapter crates — and a third Body variant would +# silently panic at runtime instead of producing a compile error. 
+# Body is intentionally a closed enum. +exhaustive_enums = "allow" + +# Imports / paths +std_instead_of_alloc = "allow" +std_instead_of_core = "allow" + + + +[workspace.lints.rust] +unsafe_code = "deny" \ No newline at end of file diff --git a/clippy.toml b/clippy.toml new file mode 100644 index 0000000..0b4d3d8 --- /dev/null +++ b/clippy.toml @@ -0,0 +1,9 @@ +# Clippy configuration. See https://doc.rust-lang.org/clippy/lint_configuration.html +# +# Test code uses `.unwrap()`, `.expect()`, `panic!`, `assert!`, indexing, and +# other "if-this-fails-the-test-fails" idioms by convention. We keep the +# corresponding restriction lints active in production code but exempt tests. +allow-expect-in-tests = true +allow-indexing-slicing-in-tests = true +allow-panic-in-tests = true +allow-unwrap-in-tests = true diff --git a/crates/edgezero-adapter-axum/Cargo.toml b/crates/edgezero-adapter-axum/Cargo.toml index 9f9b3c9..a8fcbbf 100644 --- a/crates/edgezero-adapter-axum/Cargo.toml +++ b/crates/edgezero-adapter-axum/Cargo.toml @@ -5,6 +5,9 @@ version = { workspace = true } authors = { workspace = true } license = { workspace = true } +[lints] +workspace = true + [features] default = ["axum"] axum = [ diff --git a/crates/edgezero-adapter-axum/src/cli.rs b/crates/edgezero-adapter-axum/src/cli.rs index c070526..4abd9a7 100644 --- a/crates/edgezero-adapter-axum/src/cli.rs +++ b/crates/edgezero-adapter-axum/src/cli.rs @@ -1,3 +1,4 @@ +use std::env; use std::fs; use std::path::{Path, PathBuf}; use std::process::Command; @@ -6,59 +7,15 @@ use ctor::ctor; use edgezero_adapter::cli_support::{ find_manifest_upwards, find_workspace_root, path_distance, read_package_name, }; +use edgezero_adapter::registry::{register_adapter, Adapter, AdapterAction}; use edgezero_adapter::scaffold::{ register_adapter_blueprint, AdapterBlueprint, AdapterFileSpec, CommandTemplates, DependencySpec, LoggingDefaults, ManifestSpec, ReadmeInfo, TemplateRegistration, }; -use edgezero_adapter::{register_adapter, 
Adapter, AdapterAction}; use toml::Value; use walkdir::WalkDir; -static AXUM_TEMPLATE_REGISTRATIONS: &[TemplateRegistration] = &[ - TemplateRegistration { - name: "axum_Cargo_toml", - contents: include_str!("templates/Cargo.toml.hbs"), - }, - TemplateRegistration { - name: "axum_src_main_rs", - contents: include_str!("templates/src/main.rs.hbs"), - }, - TemplateRegistration { - name: "axum_axum_toml", - contents: include_str!("templates/axum.toml.hbs"), - }, -]; - -static AXUM_FILE_SPECS: &[AdapterFileSpec] = &[ - AdapterFileSpec { - template: "axum_Cargo_toml", - output: "Cargo.toml", - }, - AdapterFileSpec { - template: "axum_src_main_rs", - output: "src/main.rs", - }, - AdapterFileSpec { - template: "axum_axum_toml", - output: "axum.toml", - }, -]; - -static AXUM_DEPENDENCIES: &[DependencySpec] = &[ - DependencySpec { - key: "dep_edgezero_core_axum", - repo_crate: "crates/edgezero-core", - fallback: "edgezero-core = { git = \"https://git@github.com/stackpop/edgezero.git\", package = \"edgezero-core\" }", - features: &[], - }, - DependencySpec { - key: "dep_edgezero_adapter_axum", - repo_crate: "crates/edgezero-adapter-axum", - fallback: - "edgezero-adapter-axum = { git = \"https://git@github.com/stackpop/edgezero.git\", package = \"edgezero-adapter-axum\", default-features = false }", - features: &["axum"], - }, -]; +static AXUM_ADAPTER: AxumCliAdapter = AxumCliAdapter; static AXUM_BLUEPRINT: AdapterBlueprint = AdapterBlueprint { id: "axum", @@ -97,32 +54,74 @@ static AXUM_BLUEPRINT: AdapterBlueprint = AdapterBlueprint { run_module: "edgezero_adapter_axum", }; +static AXUM_DEPENDENCIES: &[DependencySpec] = &[ + DependencySpec { + key: "dep_edgezero_core_axum", + repo_crate: "crates/edgezero-core", + fallback: "edgezero-core = { git = \"https://git@github.com/stackpop/edgezero.git\", package = \"edgezero-core\" }", + features: &[], + }, + DependencySpec { + key: "dep_edgezero_adapter_axum", + repo_crate: "crates/edgezero-adapter-axum", + fallback: + 
"edgezero-adapter-axum = { git = \"https://git@github.com/stackpop/edgezero.git\", package = \"edgezero-adapter-axum\", default-features = false }", + features: &["axum"], + }, +]; + +static AXUM_FILE_SPECS: &[AdapterFileSpec] = &[ + AdapterFileSpec { + template: "axum_Cargo_toml", + output: "Cargo.toml", + }, + AdapterFileSpec { + template: "axum_src_main_rs", + output: "src/main.rs", + }, + AdapterFileSpec { + template: "axum_axum_toml", + output: "axum.toml", + }, +]; + +static AXUM_TEMPLATE_REGISTRATIONS: &[TemplateRegistration] = &[ + TemplateRegistration { + name: "axum_Cargo_toml", + contents: include_str!("templates/Cargo.toml.hbs"), + }, + TemplateRegistration { + name: "axum_src_main_rs", + contents: include_str!("templates/src/main.rs.hbs"), + }, + TemplateRegistration { + name: "axum_axum_toml", + contents: include_str!("templates/axum.toml.hbs"), + }, +]; + struct AxumCliAdapter; -static AXUM_ADAPTER: AxumCliAdapter = AxumCliAdapter; +struct AxumProject { + cargo_manifest: PathBuf, + crate_dir: PathBuf, + crate_name: String, + port: u16, +} impl Adapter for AxumCliAdapter { - fn name(&self) -> &'static str { - "axum" - } - fn execute(&self, action: AdapterAction, args: &[String]) -> Result<(), String> { match action { AdapterAction::Build => build(args), AdapterAction::Deploy => deploy(args), AdapterAction::Serve => serve(args), + other => Err(format!("axum adapter does not support {other:?}")), } } -} - -pub fn register() { - register_adapter(&AXUM_ADAPTER); - register_adapter_blueprint(&AXUM_BLUEPRINT); -} -#[ctor] -fn register_ctor() { - register(); + fn name(&self) -> &'static str { + "axum" + } } fn build(extra_args: &[String]) -> Result<(), String> { @@ -130,55 +129,10 @@ fn build(extra_args: &[String]) -> Result<(), String> { run_cargo(&project, "build", extra_args) } -fn serve(extra_args: &[String]) -> Result<(), String> { - let project = locate_project()?; - run_cargo(&project, "run", extra_args) -} - fn deploy(_extra_args: &[String]) -> 
Result<(), String> { Err("Axum adapter does not define a deploy command. Extend your workspace manifest with one if needed.".into()) } -struct AxumProject { - crate_dir: PathBuf, - cargo_manifest: PathBuf, - crate_name: String, - port: u16, -} - -fn locate_project() -> Result { - let cwd = std::env::current_dir().map_err(|err| err.to_string())?; - let manifest = find_axum_manifest(&cwd)?; - read_axum_project(&manifest) -} - -fn run_cargo(project: &AxumProject, subcommand: &str, extra_args: &[String]) -> Result<(), String> { - let display = project.crate_dir.display(); - println!( - "[edgezero] Axum {subcommand} ({}) in {} (port: {})", - project.crate_name, display, project.port - ); - let mut command = Command::new("cargo"); - command.arg(subcommand); - command.arg("--manifest-path"); - command.arg( - project - .cargo_manifest - .to_str() - .ok_or_else(|| format!("invalid manifest path {}", project.cargo_manifest.display()))?, - ); - command.args(extra_args); - command.current_dir(&project.crate_dir); - let status = command - .status() - .map_err(|err| format!("failed to run cargo {subcommand}: {err}"))?; - if status.success() { - Ok(()) - } else { - Err(format!("cargo {subcommand} failed with status {}", status)) - } -} - fn find_axum_manifest(start: &Path) -> Result { if let Some(found) = find_manifest_upwards(start, "axum.toml") { return Ok(found); @@ -190,15 +144,12 @@ fn find_axum_manifest(start: &Path) -> Result { .max_depth(8) .into_iter() .filter_map(Result::ok) - .map(|entry| entry.into_path()) + .map(walkdir::DirEntry::into_path) .filter(|path| { - path.file_name() - .map(|name| name == "axum.toml") - .unwrap_or(false) + path.file_name().is_some_and(|name| name == "axum.toml") && path .parent() - .map(|dir| dir.join("Cargo.toml").exists()) - .unwrap_or(false) + .is_some_and(|dir| dir.join("Cargo.toml").exists()) }) .collect(); @@ -214,6 +165,12 @@ fn find_axum_manifest(start: &Path) -> Result { Ok(candidates.remove(0)) } +fn locate_project() -> Result { + 
let cwd = env::current_dir().map_err(|err| err.to_string())?; + let manifest = find_axum_manifest(&cwd)?; + read_axum_project(&manifest) +} + fn read_axum_project(manifest: &Path) -> Result { let contents = fs::read_to_string(manifest) .map_err(|err| format!("failed to read {}: {err}", manifest.display()))?; @@ -225,13 +182,13 @@ fn read_axum_project(manifest: &Path) -> Result { .and_then(Value::as_table) .ok_or_else(|| format!("adapter table missing in {}", manifest.display()))?; - let crate_dir = adapter + let crate_dir_rel = adapter .get("crate_dir") .and_then(Value::as_str) .ok_or_else(|| format!("adapter.crate_dir missing in {}", manifest.display()))?; let manifest_dir = manifest.parent().unwrap_or_else(|| Path::new(".")); - let crate_dir = manifest_dir.join(crate_dir); + let crate_dir = manifest_dir.join(crate_dir_rel); let cargo_manifest = crate_dir.join("Cargo.toml"); if !cargo_manifest.exists() { return Err(format!( @@ -241,41 +198,85 @@ fn read_axum_project(manifest: &Path) -> Result { )); } - let crate_name = adapter - .get("crate") - .and_then(Value::as_str) - .map(|s| s.to_string()) - .unwrap_or_else(|| { + let crate_name = adapter.get("crate").and_then(Value::as_str).map_or_else( + || { read_package_name(&cargo_manifest).unwrap_or_else(|_| { crate_dir .file_name() .and_then(|n| n.to_str()) .unwrap_or("axum-adapter") - .to_string() + .to_owned() }) - }); + }, + ToString::to_string, + ); let port = match adapter.get("port").and_then(Value::as_integer) { - Some(value) => { - if !(1..=u16::MAX as i64).contains(&value) { - return Err(format!( + Some(port_value) => u16::try_from(port_value) + .ok() + .filter(|port| *port > 0) + .ok_or_else(|| { + format!( "adapter.port in {} must be between 1 and 65535", manifest.display() - )); - } - value as u16 - } + ) + })?, None => 8787, }; Ok(AxumProject { - crate_dir, cargo_manifest, + crate_dir, crate_name, port, }) } +#[inline] +pub fn register() { + register_adapter(&AXUM_ADAPTER); + 
register_adapter_blueprint(&AXUM_BLUEPRINT); +} + +#[ctor] +fn register_ctor() { + register(); +} + +fn run_cargo(project: &AxumProject, subcommand: &str, extra_args: &[String]) -> Result<(), String> { + let display = project.crate_dir.display(); + log::info!( + "[edgezero] Axum {subcommand} ({}) in {} (port: {})", + project.crate_name, + display, + project.port + ); + let mut command = Command::new("cargo"); + command.arg(subcommand); + command.arg("--manifest-path"); + command.arg( + project + .cargo_manifest + .to_str() + .ok_or_else(|| format!("invalid manifest path {}", project.cargo_manifest.display()))?, + ); + command.args(extra_args); + command.current_dir(&project.crate_dir); + let status = command + .status() + .map_err(|err| format!("failed to run cargo {subcommand}: {err}"))?; + if status.success() { + Ok(()) + } else { + Err(format!("cargo {subcommand} failed with status {status}")) + } +} + +fn serve(extra_args: &[String]) -> Result<(), String> { + let project = locate_project()?; + run_cargo(&project, "run", extra_args) +} + #[cfg(test)] mod tests { use super::*; @@ -283,25 +284,82 @@ mod tests { use tempfile::tempdir; #[test] - fn read_axum_project_loads_defaults() { + fn adapter_name_is_axum() { + assert_eq!(AXUM_ADAPTER.name(), "axum"); + } + + #[test] + fn blueprint_has_correct_id() { + assert_eq!(AXUM_BLUEPRINT.id, "axum"); + assert_eq!(AXUM_BLUEPRINT.display_name, "Axum"); + } + + #[test] + fn deploy_returns_error() { + let result = deploy(&[]); + assert!(result.is_err()); + assert!(result + .unwrap_err() + .contains("does not define a deploy command")); + } + + #[test] + fn find_axum_manifest_finds_closest() { let dir = tempdir().unwrap(); let root = dir.path(); + let nested = root.join("level1/level2"); + fs::create_dir_all(&nested).unwrap(); + + // Create axum.toml at root + fs::write(root.join("Cargo.toml"), "[workspace]").unwrap(); fs::write( root.join("axum.toml"), - "[adapter]\ncrate = \"demo\"\ncrate_dir = \".\"\n", + "[adapter]\ncrate 
= \"root\"\ncrate_dir = \".\"\n", ) .unwrap(); + + // Create axum.toml at level1 fs::write( - root.join("Cargo.toml"), - "[package]\nname = \"demo\"\nversion = \"0.1.0\"\n", + root.join("level1/Cargo.toml"), + "[package]\nname = \"level1\"\nversion = \"0.1.0\"\n", + ) + .unwrap(); + fs::write( + root.join("level1/axum.toml"), + "[adapter]\ncrate = \"level1\"\ncrate_dir = \".\"\n", ) .unwrap(); - let project = read_axum_project(&root.join("axum.toml")).expect("project"); - assert_eq!(project.crate_name, "demo"); - assert_eq!(project.crate_dir, root); - assert_eq!(project.cargo_manifest, root.join("Cargo.toml")); - assert_eq!(project.port, 8787); + // Search from level2, should find level1's axum.toml (closer) + let found = find_axum_manifest(&nested).expect("manifest"); + assert_eq!(found, root.join("level1/axum.toml")); + } + + #[test] + fn find_axum_manifest_finds_in_current_dir() { + let dir = tempdir().unwrap(); + let root = dir.path(); + fs::write(root.join("Cargo.toml"), "[workspace]").unwrap(); + fs::write( + root.join("axum.toml"), + "[adapter]\ncrate = \"demo\"\ncrate_dir = \".\"\n", + ) + .unwrap(); + + let found = find_axum_manifest(root).expect("manifest"); + assert_eq!(found, root.join("axum.toml")); + } + + #[test] + fn find_axum_manifest_returns_error_when_not_found() { + let dir = tempdir().unwrap(); + let root = dir.path(); + // Create an empty directory with a Cargo.toml but no axum.toml + fs::write(root.join("Cargo.toml"), "[workspace]").unwrap(); + + let result = find_axum_manifest(root); + assert!(result.is_err()); + assert!(result.unwrap_err().contains("could not locate axum.toml")); } #[test] @@ -322,12 +380,12 @@ mod tests { } #[test] - fn read_axum_project_uses_custom_port() { + fn read_axum_project_accepts_max_valid_port() { let dir = tempdir().unwrap(); let root = dir.path(); fs::write( root.join("axum.toml"), - "[adapter]\ncrate = \"demo\"\ncrate_dir = \".\"\nport = 4001\n", + "[adapter]\ncrate = \"demo\"\ncrate_dir = \".\"\nport = 
65535\n", ) .unwrap(); fs::write( @@ -337,16 +395,16 @@ mod tests { .unwrap(); let project = read_axum_project(&root.join("axum.toml")).expect("project"); - assert_eq!(project.port, 4001); + assert_eq!(project.port, 0xFFFF); } #[test] - fn read_axum_project_rejects_invalid_port() { + fn read_axum_project_accepts_min_valid_port() { let dir = tempdir().unwrap(); let root = dir.path(); fs::write( root.join("axum.toml"), - "[adapter]\ncrate = \"demo\"\ncrate_dir = \".\"\nport = 70000\n", + "[adapter]\ncrate = \"demo\"\ncrate_dir = \".\"\nport = 1\n", ) .unwrap(); fs::write( @@ -355,39 +413,33 @@ mod tests { ) .unwrap(); - let result = read_axum_project(&root.join("axum.toml")); - match result { - Ok(_) => panic!("expected error"), - Err(e) => assert!(e.contains("must be between 1 and 65535")), - } + let project = read_axum_project(&root.join("axum.toml")).expect("project"); + assert_eq!(project.port, 1); } #[test] - fn read_axum_project_rejects_zero_port() { + fn read_axum_project_falls_back_to_package_name() { let dir = tempdir().unwrap(); let root = dir.path(); - fs::write( - root.join("axum.toml"), - "[adapter]\ncrate = \"demo\"\ncrate_dir = \".\"\nport = 0\n", - ) - .unwrap(); + // No crate key in adapter table + fs::write(root.join("axum.toml"), "[adapter]\ncrate_dir = \".\"\n").unwrap(); fs::write( root.join("Cargo.toml"), - "[package]\nname = \"demo\"\nversion = \"0.1.0\"\n", + "[package]\nname = \"my-package\"\nversion = \"0.1.0\"\n", ) .unwrap(); - let result = read_axum_project(&root.join("axum.toml")); - assert!(result.is_err()); + let project = read_axum_project(&root.join("axum.toml")).expect("project"); + assert_eq!(project.crate_name, "my-package"); } #[test] - fn read_axum_project_rejects_negative_port() { + fn read_axum_project_loads_defaults() { let dir = tempdir().unwrap(); let root = dir.path(); fs::write( root.join("axum.toml"), - "[adapter]\ncrate = \"demo\"\ncrate_dir = \".\"\nport = -1\n", + "[adapter]\ncrate = \"demo\"\ncrate_dir = \".\"\n", ) 
.unwrap(); fs::write( @@ -396,15 +448,22 @@ mod tests { ) .unwrap(); - let result = read_axum_project(&root.join("axum.toml")); - assert!(result.is_err()); + let project = read_axum_project(&root.join("axum.toml")).expect("project"); + assert_eq!(project.crate_name, "demo"); + assert_eq!(project.crate_dir, root); + assert_eq!(project.cargo_manifest, root.join("Cargo.toml")); + assert_eq!(project.port, 8787); } #[test] - fn read_axum_project_rejects_missing_adapter_table() { + fn read_axum_project_rejects_invalid_port() { let dir = tempdir().unwrap(); let root = dir.path(); - fs::write(root.join("axum.toml"), "[other]\nkey = \"value\"\n").unwrap(); + fs::write( + root.join("axum.toml"), + "[adapter]\ncrate = \"demo\"\ncrate_dir = \".\"\nport = 70000\n", + ) + .unwrap(); fs::write( root.join("Cargo.toml"), "[package]\nname = \"demo\"\nversion = \"0.1.0\"\n", @@ -414,15 +473,15 @@ mod tests { let result = read_axum_project(&root.join("axum.toml")); match result { Ok(_) => panic!("expected error"), - Err(e) => assert!(e.contains("adapter table missing")), + Err(err) => assert!(err.contains("must be between 1 and 65535")), } } #[test] - fn read_axum_project_rejects_missing_crate_dir() { + fn read_axum_project_rejects_missing_adapter_table() { let dir = tempdir().unwrap(); let root = dir.path(); - fs::write(root.join("axum.toml"), "[adapter]\ncrate = \"demo\"\n").unwrap(); + fs::write(root.join("axum.toml"), "[other]\nkey = \"value\"\n").unwrap(); fs::write( root.join("Cargo.toml"), "[package]\nname = \"demo\"\nversion = \"0.1.0\"\n", @@ -432,7 +491,7 @@ mod tests { let result = read_axum_project(&root.join("axum.toml")); match result { Ok(_) => panic!("expected error"), - Err(e) => assert!(e.contains("crate_dir missing")), + Err(err) => assert!(err.contains("adapter table missing")), } } @@ -452,55 +511,54 @@ mod tests { let result = read_axum_project(&root.join("axum.toml")); match result { Ok(_) => panic!("expected error"), - Err(e) => assert!(e.contains("Cargo.toml 
missing")), + Err(err) => assert!(err.contains("Cargo.toml missing")), } } #[test] - fn read_axum_project_falls_back_to_package_name() { + fn read_axum_project_rejects_missing_crate_dir() { let dir = tempdir().unwrap(); let root = dir.path(); - // No crate key in adapter table - fs::write(root.join("axum.toml"), "[adapter]\ncrate_dir = \".\"\n").unwrap(); + fs::write(root.join("axum.toml"), "[adapter]\ncrate = \"demo\"\n").unwrap(); fs::write( root.join("Cargo.toml"), - "[package]\nname = \"my-package\"\nversion = \"0.1.0\"\n", + "[package]\nname = \"demo\"\nversion = \"0.1.0\"\n", ) .unwrap(); - let project = read_axum_project(&root.join("axum.toml")).expect("project"); - assert_eq!(project.crate_name, "my-package"); + let result = read_axum_project(&root.join("axum.toml")); + match result { + Ok(_) => panic!("expected error"), + Err(err) => assert!(err.contains("crate_dir missing")), + } } #[test] - fn read_axum_project_with_relative_crate_dir() { + fn read_axum_project_rejects_negative_port() { let dir = tempdir().unwrap(); let root = dir.path(); - let adapter_dir = root.join("crates/my-adapter"); - fs::create_dir_all(&adapter_dir).unwrap(); fs::write( root.join("axum.toml"), - "[adapter]\ncrate = \"my-adapter\"\ncrate_dir = \"crates/my-adapter\"\n", + "[adapter]\ncrate = \"demo\"\ncrate_dir = \".\"\nport = -1\n", ) .unwrap(); fs::write( - adapter_dir.join("Cargo.toml"), - "[package]\nname = \"my-adapter\"\nversion = \"0.1.0\"\n", + root.join("Cargo.toml"), + "[package]\nname = \"demo\"\nversion = \"0.1.0\"\n", ) .unwrap(); - let project = read_axum_project(&root.join("axum.toml")).expect("project"); - assert_eq!(project.crate_name, "my-adapter"); - assert_eq!(project.crate_dir, adapter_dir); + let result = read_axum_project(&root.join("axum.toml")); + assert!(result.is_err()); } #[test] - fn read_axum_project_accepts_max_valid_port() { + fn read_axum_project_rejects_zero_port() { let dir = tempdir().unwrap(); let root = dir.path(); fs::write( 
root.join("axum.toml"), - "[adapter]\ncrate = \"demo\"\ncrate_dir = \".\"\nport = 65535\n", + "[adapter]\ncrate = \"demo\"\ncrate_dir = \".\"\nport = 0\n", ) .unwrap(); fs::write( @@ -509,17 +567,17 @@ mod tests { ) .unwrap(); - let project = read_axum_project(&root.join("axum.toml")).expect("project"); - assert_eq!(project.port, 65535); + let result = read_axum_project(&root.join("axum.toml")); + assert!(result.is_err()); } #[test] - fn read_axum_project_accepts_min_valid_port() { + fn read_axum_project_uses_custom_port() { let dir = tempdir().unwrap(); let root = dir.path(); fs::write( root.join("axum.toml"), - "[adapter]\ncrate = \"demo\"\ncrate_dir = \".\"\nport = 1\n", + "[adapter]\ncrate = \"demo\"\ncrate_dir = \".\"\nport = 4001\n", ) .unwrap(); fs::write( @@ -529,85 +587,28 @@ mod tests { .unwrap(); let project = read_axum_project(&root.join("axum.toml")).expect("project"); - assert_eq!(project.port, 1); - } - - #[test] - fn find_axum_manifest_returns_error_when_not_found() { - let dir = tempdir().unwrap(); - let root = dir.path(); - // Create an empty directory with a Cargo.toml but no axum.toml - fs::write(root.join("Cargo.toml"), "[workspace]").unwrap(); - - let result = find_axum_manifest(root); - assert!(result.is_err()); - assert!(result.unwrap_err().contains("could not locate axum.toml")); - } - - #[test] - fn find_axum_manifest_finds_in_current_dir() { - let dir = tempdir().unwrap(); - let root = dir.path(); - fs::write(root.join("Cargo.toml"), "[workspace]").unwrap(); - fs::write( - root.join("axum.toml"), - "[adapter]\ncrate = \"demo\"\ncrate_dir = \".\"\n", - ) - .unwrap(); - - let found = find_axum_manifest(root).expect("manifest"); - assert_eq!(found, root.join("axum.toml")); + assert_eq!(project.port, 4001); } #[test] - fn find_axum_manifest_finds_closest() { + fn read_axum_project_with_relative_crate_dir() { let dir = tempdir().unwrap(); let root = dir.path(); - let nested = root.join("level1/level2"); - fs::create_dir_all(&nested).unwrap(); 
- - // Create axum.toml at root - fs::write(root.join("Cargo.toml"), "[workspace]").unwrap(); + let adapter_dir = root.join("crates/my-adapter"); + fs::create_dir_all(&adapter_dir).unwrap(); fs::write( root.join("axum.toml"), - "[adapter]\ncrate = \"root\"\ncrate_dir = \".\"\n", - ) - .unwrap(); - - // Create axum.toml at level1 - fs::write( - root.join("level1/Cargo.toml"), - "[package]\nname = \"level1\"\nversion = \"0.1.0\"\n", + "[adapter]\ncrate = \"my-adapter\"\ncrate_dir = \"crates/my-adapter\"\n", ) .unwrap(); fs::write( - root.join("level1/axum.toml"), - "[adapter]\ncrate = \"level1\"\ncrate_dir = \".\"\n", + adapter_dir.join("Cargo.toml"), + "[package]\nname = \"my-adapter\"\nversion = \"0.1.0\"\n", ) .unwrap(); - // Search from level2, should find level1's axum.toml (closer) - let found = find_axum_manifest(&nested).expect("manifest"); - assert_eq!(found, root.join("level1/axum.toml")); - } - - #[test] - fn deploy_returns_error() { - let result = deploy(&[]); - assert!(result.is_err()); - assert!(result - .unwrap_err() - .contains("does not define a deploy command")); - } - - #[test] - fn adapter_name_is_axum() { - assert_eq!(AXUM_ADAPTER.name(), "axum"); - } - - #[test] - fn blueprint_has_correct_id() { - assert_eq!(AXUM_BLUEPRINT.id, "axum"); - assert_eq!(AXUM_BLUEPRINT.display_name, "Axum"); + let project = read_axum_project(&root.join("axum.toml")).expect("project"); + assert_eq!(project.crate_name, "my-adapter"); + assert_eq!(project.crate_dir, adapter_dir); } } diff --git a/crates/edgezero-adapter-axum/src/config_store.rs b/crates/edgezero-adapter-axum/src/config_store.rs index 2902518..8fe373d 100644 --- a/crates/edgezero-adapter-axum/src/config_store.rs +++ b/crates/edgezero-adapter-axum/src/config_store.rs @@ -1,6 +1,7 @@ //! Axum adapter config store: env vars with in-memory defaults fallback. 
use std::collections::HashMap; +use std::env; use edgezero_core::config_store::{ConfigStore, ConfigStoreError}; @@ -13,41 +14,52 @@ use edgezero_core::config_store::{ConfigStore, ConfigStoreError}; /// declared in `[stores.config.defaults]`. Use an empty-string default when a /// key should be overrideable from env without carrying a real default value. pub struct AxumConfigStore { - env: HashMap, defaults: HashMap, + env: HashMap, } impl AxumConfigStore { - /// Create from env vars and optional manifest defaults. - pub fn new( - env: impl IntoIterator, - defaults: impl IntoIterator, - ) -> Self { - Self { - env: env.into_iter().collect(), - defaults: defaults.into_iter().collect(), - } - } - /// Create from the current process environment and manifest defaults. - pub fn from_env(defaults: impl IntoIterator) -> Self { - Self::from_lookup(defaults, |key| std::env::var(key).ok()) + #[inline] + pub fn from_env(defaults: D) -> Self + where + D: IntoIterator, + { + Self::from_lookup(defaults, |key| env::var(key).ok()) } - fn from_lookup(defaults: impl IntoIterator, mut lookup: F) -> Self + fn from_lookup(defaults: D, mut lookup: F) -> Self where + D: IntoIterator, F: FnMut(&str) -> Option, { - let defaults: HashMap = defaults.into_iter().collect(); - let env = defaults + let collected: HashMap = defaults.into_iter().collect(); + let env = collected .keys() .filter_map(|key| lookup(key).map(|value| (key.clone(), value))) .collect(); - Self { env, defaults } + Self { + defaults: collected, + env, + } + } + + /// Create from env vars and optional manifest defaults. 
+ #[inline] + pub fn new(env: E, defaults: D) -> Self + where + E: IntoIterator, + D: IntoIterator, + { + Self { + defaults: defaults.into_iter().collect(), + env: env.into_iter().collect(), + } } } impl ConfigStore for AxumConfigStore { + #[inline] fn get(&self, key: &str) -> Result, ConfigStoreError> { Ok(self .env @@ -59,96 +71,100 @@ impl ConfigStore for AxumConfigStore { #[cfg(test)] mod tests { - use super::*; + // Run the shared contract tests against AxumConfigStore (defaults path). + edgezero_core::config_store_contract_tests!(axum_config_store_defaults_contract, { + AxumConfigStore::new( + [], + [ + ("contract.key.a".to_owned(), "value_a".to_owned()), + ("contract.key.b".to_owned(), "value_b".to_owned()), + ], + ) + }); - fn store(env: &[(&str, &str)], defaults: &[(&str, &str)]) -> AxumConfigStore { + // Run the shared contract tests against AxumConfigStore (env path). + edgezero_core::config_store_contract_tests!(axum_config_store_env_contract, { AxumConfigStore::new( - env.iter().map(|(k, v)| (k.to_string(), v.to_string())), - defaults.iter().map(|(k, v)| (k.to_string(), v.to_string())), + [ + ("contract.key.a".to_owned(), "value_a".to_owned()), + ("contract.key.b".to_owned(), "value_b".to_owned()), + ], + [], ) - } + }); - #[test] - fn axum_config_store_returns_values() { - let s = store(&[("MY_KEY", "my_val")], &[]); - assert_eq!( - s.get("MY_KEY").expect("config value"), - Some("my_val".to_string()) - ); - } + use super::*; - #[test] - fn axum_config_store_returns_none_for_missing() { - let s = store(&[], &[]); - assert_eq!(s.get("NOPE").expect("missing config"), None); + fn store(env: &[(&str, &str)], defaults: &[(&str, &str)]) -> AxumConfigStore { + AxumConfigStore::new( + env.iter() + .map(|(key, value)| ((*key).to_owned(), (*value).to_owned())), + defaults + .iter() + .map(|(key, value)| ((*key).to_owned(), (*value).to_owned())), + ) } #[test] fn axum_config_store_env_overrides_defaults() { - let s = store(&[("KEY", "from_env")], &[("KEY", 
"from_default")]); + let cs = store(&[("KEY", "from_env")], &[("KEY", "from_default")]); assert_eq!( - s.get("KEY").expect("config value"), - Some("from_env".to_string()) + cs.get("KEY").expect("config value"), + Some("from_env".to_owned()) ); } #[test] fn axum_config_store_falls_back_to_defaults() { - let s = store(&[], &[("KEY", "default_val")]); + let cs = store(&[], &[("KEY", "default_val")]); assert_eq!( - s.get("KEY").expect("default config"), - Some("default_val".to_string()) + cs.get("KEY").expect("default config"), + Some("default_val".to_owned()) ); } #[test] fn axum_config_store_from_env_reads_only_declared_keys() { - let s = AxumConfigStore::from_lookup( + let cs = AxumConfigStore::from_lookup( [ - ("feature.new_checkout".to_string(), "false".to_string()), - ("service.timeout_ms".to_string(), "1500".to_string()), + ("feature.new_checkout".to_owned(), "false".to_owned()), + ("service.timeout_ms".to_owned(), "1500".to_owned()), ], |key| match key { - "feature.new_checkout" => Some("true".to_string()), - "DATABASE_URL" => Some("postgres://secret".to_string()), + "feature.new_checkout" => Some("true".to_owned()), + "DATABASE_URL" => Some("postgres://secret".to_owned()), _ => None, }, ); assert_eq!( - s.get("feature.new_checkout").expect("allowed env override"), - Some("true".to_string()) + cs.get("feature.new_checkout") + .expect("allowed env override"), + Some("true".to_owned()) ); assert_eq!( - s.get("service.timeout_ms").expect("default fallback"), - Some("1500".to_string()) + cs.get("service.timeout_ms").expect("default fallback"), + Some("1500".to_owned()) ); assert_eq!( - s.get("DATABASE_URL") + cs.get("DATABASE_URL") .expect("undeclared key should stay hidden"), None ); } - // Run the shared contract tests against AxumConfigStore (env path). 
- edgezero_core::config_store_contract_tests!(axum_config_store_env_contract, { - AxumConfigStore::new( - [ - ("contract.key.a".to_string(), "value_a".to_string()), - ("contract.key.b".to_string(), "value_b".to_string()), - ], - [], - ) - }); + #[test] + fn axum_config_store_returns_none_for_missing() { + let cs = store(&[], &[]); + assert_eq!(cs.get("NOPE").expect("missing config"), None); + } - // Run the shared contract tests against AxumConfigStore (defaults path). - edgezero_core::config_store_contract_tests!(axum_config_store_defaults_contract, { - AxumConfigStore::new( - [], - [ - ("contract.key.a".to_string(), "value_a".to_string()), - ("contract.key.b".to_string(), "value_b".to_string()), - ], - ) - }); + #[test] + fn axum_config_store_returns_values() { + let cs = store(&[("MY_KEY", "my_val")], &[]); + assert_eq!( + cs.get("MY_KEY").expect("config value"), + Some("my_val".to_owned()) + ); + } } diff --git a/crates/edgezero-adapter-axum/src/context.rs b/crates/edgezero-adapter-axum/src/context.rs index 6fc8d9e..88e6f1e 100644 --- a/crates/edgezero-adapter-axum/src/context.rs +++ b/crates/edgezero-adapter-axum/src/context.rs @@ -9,13 +9,15 @@ pub struct AxumRequestContext { } impl AxumRequestContext { - pub fn insert(request: &mut Request, context: AxumRequestContext) { - request.extensions_mut().insert(context); - } - + #[inline] pub fn get(request: &Request) -> Option<&AxumRequestContext> { request.extensions().get::() } + + #[inline] + pub fn insert(request: &mut Request, context: AxumRequestContext) { + request.extensions_mut().insert(context); + } } #[cfg(test)] diff --git a/crates/edgezero-adapter-axum/src/dev_server.rs b/crates/edgezero-adapter-axum/src/dev_server.rs index 0a03b4c..e5b57a5 100644 --- a/crates/edgezero-adapter-axum/src/dev_server.rs +++ b/crates/edgezero-adapter-axum/src/dev_server.rs @@ -1,22 +1,27 @@ +use std::fs; use std::net::{SocketAddr, TcpListener as StdTcpListener}; use std::path::{Path, PathBuf}; +use std::sync::Arc; -use 
anyhow::Context; +use anyhow::Context as _; use axum::Router; +use tokio::net::TcpListener as TokioTcpListener; use tokio::runtime::Builder as RuntimeBuilder; use tokio::signal; -use tower::{service_fn, Service}; +use tower::{service_fn, Service as _}; -use edgezero_core::app::Hooks; +use edgezero_core::app::{Hooks, AXUM_ADAPTER}; use edgezero_core::config_store::ConfigStoreHandle; use edgezero_core::key_value_store::KvHandle; -use edgezero_core::manifest::ManifestLoader; +use edgezero_core::manifest::{Manifest, ManifestLoader, DEFAULT_KV_STORE_NAME}; use edgezero_core::router::RouterService; use edgezero_core::secret_store::SecretHandle; use log::LevelFilter; use simple_logger::SimpleLogger; use crate::config_store::AxumConfigStore; +use crate::key_value_store::PersistentKvStore; +use crate::secret_store::EnvSecretStore; use crate::service::EdgeZeroAxumService; #[derive(Clone, Copy, Debug, Eq, PartialEq)] @@ -25,7 +30,7 @@ enum KvInitRequirement { Required, } -/// Configuration used when running the dev server embedding EdgeZero into Axum. +/// Configuration used when running the dev server embedding `EdgeZero` into Axum. #[derive(Clone)] pub struct AxumDevServerConfig { pub addr: SocketAddr, @@ -33,6 +38,7 @@ pub struct AxumDevServerConfig { } impl Default for AxumDevServerConfig { + #[inline] fn default() -> Self { Self { addr: SocketAddr::from(([127, 0, 0, 1], 8787)), @@ -55,56 +61,27 @@ struct Stores { secrets: Option, } -/// Blocking dev server runner used by the EdgeZero CLI. +/// Blocking dev server runner used by the `EdgeZero` CLI. 
pub struct AxumDevServer { - router: RouterService, config: AxumDevServerConfig, + router: RouterService, stores: Stores, } impl AxumDevServer { + #[must_use] + #[inline] pub fn new(router: RouterService) -> Self { Self { - router, config: AxumDevServerConfig::default(), - stores: Stores::default(), - } - } - - pub fn with_config(router: RouterService, config: AxumDevServerConfig) -> Self { - Self { router, - config, stores: Stores::default(), } } - #[must_use] - pub fn with_config_store(mut self, handle: ConfigStoreHandle) -> Self { - self.stores.config_store = Some(handle); - self - } - - /// Attach a KV store to the dev server. - /// - /// The handle is shared across all requests, making the `Kv` extractor - /// available in handlers. - #[must_use] - pub fn with_kv_handle(mut self, handle: KvHandle) -> Self { - self.stores.kv = Some(handle); - self - } - - /// Attach a secret store to the dev server. - /// - /// The handle is shared across all requests, making the `Secrets` extractor - /// available in handlers. - #[must_use] - pub fn with_secret_handle(mut self, handle: SecretHandle) -> Self { - self.stores.secrets = Some(handle); - self - } - + /// # Errors + /// Returns an error if the dev server fails to bind, the Tokio runtime fails to start, or the underlying request loop returns an error. + #[inline] pub fn run(self) -> anyhow::Result<()> { let runtime = RuntimeBuilder::new_multi_thread() .enable_all() @@ -122,20 +99,20 @@ impl AxumDevServer { } = self; // Allow binding to already-open listener if caller created one to surface errors early. 
- let listener = StdTcpListener::bind(config.addr) + let std_listener = StdTcpListener::bind(config.addr) .with_context(|| format!("failed to bind dev server to {}", config.addr))?; - listener + std_listener .set_nonblocking(true) .context("failed to set listener to non-blocking")?; - let listener = tokio::net::TcpListener::from_std(listener) + let listener = TokioTcpListener::from_std(std_listener) .context("failed to adopt std listener into tokio")?; serve_with_stores(router, listener, config.enable_ctrl_c, stores).await } #[cfg(test)] - async fn run_with_listener(self, listener: tokio::net::TcpListener) -> anyhow::Result<()> { + async fn run_with_listener(self, listener: TokioTcpListener) -> anyhow::Result<()> { let AxumDevServer { router, config, @@ -143,9 +120,48 @@ impl AxumDevServer { } = self; serve_with_stores(router, listener, config.enable_ctrl_c, stores).await } + + #[must_use] + #[inline] + pub fn with_config(router: RouterService, config: AxumDevServerConfig) -> Self { + Self { + config, + router, + stores: Stores::default(), + } + } + + #[must_use] + #[inline] + pub fn with_config_store(mut self, handle: ConfigStoreHandle) -> Self { + self.stores.config_store = Some(handle); + self + } + + /// Attach a KV store to the dev server. + /// + /// The handle is shared across all requests, making the `Kv` extractor + /// available in handlers. + #[must_use] + #[inline] + pub fn with_kv_handle(mut self, handle: KvHandle) -> Self { + self.stores.kv = Some(handle); + self + } + + /// Attach a secret store to the dev server. + /// + /// The handle is shared across all requests, making the `Secrets` extractor + /// available in handlers. 
+ #[must_use] + #[inline] + pub fn with_secret_handle(mut self, handle: SecretHandle) -> Self { + self.stores.secrets = Some(handle); + self + } } -fn kv_init_requirement(manifest: &edgezero_core::manifest::Manifest) -> KvInitRequirement { +fn kv_init_requirement(manifest: &Manifest) -> KvInitRequirement { if manifest.stores.kv.is_some() { KvInitRequirement::Required } else { @@ -154,7 +170,7 @@ fn kv_init_requirement(manifest: &edgezero_core::manifest::Manifest) -> KvInitRe } fn kv_store_path(store_name: &str) -> PathBuf { - if store_name == edgezero_core::manifest::DEFAULT_KV_STORE_NAME { + if store_name == DEFAULT_KV_STORE_NAME { return PathBuf::from(".edgezero/kv.redb"); } @@ -171,18 +187,14 @@ fn store_name_slug(store_name: &str) -> String { let mut slug = String::with_capacity(MAX_SLUG_LEN); let mut last_was_separator = false; for ch in store_name.chars() { - let mapped = if ch.is_ascii_alphanumeric() { - Some(ch.to_ascii_lowercase()) - } else { - None - }; + let mapped = ch.is_ascii_alphanumeric().then(|| ch.to_ascii_lowercase()); match mapped { - Some(ch) => { + Some(lower_ch) => { if slug.len() == MAX_SLUG_LEN { break; } - slug.push(ch); + slug.push(lower_ch); last_was_separator = false; } None if !slug.is_empty() && !last_was_separator => { @@ -201,7 +213,7 @@ fn store_name_slug(store_name: &str) -> String { } if slug.is_empty() { - "store".to_string() + "store".to_owned() } else { slug } @@ -209,62 +221,56 @@ fn store_name_slug(store_name: &str) -> String { fn stable_store_name_hash(store_name: &str) -> u64 { // Deterministic FNV-1a keeps local KV file names stable across processes. 
- let mut hash = 0xcbf29ce484222325u64; + let mut hash = 0xcbf2_9ce4_8422_2325_u64; for byte in store_name.as_bytes() { hash ^= u64::from(*byte); - hash = hash.wrapping_mul(0x100000001b3); + hash = hash.wrapping_mul(0x0000_0001_0000_01b3); } hash } -fn kv_handle_from_path(kv_path: &Path) -> anyhow::Result { +fn kv_handle_from_path(kv_path: &Path) -> anyhow::Result { if let Some(parent) = kv_path.parent() { - std::fs::create_dir_all(parent).context("failed to create KV store directory")?; + fs::create_dir_all(parent).context("failed to create KV store directory")?; } - let kv_store = std::sync::Arc::new( - crate::key_value_store::PersistentKvStore::new(kv_path) - .context("failed to create KV store")?, - ); + let kv_store = Arc::new(PersistentKvStore::new(kv_path).context("failed to create KV store")?); log::info!("KV store: {}", kv_path.display()); - Ok(edgezero_core::key_value_store::KvHandle::new(kv_store)) + Ok(KvHandle::new(kv_store)) } async fn serve_with_stores( router: RouterService, - listener: tokio::net::TcpListener, + listener: TokioTcpListener, enable_ctrl_c: bool, stores: Stores, ) -> anyhow::Result<()> { - let mut service = EdgeZeroAxumService::new(router); - if let Some(handle) = stores.config_store { - service = service.with_config_store_handle(handle); - } - if let Some(handle) = stores.kv { - service = service.with_kv_handle(handle); - } - if let Some(handle) = stores.secrets { - service = service.with_secret_handle(handle); - } - - let service = service; - let router = Router::new().fallback_service(service_fn(move |req| { + let service = { + let mut service = EdgeZeroAxumService::new(router); + if let Some(handle) = stores.config_store { + service = service.with_config_store_handle(handle); + } + if let Some(handle) = stores.kv { + service = service.with_kv_handle(handle); + } + if let Some(handle) = stores.secrets { + service = service.with_secret_handle(handle); + } + service + }; + let axum_router = 
Router::new().fallback_service(service_fn(move |req| { let mut svc = service.clone(); async move { svc.call(req).await } })); - let make_service = router.into_make_service_with_connect_info::(); + let make_service = axum_router.into_make_service_with_connect_info::(); - let shutdown = if enable_ctrl_c { - Some(async { - let _ = signal::ctrl_c().await; - }) - } else { - None - }; + let shutdown = enable_ctrl_c.then_some(async { + let _ctrl_c = signal::ctrl_c().await; + }); let server = axum::serve(listener, make_service); - if let Some(shutdown) = shutdown { - let server = server.with_graceful_shutdown(shutdown); - server.await.context("axum server error")?; + if let Some(shutdown_signal) = shutdown { + let graceful_server = server.with_graceful_shutdown(shutdown_signal); + graceful_server.await.context("axum server error")?; } else { server.await.context("axum server error")?; } @@ -272,25 +278,26 @@ async fn serve_with_stores( Ok(()) } +/// # Errors +/// Returns an error if the dev server fails to bind or any required store handle cannot be initialised. 
+#[inline] pub fn run_app(manifest_src: &str) -> anyhow::Result<()> { - let manifest = ManifestLoader::load_from_str(manifest_src); - let m = manifest.manifest(); - let logging = m.logging_or_default(edgezero_core::app::AXUM_ADAPTER); - let kv_init_requirement = kv_init_requirement(m); - let kv_store_name = m - .kv_store_name(edgezero_core::app::AXUM_ADAPTER) - .to_string(); + let manifest = ManifestLoader::try_load_from_str(manifest_src)?; + let manifest_data = manifest.manifest(); + let logging = manifest_data.logging_or_default(AXUM_ADAPTER); + let kv_init_requirement = kv_init_requirement(manifest_data); + let kv_store_name = manifest_data.kv_store_name(AXUM_ADAPTER).to_owned(); let kv_path = kv_store_path(&kv_store_name); - let has_secret_store = m.secret_store_enabled("axum"); + let has_secret_store = manifest_data.secret_store_enabled("axum"); - let level: LevelFilter = logging.level.into(); + let configured_level: LevelFilter = logging.level.into(); let level = if logging.echo_stdout.unwrap_or(true) { - level + configured_level } else { LevelFilter::Off }; - SimpleLogger::new().with_level(level).init().ok(); + let _logger_init = SimpleLogger::new().with_level(level).init(); let app = A::build_app(); let router = app.router().clone(); @@ -301,12 +308,12 @@ pub fn run_app(manifest_src: &str) -> anyhow::Result<()> { runtime.block_on(async move { let config = AxumDevServerConfig::default(); - let listener = StdTcpListener::bind(config.addr) + let std_listener = StdTcpListener::bind(config.addr) .with_context(|| format!("failed to bind dev server to {}", config.addr))?; - listener + std_listener .set_nonblocking(true) .context("failed to set listener to non-blocking")?; - let listener = tokio::net::TcpListener::from_std(listener) + let listener = TokioTcpListener::from_std(std_listener) .context("failed to adopt std listener into tokio")?; let kv_handle = match kv_handle_from_path(&kv_path) { @@ -336,22 +343,17 @@ pub fn run_app(manifest_src: &str) -> 
anyhow::Result<()> { // Unlike Fastly and Cloudflare, it does not check A::config_store() first. // If a user implements Hooks::config_store() without a [stores.config] section // in edgezero.toml, the override is silently ignored on Axum. - if A::config_store().is_some() && m.stores.config.is_none() { + if A::config_store().is_some() && manifest_data.stores.config.is_none() { log::warn!("A::config_store() is set but [stores.config] is missing in the manifest. This override is ignored on Axum."); } - let config_store_handle = m.stores.config.as_ref().map(|cfg| { + let config_store_handle = manifest_data.stores.config.as_ref().map(|cfg| { let defaults = cfg.config_store_defaults().clone(); let store = AxumConfigStore::from_env(defaults); - ConfigStoreHandle::new(std::sync::Arc::new(store)) + ConfigStoreHandle::new(Arc::new(store)) }); - let secret = if has_secret_store { - log::info!("Secret store: reading from environment variables"); - Some(SecretHandle::new(std::sync::Arc::new( - crate::secret_store::EnvSecretStore::new(), - ))) - } else { - None - }; + let secret = has_secret_store.then(|| { log::info!("Secret store: reading from environment variables"); SecretHandle::new(Arc::new( + EnvSecretStore::new(), + )) }); let stores = Stores { config_store: config_store_handle, kv: kv_handle, @@ -369,7 +371,7 @@ mod tests { #[test] fn default_config_uses_expected_address() { let config = AxumDevServerConfig::default(); - assert_eq!(config.addr.ip(), IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1))); + assert_eq!(config.addr.ip(), IpAddr::V4(Ipv4Addr::LOCALHOST)); assert_eq!(config.addr.port(), 8787); } @@ -394,7 +396,7 @@ mod tests { addr, enable_ctrl_c: false, }; - assert_eq!(config.addr.ip(), IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0))); + assert_eq!(config.addr.ip(), IpAddr::V4(Ipv4Addr::UNSPECIFIED)); assert_eq!(config.addr.port(), 3000); assert!(!config.enable_ctrl_c); } @@ -426,7 +428,7 @@ mod tests { #[test] fn default_store_name_uses_legacy_kv_path() { assert_eq!( - 
kv_store_path(edgezero_core::manifest::DEFAULT_KV_STORE_NAME), + kv_store_path(DEFAULT_KV_STORE_NAME), PathBuf::from(".edgezero/kv.redb") ); } @@ -495,16 +497,25 @@ mod integration_tests { use edgezero_core::error::EdgeError; use edgezero_core::extractor::Secrets; use edgezero_core::router::RouterService; + use edgezero_core::secret_store::SecretHandle as CoreSecretHandle; + use std::iter; use std::time::{Duration, Instant}; + use tokio::task::{spawn_blocking, JoinHandle}; + use tokio::time::sleep; struct TestServer { - base_url: String, - handle: tokio::task::JoinHandle<()>, _temp_dir: tempfile::TempDir, + base_url: String, + handle: JoinHandle<()>, + } + + struct TestServerSecrets { + base_url: String, + handle: JoinHandle<()>, } async fn start_test_server(router: RouterService) -> TestServer { - let listener = tokio::net::TcpListener::bind("127.0.0.1:0") + let listener = TokioTcpListener::bind("127.0.0.1:0") .await .expect("bind test server"); let addr = listener.local_addr().expect("local addr"); @@ -519,11 +530,11 @@ mod integration_tests { let server = AxumDevServer::with_config(router, config).with_kv_handle(kv_handle); let handle = tokio::spawn(async move { - let _ = server.run_with_listener(listener).await; + let _result = server.run_with_listener(listener).await; }); TestServer { - base_url: format!("http://{}", addr), + base_url: format!("http://{addr}"), handle, _temp_dir: temp_dir, } @@ -540,13 +551,14 @@ mod integration_tests { match make_request(client).send().await { Ok(response) => return response, Err(err) => { - if start.elapsed() >= timeout { - panic!("server did not respond before timeout: {}", err); - } + assert!( + start.elapsed() < timeout, + "server did not respond before timeout: {err}" + ); } } - tokio::time::sleep(Duration::from_millis(10)).await; + sleep(Duration::from_millis(10)).await; } } @@ -561,7 +573,7 @@ mod integration_tests { let client = reqwest::Client::new(); let url = format!("{}/test", server.base_url); - let response = 
send_with_retry(&client, |client| client.get(url.as_str())).await; + let response = send_with_retry(&client, |http_client| http_client.get(url.as_str())).await; assert_eq!(response.status(), reqwest::StatusCode::OK); assert_eq!(response.text().await.unwrap(), "hello from dev server"); @@ -576,7 +588,7 @@ mod integration_tests { let client = reqwest::Client::new(); let url = format!("{}/nonexistent", server.base_url); - let response = send_with_retry(&client, |client| client.get(url.as_str())).await; + let response = send_with_retry(&client, |http_client| http_client.get(url.as_str())).await; assert_eq!(response.status(), reqwest::StatusCode::NOT_FOUND); @@ -594,7 +606,7 @@ mod integration_tests { let client = reqwest::Client::new(); let url = format!("{}/submit", server.base_url); - let response = send_with_retry(&client, |client| client.get(url.as_str())).await; + let response = send_with_retry(&client, |http_client| http_client.get(url.as_str())).await; assert_eq!(response.status(), reqwest::StatusCode::METHOD_NOT_ALLOWED); @@ -608,9 +620,9 @@ mod integration_tests { .request() .headers() .get("x-custom") - .and_then(|v| v.to_str().ok()) + .and_then(|val| val.to_str().ok()) .unwrap_or("missing"); - Ok(value.to_string()) + Ok(value.to_owned()) } let router = RouterService::builder().get("/headers", handler).build(); @@ -618,8 +630,8 @@ mod integration_tests { let client = reqwest::Client::new(); let url = format!("{}/headers", server.base_url); - let response = send_with_retry(&client, |client| { - client.get(url.as_str()).header("x-custom", "my-value") + let response = send_with_retry(&client, |http_client| { + http_client.get(url.as_str()).header("x-custom", "my-value") }) .await; @@ -632,7 +644,7 @@ mod integration_tests { #[tokio::test(flavor = "multi_thread")] async fn server_fails_to_bind_to_used_port() { // First bind to a port - let listener = std::net::TcpListener::bind("127.0.0.1:0").expect("bind first"); + let listener = 
StdTcpListener::bind("127.0.0.1:0").expect("bind first"); let addr = listener.local_addr().expect("listener addr"); // Try to start server on same port @@ -644,15 +656,14 @@ mod integration_tests { let server = AxumDevServer::with_config(router, config); // Run in blocking mode to capture the error - let result = tokio::task::spawn_blocking(move || server.run()).await; + let result = spawn_blocking(move || server.run()).await; match result { - Ok(Err(e)) => { - let err_str = e.to_string(); + Ok(Err(err)) => { + let err_str = err.to_string(); assert!( err_str.contains("bind") || err_str.contains("address"), - "expected bind error, got: {}", - err_str + "expected bind error, got: {err_str}" ); } _ => panic!("expected bind error"), @@ -665,13 +676,13 @@ mod integration_tests { async fn kv_store_persists_across_requests() { async fn write_handler(ctx: RequestContext) -> Result<&'static str, EdgeError> { let store = ctx.kv_handle().expect("kv configured"); - store.put("counter", &42i32).await?; + store.put("counter", &42_i32).await?; Ok("written") } async fn read_handler(ctx: RequestContext) -> Result { let store = ctx.kv_handle().expect("kv configured"); - let val: i32 = store.get_or("counter", 0).await?; + let val: i32 = store.get_or("counter", 0_i32).await?; Ok(val.to_string()) } @@ -685,15 +696,17 @@ mod integration_tests { // Write a value let write_url = format!("{}/write", server.base_url); - let response = send_with_retry(&client, |client| client.post(write_url.as_str())).await; - assert_eq!(response.status(), reqwest::StatusCode::OK); - assert_eq!(response.text().await.unwrap(), "written"); + let write_response = + send_with_retry(&client, |http_client| http_client.post(write_url.as_str())).await; + assert_eq!(write_response.status(), reqwest::StatusCode::OK); + assert_eq!(write_response.text().await.unwrap(), "written"); // Read it back — proves shared state across requests let read_url = format!("{}/read", server.base_url); - let response = 
send_with_retry(&client, |client| client.get(read_url.as_str())).await; - assert_eq!(response.status(), reqwest::StatusCode::OK); - assert_eq!(response.text().await.unwrap(), "42"); + let read_response = + send_with_retry(&client, |http_client| http_client.get(read_url.as_str())).await; + assert_eq!(read_response.status(), reqwest::StatusCode::OK); + assert_eq!(read_response.text().await.unwrap(), "42"); server.handle.abort(); } @@ -727,22 +740,23 @@ mod integration_tests { let client = reqwest::Client::new(); // Write - let url = format!("{}/write", server.base_url); - send_with_retry(&client, |c| c.post(url.as_str())).await; + let write_url = format!("{}/write", server.base_url); + send_with_retry(&client, |http_client| http_client.post(write_url.as_str())).await; // Verify exists - let url = format!("{}/check", server.base_url); - let resp = send_with_retry(&client, |c| c.get(url.as_str())).await; - assert_eq!(resp.text().await.unwrap(), "exists=true"); + let check_url = format!("{}/check", server.base_url); + let exists_before = + send_with_retry(&client, |http_client| http_client.get(check_url.as_str())).await; + assert_eq!(exists_before.text().await.unwrap(), "exists=true"); // Delete - let url = format!("{}/delete", server.base_url); - send_with_retry(&client, |c| c.post(url.as_str())).await; + let delete_url = format!("{}/delete", server.base_url); + send_with_retry(&client, |http_client| http_client.post(delete_url.as_str())).await; // Verify gone - let url = format!("{}/check", server.base_url); - let resp = send_with_retry(&client, |c| c.get(url.as_str())).await; - assert_eq!(resp.text().await.unwrap(), "exists=false"); + let exists_after = + send_with_retry(&client, |http_client| http_client.get(check_url.as_str())).await; + assert_eq!(exists_after.text().await.unwrap(), "exists=false"); server.handle.abort(); } @@ -751,7 +765,9 @@ mod integration_tests { async fn kv_store_update_across_requests() { async fn increment_handler(ctx: RequestContext) -> 
Result { let kv = ctx.kv_handle().expect("kv configured"); - let val = kv.read_modify_write("counter", 0i32, |n| n + 1).await?; + let val = kv + .read_modify_write("counter", 0_i32, |n| n + 1_i32) + .await?; Ok(val.to_string()) } @@ -763,8 +779,8 @@ mod integration_tests { let url = format!("{}/inc", server.base_url); // Increment 5 times, each should return incremented value - for expected in 1..=5i32 { - let resp = send_with_retry(&client, |c| c.post(url.as_str())).await; + for expected in 1_i32..=5_i32 { + let resp = send_with_retry(&client, |http_client| http_client.post(url.as_str())).await; assert_eq!( resp.text().await.unwrap(), expected.to_string(), @@ -779,7 +795,7 @@ mod integration_tests { async fn kv_store_returns_not_found_gracefully() { async fn read_handler(ctx: RequestContext) -> Result { let kv = ctx.kv_handle().expect("kv configured"); - let val: i32 = kv.get_or("nonexistent", -1).await?; + let val: i32 = kv.get_or("nonexistent", -1_i32).await?; Ok(val.to_string()) } @@ -788,7 +804,7 @@ mod integration_tests { let client = reqwest::Client::new(); let url = format!("{}/read", server.base_url); - let resp = send_with_retry(&client, |c| c.get(url.as_str())).await; + let resp = send_with_retry(&client, |http_client| http_client.get(url.as_str())).await; assert_eq!(resp.status(), reqwest::StatusCode::OK); assert_eq!(resp.text().await.unwrap(), "-1"); @@ -801,15 +817,15 @@ mod integration_tests { #[derive(Serialize, Deserialize, PartialEq, Debug)] struct UserProfile { - name: String, - age: u32, active: bool, + age: u32, + name: String, } async fn write_handler(ctx: RequestContext) -> Result<&'static str, EdgeError> { let kv = ctx.kv_handle().expect("kv configured"); let profile = UserProfile { - name: "Alice".to_string(), + name: "Alice".to_owned(), age: 30, active: true, }; @@ -821,8 +837,8 @@ mod integration_tests { let kv = ctx.kv_handle().expect("kv configured"); let profile: Option = kv.get("user:alice").await?; match profile { - Some(p) => 
Ok(format!("{}:{}", p.name, p.age)), - None => Ok("not found".to_string()), + Some(found) => Ok(format!("{}:{}", found.name, found.age)), + None => Ok("not found".to_owned()), } } @@ -834,14 +850,16 @@ mod integration_tests { let client = reqwest::Client::new(); // Save profile - let url = format!("{}/save", server.base_url); - let resp = send_with_retry(&client, |c| c.post(url.as_str())).await; - assert_eq!(resp.text().await.unwrap(), "saved"); + let save_url = format!("{}/save", server.base_url); + let save_resp = + send_with_retry(&client, |http_client| http_client.post(save_url.as_str())).await; + assert_eq!(save_resp.text().await.unwrap(), "saved"); // Load profile - let url = format!("{}/load", server.base_url); - let resp = send_with_retry(&client, |c| c.get(url.as_str())).await; - assert_eq!(resp.text().await.unwrap(), "Alice:30"); + let load_url = format!("{}/load", server.base_url); + let load_resp = + send_with_retry(&client, |http_client| http_client.get(load_url.as_str())).await; + assert_eq!(load_resp.text().await.unwrap(), "Alice:30"); server.handle.abort(); } @@ -850,16 +868,11 @@ mod integration_tests { // Secret store helpers // ----------------------------------------------------------------------- - struct TestServerSecrets { - base_url: String, - handle: tokio::task::JoinHandle<()>, - } - async fn start_test_server_with_secret_handle( router: RouterService, - secret_handle: Option, + secret_handle: Option, ) -> TestServerSecrets { - let listener = tokio::net::TcpListener::bind("127.0.0.1:0") + let listener = TokioTcpListener::bind("127.0.0.1:0") .await .expect("bind secrets test server"); let addr = listener.local_addr().expect("local addr"); @@ -868,14 +881,14 @@ mod integration_tests { enable_ctrl_c: false, }; let mut server = super::AxumDevServer::with_config(router, config); - if let Some(h) = secret_handle { - server = server.with_secret_handle(h); + if let Some(handle) = secret_handle { + server = server.with_secret_handle(handle); } let 
handle = tokio::spawn(async move { - let _ = server.run_with_listener(listener).await; + let _result = server.run_with_listener(listener).await; }); TestServerSecrets { - base_url: format!("http://{}", addr), + base_url: format!("http://{addr}"), handle, } } @@ -907,7 +920,7 @@ mod integration_tests { let client = reqwest::Client::new(); let url = format!("{}/secret", server.base_url); - let response = send_with_retry(&client, |c| c.get(url.as_str())).await; + let response = send_with_retry(&client, |http_client| http_client.get(url.as_str())).await; assert_eq!(response.status(), reqwest::StatusCode::OK); assert_eq!(response.text().await.unwrap(), "s3cr3t"); @@ -923,13 +936,13 @@ mod integration_tests { let router = RouterService::builder() .get("/secret", secret_value_handler) .build(); - let store = InMemorySecretStore::new(std::iter::empty::<(&str, bytes::Bytes)>()); + let store = InMemorySecretStore::new(iter::empty::<(&str, bytes::Bytes)>()); let handle = SecretHandle::new(Arc::new(store)); let server = start_test_server_with_secret_handle(router, Some(handle)).await; let client = reqwest::Client::new(); let url = format!("{}/secret", server.base_url); - let response = send_with_retry(&client, |c| c.get(url.as_str())).await; + let response = send_with_retry(&client, |http_client| http_client.get(url.as_str())).await; assert_eq!( response.status(), @@ -951,7 +964,7 @@ mod integration_tests { let client = reqwest::Client::new(); let url = format!("{}/secret", server.base_url); - let response = send_with_retry(&client, |c| c.get(url.as_str())).await; + let response = send_with_retry(&client, |http_client| http_client.get(url.as_str())).await; assert_eq!( response.status(), diff --git a/crates/edgezero-adapter-axum/src/key_value_store.rs b/crates/edgezero-adapter-axum/src/key_value_store.rs index 190bf6a..49e84ba 100644 --- a/crates/edgezero-adapter-axum/src/key_value_store.rs +++ b/crates/edgezero-adapter-axum/src/key_value_store.rs @@ -50,11 +50,11 @@ use 
std::time::Duration; use async_trait::async_trait; use bytes::Bytes; use edgezero_core::key_value_store::{KvError, KvPage, KvStore}; -use redb::{Database, ReadableDatabase, ReadableTable, TableDefinition}; +use redb::{Database, ReadableDatabase as _, ReadableTable as _, TableDefinition}; use std::time::SystemTime; /// Table definition for the KV store. -/// Key: String, Value: (Bytes, Option) +/// Key: `String`, Value: `(Bytes, Option)` const KV_TABLE: TableDefinition<&str, (&[u8], Option)> = TableDefinition::new("kv"); /// Type alias for a writable KV table handle. @@ -83,33 +83,41 @@ impl PersistentKvStore { /// call. A warning is logged once so operators know cleanup is needed. const MAX_SCAN_BATCHES: usize = 100; - /// Create a new persistent KV store at the given path. - /// - /// # Behavior - /// - /// - If the file does not exist, a new database will be initialized - /// - If the file exists and is a valid redb database, it will be opened with existing data preserved - /// - If the file exists but is not a valid redb database, returns an error - pub fn new>(path: P) -> Result { - let db_path = path.as_ref().to_path_buf(); - let db = Database::create(path).map_err(|e| { - KvError::Internal(anyhow::anyhow!( - "Failed to open KV database at {:?}. 
If the file is corrupted or locked \ - by another process, try deleting it and restarting: {}", - db_path, - e - )) - })?; + fn begin_write(&self) -> Result { + self.db + .begin_write() + .map_err(|err| KvError::Internal(anyhow::anyhow!("failed to begin write txn: {err}"))) + } - // Initialize the table - let store = Self { db }; - let write_txn = store.begin_write()?; + fn cleanup_expired_keys(&self, expired_keys: &[String]) -> Result<(), KvError> { + if expired_keys.is_empty() { + return Ok(()); + } + + let write_txn = self.begin_write()?; { - let _table = Self::open_table(&write_txn)?; + let mut table = Self::open_table(&write_txn)?; + for key in expired_keys { + let still_expired = table + .get(key.as_str()) + .map_err(|err| KvError::Internal(anyhow::anyhow!("failed to get key: {err}")))? + .is_some_and(|entry| { + let (_, expires_at) = entry.value(); + Self::is_expired(expires_at) + }); + if still_expired { + table.remove(key.as_str()).map_err(|err| { + KvError::Internal(anyhow::anyhow!("failed to remove: {err}")) + })?; + } + } } - Self::commit(write_txn)?; + Self::commit(write_txn) + } - Ok(store) + fn commit(txn: redb::WriteTransaction) -> Result<(), KvError> { + txn.commit() + .map_err(|err| KvError::Internal(anyhow::anyhow!("failed to commit: {err}"))) } /// Check if an entry is expired based on its expiration timestamp. @@ -131,75 +139,84 @@ impl PersistentKvStore { } } - /// Convert SystemTime to milliseconds since UNIX epoch. + /// Create a new persistent KV store at the given path. /// - /// Returns 0 if the time is before UNIX epoch (should never happen in practice). 
- fn system_time_to_millis(time: SystemTime) -> u128 { - time.duration_since(SystemTime::UNIX_EPOCH) - .map(|d| d.as_millis()) - .unwrap_or(0) - } + /// # Behavior + /// + /// - If the file does not exist, a new database will be initialized + /// - If the file exists and is a valid redb database, it will be opened with existing data preserved + /// - If the file exists but is not a valid redb database, returns an error + /// + /// # Errors + /// Returns an error if the database file cannot be opened or initialised (corrupted file, locked by another process, or insufficient permissions). + #[inline] + pub fn new>(path: P) -> Result { + let db_path = path.as_ref().display().to_string(); + let db = Database::create(path).map_err(|err| { + KvError::Internal(anyhow::anyhow!( + "Failed to open KV database at {db_path}. If the file is corrupted or locked \ + by another process, try deleting it and restarting: {err}" + )) + })?; - // -- Transaction helpers ------------------------------------------------ + // Initialize the table + let store = Self { db }; + let write_txn = store.begin_write()?; + { + let _table = Self::open_table(&write_txn)?; + } + Self::commit(write_txn)?; - fn begin_write(&self) -> Result { - self.db - .begin_write() - .map_err(|e| KvError::Internal(anyhow::anyhow!("failed to begin write txn: {}", e))) + Ok(store) } - fn open_table<'txn>(txn: &'txn redb::WriteTransaction) -> Result, KvError> { + fn open_table(txn: &redb::WriteTransaction) -> Result, KvError> { txn.open_table(KV_TABLE) - .map_err(|e| KvError::Internal(anyhow::anyhow!("failed to open table: {}", e))) + .map_err(|err| KvError::Internal(anyhow::anyhow!("failed to open table: {err}"))) } - fn commit(txn: redb::WriteTransaction) -> Result<(), KvError> { - txn.commit() - .map_err(|e| KvError::Internal(anyhow::anyhow!("failed to commit: {}", e))) + /// Convert `SystemTime` to milliseconds since UNIX epoch. 
+ /// + /// Returns 0 if the time is before UNIX epoch (should never happen in practice). + fn system_time_to_millis(time: SystemTime) -> u128 { + time.duration_since(SystemTime::UNIX_EPOCH) + .map(|duration| duration.as_millis()) + .unwrap_or(0) } +} - fn cleanup_expired_keys(&self, expired_keys: &[String]) -> Result<(), KvError> { - if expired_keys.is_empty() { - return Ok(()); - } - +#[async_trait(?Send)] +impl KvStore for PersistentKvStore { + #[inline] + async fn delete(&self, key: &str) -> Result<(), KvError> { let write_txn = self.begin_write()?; - { - let mut table = Self::open_table(&write_txn)?; - for key in expired_keys { - let still_expired = table - .get(key.as_str()) - .map_err(|e| KvError::Internal(anyhow::anyhow!("failed to get key: {}", e)))? - .is_some_and(|entry| { - let (_, expires_at) = entry.value(); - Self::is_expired(expires_at) - }); - if still_expired { - table.remove(key.as_str()).map_err(|e| { - KvError::Internal(anyhow::anyhow!("failed to remove: {}", e)) - })?; - } - } - } + let mut table = Self::open_table(&write_txn)?; + table + .remove(key) + .map_err(|err| KvError::Internal(anyhow::anyhow!("failed to remove: {err}")))?; + drop(table); Self::commit(write_txn) } -} -#[async_trait(?Send)] -impl KvStore for PersistentKvStore { + #[inline] + async fn exists(&self, key: &str) -> Result { + Ok(self.get_bytes(key).await?.is_some()) + } + + #[inline] async fn get_bytes(&self, key: &str) -> Result, KvError> { let read_txn = self .db .begin_read() - .map_err(|e| KvError::Internal(anyhow::anyhow!("failed to begin read txn: {}", e)))?; + .map_err(|err| KvError::Internal(anyhow::anyhow!("failed to begin read txn: {err}")))?; let table = read_txn .open_table(KV_TABLE) - .map_err(|e| KvError::Internal(anyhow::anyhow!("failed to open table: {}", e)))?; + .map_err(|err| KvError::Internal(anyhow::anyhow!("failed to open table: {err}")))?; if let Some(entry) = table .get(key) - .map_err(|e| KvError::Internal(anyhow::anyhow!("failed to get key: {}", 
e)))? + .map_err(|err| KvError::Internal(anyhow::anyhow!("failed to get key: {err}")))? { let (value_bytes, expires_at) = entry.value(); @@ -212,22 +229,22 @@ impl KvStore for PersistentKvStore { // Delete the expired key let write_txn = self.begin_write()?; { - let mut table = Self::open_table(&write_txn)?; + let mut write_table = Self::open_table(&write_txn)?; // Re-check expiry inside write txn to avoid TOCTOU race: // a concurrent put_bytes may have overwritten the key with // a fresh value between our read and this write. - let still_expired = table + let still_expired = write_table .get(key) - .map_err(|e| { - KvError::Internal(anyhow::anyhow!("failed to get key: {}", e)) + .map_err(|err| { + KvError::Internal(anyhow::anyhow!("failed to get key: {err}")) })? - .is_some_and(|entry| { - let (_, exp) = entry.value(); + .is_some_and(|fresh_entry| { + let (_, exp) = fresh_entry.value(); Self::is_expired(exp) }); if still_expired { - table.remove(key).map_err(|e| { - KvError::Internal(anyhow::anyhow!("failed to remove: {}", e)) + write_table.remove(key).map_err(|err| { + KvError::Internal(anyhow::anyhow!("failed to remove: {err}")) })?; } } @@ -242,47 +259,7 @@ impl KvStore for PersistentKvStore { } } - async fn put_bytes(&self, key: &str, value: Bytes) -> Result<(), KvError> { - let write_txn = self.begin_write()?; - { - let mut table = Self::open_table(&write_txn)?; - table - .insert(key, (value.as_ref(), None)) - .map_err(|e| KvError::Internal(anyhow::anyhow!("failed to insert: {}", e)))?; - } - Self::commit(write_txn) - } - - async fn put_bytes_with_ttl( - &self, - key: &str, - value: Bytes, - ttl: Duration, - ) -> Result<(), KvError> { - let expires_at = SystemTime::now() + ttl; - let expires_at_millis = Self::system_time_to_millis(expires_at); - - let write_txn = self.begin_write()?; - { - let mut table = Self::open_table(&write_txn)?; - table - .insert(key, (value.as_ref(), Some(expires_at_millis))) - .map_err(|e| KvError::Internal(anyhow::anyhow!("failed to 
insert: {}", e)))?; - } - Self::commit(write_txn) - } - - async fn delete(&self, key: &str) -> Result<(), KvError> { - let write_txn = self.begin_write()?; - { - let mut table = Self::open_table(&write_txn)?; - table - .remove(key) - .map_err(|e| KvError::Internal(anyhow::anyhow!("failed to remove: {}", e)))?; - } - Self::commit(write_txn) - } - + #[inline] async fn list_keys_page( &self, prefix: &str, @@ -294,45 +271,47 @@ impl KvStore for PersistentKvStore { let mut reached_end = false; let mut batch_count: usize = 0; - while live_keys.len() < limit + 1 && !reached_end { + while live_keys.len() < limit.saturating_add(1) && !reached_end { if batch_count >= Self::MAX_SCAN_BATCHES { log::warn!( "list_keys_page: scanned {} batches ({} entries) without filling the \ requested page; the database likely contains a large number of expired \ entries. Returning partial page. Run a KV cleanup to improve performance.", Self::MAX_SCAN_BATCHES, - Self::MAX_SCAN_BATCHES * Self::LIST_SCAN_BATCH_SIZE, + Self::MAX_SCAN_BATCHES.saturating_mul(Self::LIST_SCAN_BATCH_SIZE), ); break; } - batch_count += 1; + batch_count = batch_count.saturating_add(1); let mut expired_keys = Vec::new(); { - let read_txn = self.db.begin_read().map_err(|e| { - KvError::Internal(anyhow::anyhow!("failed to begin read txn: {}", e)) + let read_txn = self.db.begin_read().map_err(|err| { + KvError::Internal(anyhow::anyhow!("failed to begin read txn: {err}")) })?; - let table = read_txn.open_table(KV_TABLE).map_err(|e| { - KvError::Internal(anyhow::anyhow!("failed to open table: {}", e)) + let table = read_txn.open_table(KV_TABLE).map_err(|err| { + KvError::Internal(anyhow::anyhow!("failed to open table: {err}")) })?; let mut iter = if prefix.is_empty() { match scan_cursor.as_deref() { - Some(cursor) => { - table.range::<&str>((Bound::Excluded(cursor), Bound::Unbounded)) + Some(scan_from) => { + table.range::<&str>((Bound::Excluded(scan_from), Bound::Unbounded)) } None => table.iter(), } } else { match 
scan_cursor.as_deref() { - Some(cursor) if cursor >= prefix => { - table.range::<&str>((Bound::Excluded(cursor), Bound::Unbounded)) + Some(scan_from) if scan_from >= prefix => { + table.range::<&str>((Bound::Excluded(scan_from), Bound::Unbounded)) } _ => table.range(prefix..), } } - .map_err(|e| KvError::Internal(anyhow::anyhow!("failed to create range: {}", e)))?; + .map_err(|err| { + KvError::Internal(anyhow::anyhow!("failed to create range: {err}")) + })?; for _ in 0..Self::LIST_SCAN_BATCH_SIZE { let Some(entry) = iter.next() else { @@ -340,10 +319,10 @@ impl KvStore for PersistentKvStore { break; }; - let (key, value) = entry.map_err(|e| { - KvError::Internal(anyhow::anyhow!("failed to read range entry: {}", e)) + let (key_handle, value) = entry.map_err(|err| { + KvError::Internal(anyhow::anyhow!("failed to read range entry: {err}")) })?; - let key = key.value().to_string(); + let key = key_handle.value().to_owned(); if !prefix.is_empty() && !key.starts_with(prefix) { reached_end = true; @@ -359,7 +338,7 @@ impl KvStore for PersistentKvStore { } live_keys.push(key); - if live_keys.len() == limit + 1 { + if live_keys.len() == limit.saturating_add(1) { break; } } @@ -378,13 +357,64 @@ impl KvStore for PersistentKvStore { keys: live_keys, }) } + + #[inline] + async fn put_bytes(&self, key: &str, value: Bytes) -> Result<(), KvError> { + let write_txn = self.begin_write()?; + let mut table = Self::open_table(&write_txn)?; + table + .insert(key, (value.as_ref(), None)) + .map_err(|err| KvError::Internal(anyhow::anyhow!("failed to insert: {err}")))?; + drop(table); + Self::commit(write_txn) + } + + #[inline] + async fn put_bytes_with_ttl( + &self, + key: &str, + value: Bytes, + ttl: Duration, + ) -> Result<(), KvError> { + let expires_at = SystemTime::now() + .checked_add(ttl) + .ok_or_else(|| KvError::Internal(anyhow::anyhow!("ttl overflows system time")))?; + let expires_at_millis = Self::system_time_to_millis(expires_at); + + let write_txn = self.begin_write()?; + 
let mut table = Self::open_table(&write_txn)?; + table + .insert(key, (value.as_ref(), Some(expires_at_millis))) + .map_err(|err| KvError::Internal(anyhow::anyhow!("failed to insert: {err}")))?; + drop(table); + Self::commit(write_txn) + } } #[cfg(test)] mod tests { + // Run the shared contract tests against PersistentKvStore. + // `Box::leak` intentionally extends the TempDir's lifetime to 'static so + // it remains alive for the duration of the test process. The directory is + // deleted when the process exits, unlike `.keep()` which leaves it behind + // permanently. + edgezero_core::key_value_store_contract_tests!(persistent_kv_contract, { + let dir = Box::leak(Box::new(tempfile::tempdir().unwrap())); + let db_path = dir.path().join("contract.redb"); + PersistentKvStore::new(db_path).unwrap() + }); + use super::*; use edgezero_core::key_value_store::KvHandle; + use futures::executor; use std::sync::Arc; + use std::thread; + + #[derive(serde::Serialize, serde::Deserialize, PartialEq, Debug)] + struct Config { + enabled: bool, + name: String, + } fn store() -> (KvHandle, tempfile::TempDir) { let temp_dir = tempfile::tempdir().unwrap(); @@ -393,216 +423,221 @@ mod tests { (KvHandle::new(Arc::new(store)), temp_dir) } - // -- Raw bytes ----------------------------------------------------------- - #[tokio::test] - async fn put_and_get_bytes() { - let (s, _dir) = store(); - s.put_bytes("k", Bytes::from("hello")).await.unwrap(); - assert_eq!(s.get_bytes("k").await.unwrap(), Some(Bytes::from("hello"))); + async fn cleanup_expired_keys_does_not_delete_fresh_overwrite() { + let temp_dir = tempfile::tempdir().unwrap(); + let db_path = temp_dir.path().join("test.redb"); + let kv_store = PersistentKvStore::new(db_path).unwrap(); + + kv_store + .put_bytes_with_ttl("race/key", Bytes::from("stale"), Duration::from_millis(1)) + .await + .unwrap(); + thread::sleep(Duration::from_millis(200)); + kv_store + .put_bytes("race/key", Bytes::from("fresh")) + .await + .unwrap(); + + 
kv_store + .cleanup_expired_keys(&["race/key".to_owned()]) + .unwrap(); + + assert_eq!( + kv_store.get_bytes("race/key").await.unwrap(), + Some(Bytes::from("fresh")) + ); } - #[tokio::test] - async fn get_missing_key_returns_none() { - let (s, _dir) = store(); - assert_eq!(s.get_bytes("missing").await.unwrap(), None); + #[test] + fn concurrent_writes_dont_panic() { + let temp_dir = tempfile::tempdir().unwrap(); + let db_path = temp_dir.path().join("test.redb"); + let kv_store = PersistentKvStore::new(db_path).unwrap(); + let handle = KvHandle::new(Arc::new(kv_store)); + + // KvHandle futures are !Send (async_trait(?Send) for WASM compat), so + // tokio::spawn is off-limits. Use OS threads instead — KvHandle is + // Send + Sync, so each thread moves its own clone and runs its own + // executor. This is genuinely concurrent at the OS level. + let threads: Vec<_> = (0_i32..100_i32) + .map(|idx| { + let kv_handle = handle.clone(); + thread::spawn(move || { + executor::block_on(async move { + let key = format!("key:{idx}"); + kv_handle.put(&key, &idx).await.unwrap(); + }); + }) + }) + .collect(); + + for thread in threads { + thread.join().expect("writer thread panicked"); + } + + // Verify all 100 keys survived concurrent writes with correct values. 
+ executor::block_on(async { + for idx in 0_i32..100_i32 { + let key = format!("key:{idx}"); + let val: i32 = handle.get_or(&key, -1_i32).await.unwrap(); + assert_eq!( + val, idx, + "key:{idx} has wrong value after concurrent writes" + ); + } + }); } #[tokio::test] - async fn put_overwrites_existing() { - let (s, _dir) = store(); - s.put_bytes("k", Bytes::from("first")).await.unwrap(); - s.put_bytes("k", Bytes::from("second")).await.unwrap(); - assert_eq!(s.get_bytes("k").await.unwrap(), Some(Bytes::from("second"))); + async fn data_persists_across_reopens() { + let temp_dir = tempfile::tempdir().unwrap(); + let db_path = temp_dir.path().join("test.redb"); + + // Write data + let store = PersistentKvStore::new(&db_path).unwrap(); + store + .put_bytes("persistent", Bytes::from("value")) + .await + .unwrap(); + drop(store); + + // Reopen and verify data persists + { + let reopened = PersistentKvStore::new(&db_path).unwrap(); + let value = reopened.get_bytes("persistent").await.unwrap(); + assert_eq!(value, Some(Bytes::from("value"))); + } } #[tokio::test] - async fn delete_removes_key() { - let (s, _dir) = store(); - s.put_bytes("k", Bytes::from("v")).await.unwrap(); - s.delete("k").await.unwrap(); - assert_eq!(s.get_bytes("k").await.unwrap(), None); + async fn delete_nonexistent_is_ok() { + let (kv_store, _dir) = store(); + kv_store.delete("nope").await.unwrap(); } #[tokio::test] - async fn delete_nonexistent_is_ok() { - let (s, _dir) = store(); - s.delete("nope").await.unwrap(); + async fn delete_removes_key() { + let (kv_store, _dir) = store(); + kv_store.put_bytes("k", Bytes::from("v")).await.unwrap(); + kv_store.delete("k").await.unwrap(); + assert_eq!(kv_store.get_bytes("k").await.unwrap(), None); } #[tokio::test] - async fn ttl_expires_entry() { - // Use the store impl directly to bypass validation limits (min TTL 60s) - let temp_dir = tempfile::tempdir().unwrap(); - let db_path = temp_dir.path().join("test.redb"); - let s = 
PersistentKvStore::new(db_path).unwrap(); - s.put_bytes_with_ttl("temp", Bytes::from("val"), Duration::from_millis(1)) - .await - .unwrap(); - // 200ms gives the OS scheduler enough headroom on busy CI runners. - std::thread::sleep(Duration::from_millis(200)); - assert_eq!(s.get_bytes("temp").await.unwrap(), None); + async fn exists_helper() { + let (kv_store, _dir) = store(); + assert!(!kv_store.exists("nope").await.unwrap()); + kv_store.put_bytes("k", Bytes::from("v")).await.unwrap(); + assert!(kv_store.exists("k").await.unwrap()); } #[tokio::test] - async fn ttl_not_expired_returns_value() { - let (s, _dir) = store(); - s.put_bytes_with_ttl("temp", Bytes::from("val"), Duration::from_secs(60)) - .await - .unwrap(); - assert_eq!(s.get_bytes("temp").await.unwrap(), Some(Bytes::from("val"))); + async fn get_missing_key_returns_none() { + let (kv_store, _dir) = store(); + assert_eq!(kv_store.get_bytes("missing").await.unwrap(), None); } #[tokio::test] async fn list_keys_page_skips_expired_entries() { let temp_dir = tempfile::tempdir().unwrap(); let db_path = temp_dir.path().join("test.redb"); - let s = PersistentKvStore::new(db_path).unwrap(); + let kv_store = PersistentKvStore::new(db_path).unwrap(); - s.put_bytes("app/live", Bytes::from("value")).await.unwrap(); - s.put_bytes_with_ttl("app/expired", Bytes::from("gone"), Duration::from_millis(1)) + kv_store + .put_bytes("app/live", Bytes::from("value")) + .await + .unwrap(); + kv_store + .put_bytes_with_ttl("app/expired", Bytes::from("gone"), Duration::from_millis(1)) .await .unwrap(); - std::thread::sleep(Duration::from_millis(200)); + thread::sleep(Duration::from_millis(200)); - let page = s.list_keys_page("app/", None, 10).await.unwrap(); - assert_eq!(page.keys, vec!["app/live".to_string()]); + let page = kv_store.list_keys_page("app/", None, 10).await.unwrap(); + assert_eq!(page.keys, vec!["app/live".to_owned()]); assert_eq!(page.cursor, None); } #[tokio::test] - async fn 
cleanup_expired_keys_does_not_delete_fresh_overwrite() { - let temp_dir = tempfile::tempdir().unwrap(); - let db_path = temp_dir.path().join("test.redb"); - let s = PersistentKvStore::new(db_path).unwrap(); + async fn new_store_is_empty() { + let (kv_store, _dir) = store(); + assert!(!kv_store.exists("anything").await.unwrap()); + } + + #[tokio::test] + async fn put_and_get_bytes() { + let (kv_store, _dir) = store(); + kv_store.put_bytes("k", Bytes::from("hello")).await.unwrap(); + assert_eq!( + kv_store.get_bytes("k").await.unwrap(), + Some(Bytes::from("hello")) + ); + } - s.put_bytes_with_ttl("race/key", Bytes::from("stale"), Duration::from_millis(1)) + #[tokio::test] + async fn put_overwrites_existing() { + let (kv_store, _dir) = store(); + kv_store.put_bytes("k", Bytes::from("first")).await.unwrap(); + kv_store + .put_bytes("k", Bytes::from("second")) .await .unwrap(); - std::thread::sleep(Duration::from_millis(200)); - s.put_bytes("race/key", Bytes::from("fresh")).await.unwrap(); - - s.cleanup_expired_keys(&["race/key".to_string()]).unwrap(); - assert_eq!( - s.get_bytes("race/key").await.unwrap(), - Some(Bytes::from("fresh")) + kv_store.get_bytes("k").await.unwrap(), + Some(Bytes::from("second")) ); } - // -- Typed helpers via KvHandle ---------------------------------------- + #[tokio::test] + async fn ttl_expires_entry() { + // Use the store impl directly to bypass validation limits (min TTL 60s) + let temp_dir = tempfile::tempdir().unwrap(); + let db_path = temp_dir.path().join("test.redb"); + let kv_store = PersistentKvStore::new(db_path).unwrap(); + kv_store + .put_bytes_with_ttl("temp", Bytes::from("val"), Duration::from_millis(1)) + .await + .unwrap(); + // 200ms gives the OS scheduler enough headroom on busy CI runners. 
+ thread::sleep(Duration::from_millis(200)); + assert_eq!(kv_store.get_bytes("temp").await.unwrap(), None); + } - #[derive(serde::Serialize, serde::Deserialize, PartialEq, Debug)] - struct Config { - name: String, - enabled: bool, + #[tokio::test] + async fn ttl_not_expired_returns_value() { + let (kv_store, _dir) = store(); + kv_store + .put_bytes_with_ttl("temp", Bytes::from("val"), Duration::from_secs(60)) + .await + .unwrap(); + assert_eq!( + kv_store.get_bytes("temp").await.unwrap(), + Some(Bytes::from("val")) + ); } #[tokio::test] async fn typed_roundtrip() { - let (s, _dir) = store(); + let (kv_store, _dir) = store(); let cfg = Config { - name: "test".into(), enabled: true, + name: "test".into(), }; - s.put("config", &cfg).await.unwrap(); - let out: Option = s.get("config").await.unwrap(); + kv_store.put("config", &cfg).await.unwrap(); + let out: Option = kv_store.get("config").await.unwrap(); assert_eq!(out, Some(cfg)); } #[tokio::test] async fn update_helper() { - let (s, _dir) = store(); - s.put("counter", &0i32).await.unwrap(); - let val = s - .read_modify_write("counter", 0i32, |n| n + 5) + let (kv_store, _dir) = store(); + kv_store.put("counter", &0_i32).await.unwrap(); + let val = kv_store + .read_modify_write("counter", 0_i32, |num| num + 5_i32) .await .unwrap(); - assert_eq!(val, 5); - } - - #[tokio::test] - async fn exists_helper() { - let (s, _dir) = store(); - assert!(!s.exists("nope").await.unwrap()); - s.put_bytes("k", Bytes::from("v")).await.unwrap(); - assert!(s.exists("k").await.unwrap()); + assert_eq!(val, 5_i32); } - - #[tokio::test] - async fn new_store_is_empty() { - let (s, _dir) = store(); - assert!(!s.exists("anything").await.unwrap()); - } - - #[test] - fn concurrent_writes_dont_panic() { - let temp_dir = tempfile::tempdir().unwrap(); - let db_path = temp_dir.path().join("test.redb"); - let s = PersistentKvStore::new(db_path).unwrap(); - let handle = KvHandle::new(Arc::new(s)); - - // KvHandle futures are !Send (async_trait(?Send) 
for WASM compat), so - // tokio::spawn is off-limits. Use OS threads instead — KvHandle is - // Send + Sync, so each thread moves its own clone and runs its own - // executor. This is genuinely concurrent at the OS level. - let threads: Vec<_> = (0..100i32) - .map(|i| { - let h = handle.clone(); - std::thread::spawn(move || { - futures::executor::block_on(async move { - let key = format!("key:{i}"); - h.put(&key, &i).await.unwrap(); - }); - }) - }) - .collect(); - - for t in threads { - t.join().expect("writer thread panicked"); - } - - // Verify all 100 keys survived concurrent writes with correct values. - futures::executor::block_on(async { - for i in 0..100i32 { - let key = format!("key:{i}"); - let val: i32 = handle.get_or(&key, -1).await.unwrap(); - assert_eq!(val, i, "key:{i} has wrong value after concurrent writes"); - } - }); - } - - #[tokio::test] - async fn data_persists_across_reopens() { - let temp_dir = tempfile::tempdir().unwrap(); - let db_path = temp_dir.path().join("test.redb"); - - // Write data - { - let store = PersistentKvStore::new(&db_path).unwrap(); - store - .put_bytes("persistent", Bytes::from("value")) - .await - .unwrap(); - } - - // Reopen and verify data persists - { - let store = PersistentKvStore::new(&db_path).unwrap(); - let value = store.get_bytes("persistent").await.unwrap(); - assert_eq!(value, Some(Bytes::from("value"))); - } - } - - // Run the shared contract tests against PersistentKvStore. - // `Box::leak` intentionally extends the TempDir's lifetime to 'static so - // it remains alive for the duration of the test process. The directory is - // deleted when the process exits, unlike `.keep()` which leaves it behind - // permanently. 
- edgezero_core::key_value_store_contract_tests!(persistent_kv_contract, { - let dir = Box::leak(Box::new(tempfile::tempdir().unwrap())); - let db_path = dir.path().join("contract.redb"); - PersistentKvStore::new(db_path).unwrap() - }); } diff --git a/crates/edgezero-adapter-axum/src/lib.rs b/crates/edgezero-adapter-axum/src/lib.rs index ae9e539..d4cedf9 100644 --- a/crates/edgezero-adapter-axum/src/lib.rs +++ b/crates/edgezero-adapter-axum/src/lib.rs @@ -1,45 +1,26 @@ -//! Axum adapter for EdgeZero routers and applications. +//! Axum adapter for `EdgeZero` routers and applications. #[cfg(feature = "axum")] pub mod config_store; #[cfg(feature = "axum")] -mod context; +pub mod context; #[cfg(feature = "axum")] -mod dev_server; +pub mod dev_server; #[cfg(feature = "axum")] pub mod key_value_store; #[cfg(feature = "axum")] -mod proxy; +pub mod proxy; #[cfg(feature = "axum")] -mod request; +pub mod request; #[cfg(feature = "axum")] -mod response; +pub mod response; #[cfg(feature = "axum")] pub mod secret_store; #[cfg(feature = "axum")] -mod service; +pub mod service; #[cfg(feature = "cli")] pub mod cli; #[cfg(test)] pub mod test_utils; - -#[cfg(feature = "axum")] -pub use config_store::AxumConfigStore; -#[cfg(feature = "axum")] -pub use context::AxumRequestContext; -#[cfg(feature = "axum")] -pub use dev_server::{run_app, AxumDevServer, AxumDevServerConfig}; -#[cfg(feature = "axum")] -pub use key_value_store::PersistentKvStore; -#[cfg(feature = "axum")] -pub use proxy::AxumProxyClient; -#[cfg(feature = "axum")] -pub use request::into_core_request; -#[cfg(feature = "axum")] -pub use response::into_axum_response; -#[cfg(feature = "axum")] -pub use secret_store::EnvSecretStore; -#[cfg(feature = "axum")] -pub use service::EdgeZeroAxumService; diff --git a/crates/edgezero-adapter-axum/src/proxy.rs b/crates/edgezero-adapter-axum/src/proxy.rs index 6014955..8a1d404 100644 --- a/crates/edgezero-adapter-axum/src/proxy.rs +++ b/crates/edgezero-adapter-axum/src/proxy.rs @@ -5,31 
+5,40 @@ use edgezero_core::body::Body; use edgezero_core::error::EdgeError; use edgezero_core::http::{HeaderName, HeaderValue, Method, StatusCode}; use edgezero_core::proxy::{ProxyClient, ProxyRequest, ProxyResponse}; -use futures_util::StreamExt; +use futures_util::StreamExt as _; use reqwest::{header, Client}; pub struct AxumProxyClient { client: Client, } -impl Default for AxumProxyClient { - fn default() -> Self { - let client = Client::builder() - .timeout(Duration::from_secs(30)) - .build() - .expect("reqwest client"); - Self { client } +impl AxumProxyClient { + /// Construct a proxy client with the workspace-default 30-second timeout. + /// + /// **Breaking change (pre-1.0):** previously `AxumProxyClient` implemented + /// `Default` and panicked if reqwest's TLS backend could not be initialised. + /// Construction is now fallible so callers can decide how to handle a + /// missing or misconfigured TLS backend. + /// + /// # Errors + /// Returns the underlying [`reqwest::Error`] if `reqwest::Client::builder().build()` + /// fails — typically because the TLS backend cannot be initialised on this target. 
+ #[inline] + pub fn try_new() -> Result { + let client = Client::builder().timeout(Duration::from_secs(30)).build()?; + Ok(Self { client }) } } #[async_trait(?Send)] impl ProxyClient for AxumProxyClient { + #[inline] async fn send(&self, request: ProxyRequest) -> Result { let (method, uri, headers, body, _extensions) = request.into_parts(); let reqwest_method = reqwest_method(&method)?; let mut builder = self.client.request(reqwest_method, uri.to_string()); - for (name, value) in headers.iter() { + for (name, value) in &headers { let header_name = header::HeaderName::from_bytes(name.as_str().as_bytes()) .map_err(EdgeError::internal)?; let header_value = @@ -41,8 +50,8 @@ impl ProxyClient for AxumProxyClient { Body::Once(bytes) => builder.body(bytes.to_vec()), Body::Stream(mut stream) => { let mut buf = Vec::new(); - while let Some(chunk) = stream.next().await { - let chunk = chunk.map_err(EdgeError::internal)?; + while let Some(result) = stream.next().await { + let chunk = result.map_err(EdgeError::internal)?; buf.extend_from_slice(&chunk); } builder.body(buf) @@ -54,7 +63,7 @@ impl ProxyClient for AxumProxyClient { StatusCode::from_u16(response.status().as_u16()).map_err(EdgeError::internal)?; let mut proxy_response = ProxyResponse::new(status, Body::empty()); - for (name, value) in response.headers().iter() { + for (name, value) in response.headers() { let header_name = HeaderName::from_bytes(name.as_str().as_bytes()).map_err(EdgeError::internal)?; let header_value = @@ -78,6 +87,7 @@ fn reqwest_method(method: &Method) -> Result { #[cfg(test)] mod tests { use super::*; + use std::mem; #[test] fn converts_method_to_reqwest() { @@ -105,18 +115,21 @@ mod tests { #[test] fn default_client_creates_successfully() { - let client = AxumProxyClient::default(); + let client = AxumProxyClient::try_new().expect("reqwest client init"); // Just verify it builds without panicking - assert!(std::mem::size_of_val(&client) > 0); + assert!(mem::size_of_val(&client) > 0); } } 
#[cfg(test)] mod integration_tests { use super::*; - use axum::{routing::get, routing::post, Router}; + use axum::body::Bytes as AxumBytes; + use axum::http::header::CONTENT_TYPE; + use axum::http::{HeaderMap as AxumHeaderMap, StatusCode as AxumStatusCode}; + use axum::routing::{delete, get, patch, post, put}; + use axum::Router; use edgezero_core::http::Uri; - use edgezero_core::proxy::ProxyClient; use tokio::net::TcpListener; async fn start_test_server(router: Router) -> String { @@ -125,7 +138,7 @@ mod integration_tests { tokio::spawn(async move { axum::serve(listener, router).await.unwrap(); }); - format!("http://{}", addr) + format!("http://{addr}") } #[tokio::test] @@ -133,8 +146,8 @@ mod integration_tests { let app = Router::new().route("/test", get(|| async { "hello from server" })); let base_url = start_test_server(app).await; - let client = AxumProxyClient::default(); - let uri: Uri = format!("{}/test", base_url).parse().unwrap(); + let client = AxumProxyClient::try_new().expect("reqwest client init"); + let uri: Uri = format!("{base_url}/test").parse().unwrap(); let request = ProxyRequest::new(Method::GET, uri); let response = client.send(request).await.expect("response"); @@ -142,17 +155,17 @@ mod integration_tests { match response.body() { Body::Once(bytes) => assert_eq!(bytes.as_ref(), b"hello from server"), - _ => panic!("expected buffered body"), + Body::Stream(_) => panic!("expected buffered body"), } } #[tokio::test] async fn proxy_client_sends_post_with_body() { - let app = Router::new().route("/echo", post(|body: axum::body::Bytes| async move { body })); + let app = Router::new().route("/echo", post(|body: AxumBytes| async move { body })); let base_url = start_test_server(app).await; - let client = AxumProxyClient::default(); - let uri: Uri = format!("{}/echo", base_url).parse().unwrap(); + let client = AxumProxyClient::try_new().expect("reqwest client init"); + let uri: Uri = format!("{base_url}/echo").parse().unwrap(); let mut request = 
ProxyRequest::new(Method::POST, uri); *request.body_mut() = Body::from("request body data"); @@ -161,7 +174,7 @@ mod integration_tests { match response.body() { Body::Once(bytes) => assert_eq!(bytes.as_ref(), b"request body data"), - _ => panic!("expected buffered body"), + Body::Stream(_) => panic!("expected buffered body"), } } @@ -169,18 +182,18 @@ mod integration_tests { async fn proxy_client_forwards_request_headers() { let app = Router::new().route( "/headers", - get(|headers: axum::http::HeaderMap| async move { + get(|headers: AxumHeaderMap| async move { headers .get("x-custom-header") - .and_then(|v| v.to_str().ok()) + .and_then(|val| val.to_str().ok()) .unwrap_or("missing") - .to_string() + .to_owned() }), ); let base_url = start_test_server(app).await; - let client = AxumProxyClient::default(); - let uri: Uri = format!("{}/headers", base_url).parse().unwrap(); + let client = AxumProxyClient::try_new().expect("reqwest client init"); + let uri: Uri = format!("{base_url}/headers").parse().unwrap(); let mut request = ProxyRequest::new(Method::GET, uri); request .headers_mut() @@ -191,7 +204,7 @@ mod integration_tests { match response.body() { Body::Once(bytes) => assert_eq!(bytes.as_ref(), b"custom-value"), - _ => panic!("expected buffered body"), + Body::Stream(_) => panic!("expected buffered body"), } } @@ -199,17 +212,12 @@ mod integration_tests { async fn proxy_client_receives_response_headers() { let app = Router::new().route( "/with-headers", - get(|| async { - ( - [(axum::http::header::CONTENT_TYPE, "application/json")], - "{}", - ) - }), + get(|| async { ([(CONTENT_TYPE, "application/json")], "{}") }), ); let base_url = start_test_server(app).await; - let client = AxumProxyClient::default(); - let uri: Uri = format!("{}/with-headers", base_url).parse().unwrap(); + let client = AxumProxyClient::try_new().expect("reqwest client init"); + let uri: Uri = format!("{base_url}/with-headers").parse().unwrap(); let request = ProxyRequest::new(Method::GET, 
uri); let response = client.send(request).await.expect("response"); @@ -218,7 +226,7 @@ mod integration_tests { let content_type = response .headers() .get("content-type") - .and_then(|v| v.to_str().ok()); + .and_then(|val| val.to_str().ok()); assert_eq!(content_type, Some("application/json")); } @@ -227,8 +235,8 @@ mod integration_tests { let app = Router::new(); let base_url = start_test_server(app).await; - let client = AxumProxyClient::default(); - let uri: Uri = format!("{}/nonexistent", base_url).parse().unwrap(); + let client = AxumProxyClient::try_new().expect("reqwest client init"); + let uri: Uri = format!("{base_url}/nonexistent").parse().unwrap(); let request = ProxyRequest::new(Method::GET, uri); let response = client.send(request).await.expect("response"); @@ -239,12 +247,12 @@ mod integration_tests { async fn proxy_client_handles_500() { let app = Router::new().route( "/error", - get(|| async { (axum::http::StatusCode::INTERNAL_SERVER_ERROR, "error") }), + get(|| async { (AxumStatusCode::INTERNAL_SERVER_ERROR, "error") }), ); let base_url = start_test_server(app).await; - let client = AxumProxyClient::default(); - let uri: Uri = format!("{}/error", base_url).parse().unwrap(); + let client = AxumProxyClient::try_new().expect("reqwest client init"); + let uri: Uri = format!("{base_url}/error").parse().unwrap(); let request = ProxyRequest::new(Method::GET, uri); let response = client.send(request).await.expect("response"); @@ -256,12 +264,12 @@ mod integration_tests { let app = Router::new() .route("/method", get(|| async { "GET" })) .route("/method", post(|| async { "POST" })) - .route("/method", axum::routing::put(|| async { "PUT" })) - .route("/method", axum::routing::delete(|| async { "DELETE" })) - .route("/method", axum::routing::patch(|| async { "PATCH" })); + .route("/method", put(|| async { "PUT" })) + .route("/method", delete(|| async { "DELETE" })) + .route("/method", patch(|| async { "PATCH" })); let base_url = start_test_server(app).await; 
- let client = AxumProxyClient::default(); + let client = AxumProxyClient::try_new().expect("reqwest client init"); for (method, expected_body) in [ (Method::GET, "GET"), @@ -270,26 +278,28 @@ mod integration_tests { (Method::DELETE, "DELETE"), (Method::PATCH, "PATCH"), ] { - let uri: Uri = format!("{}/method", base_url).parse().unwrap(); + let uri: Uri = format!("{base_url}/method").parse().unwrap(); let request = ProxyRequest::new(method, uri); let response = client.send(request).await.expect("response"); assert_eq!(response.status(), StatusCode::OK); match response.body() { Body::Once(bytes) => assert_eq!(bytes.as_ref(), expected_body.as_bytes()), - _ => panic!("expected buffered body"), + Body::Stream(_) => panic!("expected buffered body"), } } } #[tokio::test] async fn proxy_client_handles_connection_refused() { - let client = AxumProxyClient::default(); + let client = AxumProxyClient::try_new().expect("reqwest client init"); // Use a port that's unlikely to have anything running let uri: Uri = "http://127.0.0.1:1".parse().unwrap(); let request = ProxyRequest::new(Method::GET, uri); - let result = client.send(request).await; - assert!(result.is_err()); + client + .send(request) + .await + .expect_err("expected connection refused"); } #[tokio::test] @@ -297,14 +307,11 @@ mod integration_tests { use bytes::Bytes; use futures::stream; - let app = Router::new().route( - "/stream-echo", - post(|body: axum::body::Bytes| async move { body }), - ); + let app = Router::new().route("/stream-echo", post(|body: AxumBytes| async move { body })); let base_url = start_test_server(app).await; - let client = AxumProxyClient::default(); - let uri: Uri = format!("{}/stream-echo", base_url).parse().unwrap(); + let client = AxumProxyClient::try_new().expect("reqwest client init"); + let uri: Uri = format!("{base_url}/stream-echo").parse().unwrap(); let mut request = ProxyRequest::new(Method::POST, uri); // Create a streaming body - Body::stream expects Stream @@ -321,7 +328,7 @@ 
mod integration_tests { match response.body() { Body::Once(bytes) => assert_eq!(bytes.as_ref(), b"chunk1chunk2chunk3"), - _ => panic!("expected buffered body"), + Body::Stream(_) => panic!("expected buffered body"), } } } diff --git a/crates/edgezero-adapter-axum/src/request.rs b/crates/edgezero-adapter-axum/src/request.rs index e1e973d..91a905e 100644 --- a/crates/edgezero-adapter-axum/src/request.rs +++ b/crates/edgezero-adapter-axum/src/request.rs @@ -1,6 +1,6 @@ use std::net::SocketAddr; -use axum::body::Body as AxumBody; +use axum::body::{to_bytes, Body as AxumBody}; use axum::extract::connect_info::ConnectInfo; use axum::http::Request; use edgezero_core::body::Body; @@ -12,20 +12,24 @@ use edgezero_core::proxy::ProxyHandle; use crate::context::AxumRequestContext; use crate::proxy::AxumProxyClient; -/// Convert an Axum/Hyper request into an EdgeZero core request while preserving streaming bodies +/// Convert an Axum/Hyper request into an `EdgeZero` core request while preserving streaming bodies /// and exposing connection metadata through `AxumRequestContext`. +/// +/// # Errors +/// Returns an error if a buffered (`application/json`) body cannot be read into memory. 
+#[inline] pub async fn into_core_request(request: Request) -> Result { - let (parts, body) = request.into_parts(); + let (parts, axum_body) = request.into_parts(); let body = match parts.headers.get(CONTENT_TYPE) { Some(value) if is_json_content_type(value) => { - let bytes = axum::body::to_bytes(body, usize::MAX) + let bytes = to_bytes(axum_body, usize::MAX) .await - .map_err(|e| format!("Failed to convert body into bytes: {e}"))?; + .map_err(|err| format!("Failed to convert body into bytes: {err}"))?; Body::from_bytes(bytes) } _ => { - let stream = body.into_data_stream(); + let stream = axum_body.into_data_stream(); Body::from_stream(stream) } }; @@ -48,9 +52,11 @@ pub async fn into_core_request(request: Request) -> Result bool { return false; }; - let media_type = raw.split(';').next().map(str::trim).unwrap_or(""); + let media_type = raw.split(';').next().map_or("", str::trim); if media_type.eq_ignore_ascii_case("application/json") { return true; } - let Some((ty, subtype)) = media_type.split_once('/') else { + let Some((ty, raw_subtype)) = media_type.split_once('/') else { return false; }; @@ -73,8 +79,13 @@ fn is_json_content_type(value: &HeaderValue) -> bool { return false; } - let subtype = subtype.trim(); - subtype.len() >= 5 && subtype[subtype.len() - 5..].eq_ignore_ascii_case("+json") + let subtype = raw_subtype.trim(); + let Some(suffix_start) = subtype.len().checked_sub(5) else { + return false; + }; + subtype + .get(suffix_start..) 
+ .is_some_and(|suffix| suffix.eq_ignore_ascii_case("+json")) } #[cfg(test)] @@ -168,7 +179,7 @@ mod tests { } #[test] - fn test_is_json_content_type() { + fn json_content_type_detection() { assert!(is_json_content_type(&HeaderValue::from_static( "application/json" ))); diff --git a/crates/edgezero-adapter-axum/src/response.rs b/crates/edgezero-adapter-axum/src/response.rs index 46dc38f..9ad56d0 100644 --- a/crates/edgezero-adapter-axum/src/response.rs +++ b/crates/edgezero-adapter-axum/src/response.rs @@ -1,25 +1,27 @@ use axum::body::Body as AxumBody; -use axum::http::{Response, StatusCode}; +use axum::http::header::CONTENT_TYPE; +use axum::http::{HeaderValue, Response, StatusCode}; use futures::executor::block_on; -use futures_util::{pin_mut, StreamExt}; +use futures_util::{pin_mut, StreamExt as _}; use tracing::error; use edgezero_core::body::Body; use edgezero_core::http::Response as CoreResponse; -/// Convert an EdgeZero response into one consumable by Axum/Hyper. +/// Convert an `EdgeZero` response into one consumable by Axum/Hyper. /// /// Streaming responses are collected into an in-memory buffer. While this sacrifices /// incremental flushing, it keeps the adapter compatible with the non-`Send` streaming type used by /// `edgezero_core::Body` and works well for local development. 
+/// +#[inline] pub fn into_axum_response(response: CoreResponse) -> Response { - let (parts, body) = response.into_parts(); - let body = match body { + let (parts, core_body) = response.into_parts(); + let body = match core_body { Body::Once(bytes) => AxumBody::from(bytes), Body::Stream(stream) => { let result = block_on(async { let mut buf = Vec::new(); - let stream = stream; pin_mut!(stream); while let Some(chunk) = stream.next().await { let bytes = chunk?; @@ -31,16 +33,7 @@ pub fn into_axum_response(response: CoreResponse) -> Response { Ok(buf) => AxumBody::from(buf), Err(err) => { error!("streaming response error: {err}"); - let body = AxumBody::from("streaming response error"); - let mut response = Response::builder() - .status(StatusCode::INTERNAL_SERVER_ERROR) - .body(body) - .expect("error response"); - response.headers_mut().insert( - axum::http::header::CONTENT_TYPE, - axum::http::HeaderValue::from_static("text/plain; charset=utf-8"), - ); - return response; + return error_response_500("streaming response error"); } } } @@ -49,6 +42,18 @@ pub fn into_axum_response(response: CoreResponse) -> Response { Response::from_parts(parts, body) } +/// Build a minimal 500 response without any builder steps that could fail. +/// Used as a fallback on the request path so we never panic on synthesis. 
+fn error_response_500(message: &'static str) -> Response { + let mut response = Response::new(AxumBody::from(message)); + *response.status_mut() = StatusCode::INTERNAL_SERVER_ERROR; + response.headers_mut().insert( + CONTENT_TYPE, + HeaderValue::from_static("text/plain; charset=utf-8"), + ); + response +} + #[cfg(test)] mod tests { use super::*; @@ -83,9 +88,9 @@ mod tests { let collected = block_on(async { let mut data = Vec::new(); - let mut stream = axum_response.into_body().into_data_stream(); - while let Some(chunk) = stream.next().await { - let chunk = chunk.expect("chunk"); + let mut body_stream = axum_response.into_body().into_data_stream(); + while let Some(result) = body_stream.next().await { + let chunk = result.expect("chunk"); data.extend_from_slice(&chunk); } data diff --git a/crates/edgezero-adapter-axum/src/secret_store.rs b/crates/edgezero-adapter-axum/src/secret_store.rs index 1d216c8..93827d3 100644 --- a/crates/edgezero-adapter-axum/src/secret_store.rs +++ b/crates/edgezero-adapter-axum/src/secret_store.rs @@ -7,6 +7,8 @@ //! API_KEY=mysecret cargo edgezero dev //! 
``` +use std::env; + use async_trait::async_trait; use bytes::Bytes; use edgezero_core::secret_store::{SecretError, SecretStore}; @@ -18,12 +20,15 @@ use edgezero_core::secret_store::{SecretError, SecretStore}; pub struct EnvSecretStore; impl EnvSecretStore { + #[must_use] + #[inline] pub fn new() -> Self { Self } } impl Default for EnvSecretStore { + #[inline] fn default() -> Self { Self::new() } @@ -31,12 +36,13 @@ impl Default for EnvSecretStore { #[async_trait(?Send)] impl SecretStore for EnvSecretStore { + #[inline] async fn get_bytes(&self, _store_name: &str, key: &str) -> Result, SecretError> { #[cfg(unix)] { - use std::os::unix::ffi::OsStringExt; + use std::os::unix::ffi::OsStringExt as _; - match std::env::var_os(key) { + match env::var_os(key) { Some(value) => Ok(Some(Bytes::from(value.into_vec()))), None => Ok(None), } @@ -44,12 +50,14 @@ impl SecretStore for EnvSecretStore { #[cfg(not(unix))] { - match std::env::var(key) { + use std::env::VarError; + + match env::var(key) { Ok(value) => Ok(Some(Bytes::from(value.into_bytes()))), - Err(std::env::VarError::NotPresent) => Ok(None), - Err(std::env::VarError::NotUnicode(_)) => Err(SecretError::Internal( - anyhow::anyhow!("secret store returned an invalid Unicode value"), - )), + Err(VarError::NotPresent) => Ok(None), + Err(VarError::NotUnicode(_)) => Err(SecretError::Internal(anyhow::anyhow!( + "secret store returned an invalid Unicode value" + ))), } } } @@ -57,63 +65,63 @@ impl SecretStore for EnvSecretStore { #[cfg(test)] mod tests { + // Contract tests: use InMemorySecretStoreProvider since EnvSecretStore needs + // real env vars, which are unsafe in parallel tests. + // The EnvSecretStore is tested individually above. 
+ secret_store_contract_tests!(env_secret_contract, { + InMemorySecretStore::new([ + ("mystore/contract_key", Bytes::from("contract_value")), + ("mystore/contract_key_2", Bytes::from("another_value")), + ]) + }); + use super::*; use crate::test_utils::{env_guard, EnvOverride}; use bytes::Bytes; + use edgezero_core::secret_store::InMemorySecretStore; + use edgezero_core::secret_store_contract_tests; #[cfg(unix)] use std::ffi::OsString; + #[cfg(unix)] #[tokio::test(flavor = "current_thread")] - async fn get_bytes_returns_none_when_var_not_set() { + async fn get_bytes_preserves_non_utf8_secret_values() { + use std::os::unix::ffi::OsStringExt as _; + let _guard = env_guard().lock().await; - let _env = EnvOverride::clear("__EDGEZERO_TEST_MISSING_VAR_XYZ__"); + let _env = EnvOverride::set( + "__EDGEZERO_TEST_BINARY_SECRET__", + OsString::from_vec(vec![0xff, 0x61]), + ); let store = EnvSecretStore::new(); let result = store - .get_bytes("env", "__EDGEZERO_TEST_MISSING_VAR_XYZ__") + .get_bytes("env", "__EDGEZERO_TEST_BINARY_SECRET__") .await .unwrap(); - assert!(result.is_none()); + assert_eq!(result, Some(Bytes::from_static(&[0xff, 0x61]))); } #[tokio::test(flavor = "current_thread")] - async fn get_bytes_returns_value_when_var_set() { + async fn get_bytes_returns_none_when_var_not_set() { let _guard = env_guard().lock().await; - let _env = EnvOverride::set("__EDGEZERO_TEST_SECRET__", "test_value_123"); + let _env = EnvOverride::clear("__EDGEZERO_TEST_MISSING_VAR_XYZ__"); let store = EnvSecretStore::new(); let result = store - .get_bytes("env", "__EDGEZERO_TEST_SECRET__") + .get_bytes("env", "__EDGEZERO_TEST_MISSING_VAR_XYZ__") .await .unwrap(); - assert_eq!(result, Some(Bytes::from("test_value_123"))); + assert!(result.is_none()); } - #[cfg(unix)] #[tokio::test(flavor = "current_thread")] - async fn get_bytes_preserves_non_utf8_secret_values() { - use std::os::unix::ffi::OsStringExt; - + async fn get_bytes_returns_value_when_var_set() { let _guard = 
env_guard().lock().await; - let _env = EnvOverride::set( - "__EDGEZERO_TEST_BINARY_SECRET__", - OsString::from_vec(vec![0xff, 0x61]), - ); + let _env = EnvOverride::set("__EDGEZERO_TEST_SECRET__", "test_value_123"); let store = EnvSecretStore::new(); let result = store - .get_bytes("env", "__EDGEZERO_TEST_BINARY_SECRET__") + .get_bytes("env", "__EDGEZERO_TEST_SECRET__") .await .unwrap(); - assert_eq!(result, Some(Bytes::from_static(&[0xff, 0x61]))); + assert_eq!(result, Some(Bytes::from("test_value_123"))); } - - // Contract tests: use InMemorySecretStoreProvider since EnvSecretStore needs - // real env vars, which are unsafe in parallel tests. - // The EnvSecretStore is tested individually above. - use edgezero_core::secret_store_contract_tests; - - secret_store_contract_tests!(env_secret_contract, { - edgezero_core::InMemorySecretStore::new([ - ("mystore/contract_key", Bytes::from("contract_value")), - ("mystore/contract_key_2", Bytes::from("another_value")), - ]) - }); } diff --git a/crates/edgezero-adapter-axum/src/service.rs b/crates/edgezero-adapter-axum/src/service.rs index cf6ba27..9e88ca1 100644 --- a/crates/edgezero-adapter-axum/src/service.rs +++ b/crates/edgezero-adapter-axum/src/service.rs @@ -16,21 +16,23 @@ use tower::Service; use crate::request::into_core_request; use crate::response::into_axum_response; -/// Tower service that adapts EdgeZero router requests to Axum/Hyper compatible responses. +/// Tower service that adapts `EdgeZero` router requests to Axum/Hyper compatible responses. 
#[derive(Clone)] pub struct EdgeZeroAxumService { - router: RouterService, config_store_handle: Option, kv_handle: Option, + router: RouterService, secret_handle: Option, } impl EdgeZeroAxumService { + #[must_use] + #[inline] pub fn new(router: RouterService) -> Self { Self { - router, config_store_handle: None, kv_handle: None, + router, secret_handle: None, } } @@ -40,6 +42,7 @@ impl EdgeZeroAxumService { /// The handle is cloned into every request's extensions, making /// `ctx.config_store()` available in handlers. #[must_use] + #[inline] pub fn with_config_store_handle(mut self, handle: ConfigStoreHandle) -> Self { self.config_store_handle = Some(handle); self @@ -50,6 +53,7 @@ impl EdgeZeroAxumService { /// The handle is cloned into every request's extensions, making /// the `Kv` extractor available in handlers. #[must_use] + #[inline] pub fn with_kv_handle(mut self, handle: KvHandle) -> Self { self.kv_handle = Some(handle); self @@ -60,6 +64,7 @@ impl EdgeZeroAxumService { /// The handle is cloned into every request's extensions, making /// the `Secrets` extractor available in handlers. 
#[must_use] + #[inline] pub fn with_secret_handle(mut self, handle: SecretHandle) -> Self { self.secret_handle = Some(handle); self @@ -67,24 +72,21 @@ impl EdgeZeroAxumService { } impl Service> for EdgeZeroAxumService { - type Response = Response; type Error = Infallible; type Future = Pin> + Send>>; + type Response = Response; - fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } - - fn call(&mut self, request: Request) -> Self::Future { + #[inline] + fn call(&mut self, req: Request) -> Self::Future { let router = self.router.clone(); let config_store_handle = self.config_store_handle.clone(); let kv_handle = self.kv_handle.clone(); let secret_handle = self.secret_handle.clone(); Box::pin(async move { - let mut core_request = match into_core_request(request).await { - Ok(req) => req, - Err(e) => { - let mut err_response = Response::new(AxumBody::from(e.to_string())); + let mut core_request = match into_core_request(req).await { + Ok(converted) => converted, + Err(err) => { + let mut err_response = Response::new(AxumBody::from(err.clone())); *err_response.status_mut() = StatusCode::INTERNAL_SERVER_ERROR; return Ok(err_response); @@ -99,29 +101,44 @@ impl Service> for EdgeZeroAxumService { core_request.extensions_mut().insert(handle); } - if let Some(secret_handle) = secret_handle { - core_request.extensions_mut().insert(secret_handle); + if let Some(handle) = secret_handle { + core_request.extensions_mut().insert(handle); } let core_response = task::block_in_place(move || { Handle::current().block_on(router.oneshot(core_request)) }); - let response = into_axum_response(core_response); + let response = match core_response { + Ok(response) => into_axum_response(response), + Err(err) => { + let body = AxumBody::from(format!("internal error: {err}")); + let mut fallback = Response::new(body); + *fallback.status_mut() = StatusCode::INTERNAL_SERVER_ERROR; + fallback + } + }; Ok(response) }) } + + #[inline] + fn poll_ready(&mut self, _cx: 
&mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } } #[cfg(test)] mod tests { use super::*; + use axum::body::to_bytes; use edgezero_core::body::Body; use edgezero_core::config_store::{ConfigStore, ConfigStoreError, ConfigStoreHandle}; use edgezero_core::context::RequestContext; use edgezero_core::error::EdgeError; use edgezero_core::http::{response_builder, StatusCode}; + use edgezero_core::key_value_store::KvStore; use std::sync::Arc; - use tower::ServiceExt; + use tower::ServiceExt as _; struct FixedConfigStore(String); @@ -151,7 +168,7 @@ mod tests { #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn with_config_store_handle_injects_into_request() { - let handle = ConfigStoreHandle::new(Arc::new(FixedConfigStore("injected".to_string()))); + let handle = ConfigStoreHandle::new(Arc::new(FixedConfigStore("injected".to_owned()))); let router = RouterService::builder() .get("/check", |ctx: RequestContext| async move { @@ -176,10 +193,8 @@ mod tests { let response = service.ready().await.unwrap().call(request).await.unwrap(); assert_eq!(response.status(), StatusCode::OK); - let body = axum::body::to_bytes(response.into_body(), usize::MAX) - .await - .unwrap(); - assert_eq!(&body[..], b"injected"); + let body = to_bytes(response.into_body(), usize::MAX).await.unwrap(); + assert_eq!(&*body, b"injected"); } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] @@ -188,8 +203,8 @@ mod tests { let temp_dir = tempfile::tempdir().unwrap(); let db_path = temp_dir.path().join("test.redb"); - let store = Arc::new(PersistentKvStore::new(db_path).unwrap()); - let handle = KvHandle::new(store.clone()); + let store: Arc = Arc::new(PersistentKvStore::new(db_path).unwrap()); + let handle = KvHandle::new(Arc::clone(&store)); handle.put("test_key", &"injected").await.unwrap(); let router = RouterService::builder() @@ -212,10 +227,8 @@ mod tests { let response = service.ready().await.unwrap().call(request).await.unwrap(); assert_eq!(response.status(), 
StatusCode::OK); - let body = axum::body::to_bytes(response.into_body(), usize::MAX) - .await - .unwrap(); - assert_eq!(&body[..], b"injected"); + let body = to_bytes(response.into_body(), usize::MAX).await.unwrap(); + assert_eq!(&*body, b"injected"); } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] @@ -239,10 +252,8 @@ mod tests { let response = service.ready().await.unwrap().call(request).await.unwrap(); assert_eq!(response.status(), StatusCode::OK); - let body = axum::body::to_bytes(response.into_body(), usize::MAX) - .await - .unwrap(); - assert_eq!(&body[..], b"has_config=false"); + let body = to_bytes(response.into_body(), usize::MAX).await.unwrap(); + assert_eq!(&*body, b"has_config=false"); } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] @@ -264,7 +275,7 @@ mod tests { .get_bytes("env", "__EDGEZERO_SERVICE_TEST_SECRET__") .await .unwrap() - .map(|b| String::from_utf8_lossy(&b).into_owned()) + .map(|bytes| String::from_utf8_lossy(&bytes).into_owned()) .unwrap_or_default(); let response = response_builder() .status(StatusCode::OK) @@ -281,10 +292,8 @@ mod tests { .unwrap(); let response = service.ready().await.unwrap().call(request).await.unwrap(); assert_eq!(response.status(), StatusCode::OK); - let body = axum::body::to_bytes(response.into_body(), usize::MAX) - .await - .unwrap(); - assert_eq!(&body[..], b"injected_value"); + let body = to_bytes(response.into_body(), usize::MAX).await.unwrap(); + assert_eq!(&*body, b"injected_value"); } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] @@ -308,9 +317,7 @@ mod tests { let response = service.ready().await.unwrap().call(request).await.unwrap(); assert_eq!(response.status(), StatusCode::OK); - let body = axum::body::to_bytes(response.into_body(), usize::MAX) - .await - .unwrap(); - assert_eq!(&body[..], b"has_kv=false"); + let body = to_bytes(response.into_body(), usize::MAX).await.unwrap(); + assert_eq!(&*body, b"has_kv=false"); } } diff --git 
a/crates/edgezero-adapter-axum/src/templates/Cargo.toml.hbs b/crates/edgezero-adapter-axum/src/templates/Cargo.toml.hbs index a41ca25..d8d120a 100644 --- a/crates/edgezero-adapter-axum/src/templates/Cargo.toml.hbs +++ b/crates/edgezero-adapter-axum/src/templates/Cargo.toml.hbs @@ -4,6 +4,9 @@ version = "0.1.0" edition = "2021" publish = false +[lints] +workspace = true + [[bin]] name = "{{proj_axum}}" path = "src/main.rs" diff --git a/crates/edgezero-adapter-axum/src/templates/src/main.rs.hbs b/crates/edgezero-adapter-axum/src/templates/src/main.rs.hbs index 5a4b532..c8dd96c 100644 --- a/crates/edgezero-adapter-axum/src/templates/src/main.rs.hbs +++ b/crates/edgezero-adapter-axum/src/templates/src/main.rs.hbs @@ -1,7 +1,7 @@ use {{proj_core_mod}}::App; fn main() { - if let Err(err) = edgezero_adapter_axum::run_app::(include_str!("../../../edgezero.toml")) { + if let Err(err) = edgezero_adapter_axum::dev_server::run_app::(include_str!("../../../edgezero.toml")) { eprintln!("axum adapter failed: {err}"); std::process::exit(1); } diff --git a/crates/edgezero-adapter-axum/src/test_utils.rs b/crates/edgezero-adapter-axum/src/test_utils.rs index ce4e39d..7cfd650 100644 --- a/crates/edgezero-adapter-axum/src/test_utils.rs +++ b/crates/edgezero-adapter-axum/src/test_utils.rs @@ -1,16 +1,8 @@ -use std::ffi::OsString; +use std::env; +use std::ffi::{OsStr, OsString}; use std::sync::OnceLock; use tokio::sync::Mutex; -/// Returns a process-wide mutex used to serialize tests that mutate environment variables. -/// -/// Both `secret_store` and `service` tests share this lock to avoid data races across -/// test threads when setting or clearing environment variables. -pub fn env_guard() -> &'static Mutex<()> { - static GUARD: OnceLock> = OnceLock::new(); - GUARD.get_or_init(|| Mutex::new(())) -} - /// RAII guard that sets an environment variable for the duration of a test and /// restores the original value (or removes the variable) on drop. 
pub struct EnvOverride { @@ -19,25 +11,35 @@ pub struct EnvOverride { } impl EnvOverride { - pub fn set(key: &'static str, value: impl AsRef) -> Self { - let original = std::env::var_os(key); - std::env::set_var(key, value); + #[must_use] + pub fn clear(key: &'static str) -> Self { + let original = env::var_os(key); + env::remove_var(key); Self { key, original } } - pub fn clear(key: &'static str) -> Self { - let original = std::env::var_os(key); - std::env::remove_var(key); + pub fn set(key: &'static str, value: impl AsRef) -> Self { + let original = env::var_os(key); + env::set_var(key, value); Self { key, original } } } impl Drop for EnvOverride { fn drop(&mut self) { - if let Some(ref original) = self.original { - std::env::set_var(self.key, original); + if let Some(original) = &self.original { + env::set_var(self.key, original); } else { - std::env::remove_var(self.key); + env::remove_var(self.key); } } } + +/// Returns a process-wide mutex used to serialize tests that mutate environment variables. +/// +/// Both `secret_store` and `service` tests share this lock to avoid data races across +/// test threads when setting or clearing environment variables. 
+pub fn env_guard() -> &'static Mutex<()> { + static GUARD: OnceLock> = OnceLock::new(); + GUARD.get_or_init(|| Mutex::new(())) +} diff --git a/crates/edgezero-adapter-cloudflare/Cargo.toml b/crates/edgezero-adapter-cloudflare/Cargo.toml index 89a692c..48a7aac 100644 --- a/crates/edgezero-adapter-cloudflare/Cargo.toml +++ b/crates/edgezero-adapter-cloudflare/Cargo.toml @@ -5,6 +5,9 @@ version = { workspace = true } authors = { workspace = true } license = { workspace = true } +[lints] +workspace = true + [features] default = [] cloudflare = ["dep:worker", "dep:serde_json"] diff --git a/crates/edgezero-adapter-cloudflare/src/cli.rs b/crates/edgezero-adapter-cloudflare/src/cli.rs index a84eaa4..035a1d5 100644 --- a/crates/edgezero-adapter-cloudflare/src/cli.rs +++ b/crates/edgezero-adapter-cloudflare/src/cli.rs @@ -1,3 +1,4 @@ +use std::env; use std::fs; use std::path::{Path, PathBuf}; use std::process::Command; @@ -6,130 +7,69 @@ use ctor::ctor; use edgezero_adapter::cli_support::{ find_manifest_upwards, find_workspace_root, path_distance, read_package_name, }; +use edgezero_adapter::registry::{register_adapter, Adapter, AdapterAction}; use edgezero_adapter::scaffold::{ register_adapter_blueprint, AdapterBlueprint, AdapterFileSpec, CommandTemplates, DependencySpec, LoggingDefaults, ManifestSpec, ReadmeInfo, TemplateRegistration, }; -use edgezero_adapter::{register_adapter, Adapter, AdapterAction}; use walkdir::WalkDir; -const TARGET_TRIPLE: &str = "wasm32-unknown-unknown"; - -pub fn build() -> Result { - let manifest = find_wrangler_manifest( - std::env::current_dir() - .map_err(|e| e.to_string())? 
- .as_path(), - )?; - let manifest_dir = manifest - .parent() - .ok_or_else(|| "wrangler manifest has no parent directory".to_string())?; - let cargo_manifest = manifest_dir.join("Cargo.toml"); - let crate_name = read_package_name(&cargo_manifest)?; - - let status = Command::new("cargo") - .args([ - "build", - "--release", - "--target", - TARGET_TRIPLE, - "--manifest-path", - cargo_manifest - .to_str() - .ok_or("invalid Cargo manifest path")?, - ]) - .status() - .map_err(|e| format!("failed to run cargo build: {e}"))?; - if !status.success() { - return Err(format!("cargo build failed with status {status}")); - } - - let workspace_root = find_workspace_root(manifest_dir); - let artifact = locate_artifact(&workspace_root, manifest_dir, &crate_name)?; - let pkg_dir = workspace_root.join("pkg"); - fs::create_dir_all(&pkg_dir) - .map_err(|e| format!("failed to create {}: {e}", pkg_dir.display()))?; - let dest = pkg_dir.join(format!("{}.wasm", crate_name.replace('-', "_"))); - fs::copy(&artifact, &dest) - .map_err(|e| format!("failed to copy artifact to {}: {e}", dest.display()))?; - - Ok(dest) -} - -pub fn deploy(extra_args: &[String]) -> Result<(), String> { - let manifest = find_wrangler_manifest( - std::env::current_dir() - .map_err(|e| e.to_string())? - .as_path(), - )?; - let manifest_dir = manifest - .parent() - .ok_or_else(|| "wrangler manifest has no parent directory".to_string())?; - let config = manifest - .to_str() - .ok_or_else(|| "invalid wrangler config path".to_string())?; - - let status = Command::new("wrangler") - .args(["deploy", "--config", config]) - .args(extra_args) - .current_dir(manifest_dir) - .status() - .map_err(|e| format!("failed to run wrangler CLI: {e}"))?; - if !status.success() { - return Err(format!("wrangler deploy failed with status {status}")); - } - - Ok(()) -} - -pub fn serve(extra_args: &[String]) -> Result<(), String> { - let manifest = find_wrangler_manifest( - std::env::current_dir() - .map_err(|e| e.to_string())? 
- .as_path(), - )?; - let manifest_dir = manifest - .parent() - .ok_or_else(|| "wrangler manifest has no parent directory".to_string())?; - let config = manifest - .to_str() - .ok_or_else(|| "invalid wrangler config path".to_string())?; - - let status = Command::new("wrangler") - .args(["dev", "--config", config]) - .args(extra_args) - .current_dir(manifest_dir) - .status() - .map_err(|e| format!("failed to run wrangler CLI: {e}"))?; - if !status.success() { - return Err(format!("wrangler dev failed with status {status}")); - } - - Ok(()) -} - -struct CloudflareCliAdapter; +static CLOUDFLARE_ADAPTER: CloudflareCliAdapter = CloudflareCliAdapter; -static CLOUDFLARE_TEMPLATE_REGISTRATIONS: &[TemplateRegistration] = &[ - TemplateRegistration { - name: "cf_Cargo_toml", - contents: include_str!("templates/Cargo.toml.hbs"), +static CLOUDFLARE_BLUEPRINT: AdapterBlueprint = AdapterBlueprint { + id: "cloudflare", + display_name: "Cloudflare Workers", + crate_suffix: "adapter-cloudflare", + dependency_crate: "edgezero-adapter-cloudflare", + dependency_repo_path: "crates/edgezero-adapter-cloudflare", + template_registrations: CLOUDFLARE_TEMPLATE_REGISTRATIONS, + files: CLOUDFLARE_FILE_SPECS, + extra_dirs: &["src", ".cargo"], + dependencies: CLOUDFLARE_DEPENDENCIES, + manifest: ManifestSpec { + manifest_filename: "wrangler.toml", + build_target: "wasm32-unknown-unknown", + build_profile: "release", + build_features: &["cloudflare"], }, - TemplateRegistration { - name: "cf_src_lib_rs", - contents: include_str!("templates/src/lib.rs.hbs"), + commands: CommandTemplates { + build: "wrangler build --cwd {crate_dir}", + deploy: "wrangler deploy --cwd {crate_dir}", + serve: "wrangler dev --cwd {crate_dir}", }, - TemplateRegistration { - name: "cf_src_main_rs", - contents: include_str!("templates/src/main.rs.hbs"), + logging: LoggingDefaults { + endpoint: None, + level: "info", + echo_stdout: None, }, - TemplateRegistration { - name: "cf_cargo_config_toml", - contents: 
include_str!("templates/.cargo/config.toml.hbs"), + readme: ReadmeInfo { + description: "{display} entrypoint.", + dev_heading: "{display} (local)", + dev_steps: &["`edgezero-cli serve --adapter cloudflare`"], }, - TemplateRegistration { - name: "cf_wrangler_toml", - contents: include_str!("templates/wrangler.toml.hbs"), + run_module: "edgezero_adapter_cloudflare", +}; + +static CLOUDFLARE_DEPENDENCIES: &[DependencySpec] = &[ + DependencySpec { + key: "dep_edgezero_core_cloudflare", + repo_crate: "crates/edgezero-core", + fallback: "edgezero-core = { git = \"https://git@github.com/stackpop/edgezero.git\", package = \"edgezero-core\", default-features = false }", + features: &[], + }, + DependencySpec { + key: "dep_edgezero_adapter_cloudflare", + repo_crate: "crates/edgezero-adapter-cloudflare", + fallback: + "edgezero-adapter-cloudflare = { git = \"https://git@github.com/stackpop/edgezero.git\", package = \"edgezero-adapter-cloudflare\", default-features = false }", + features: &[], + }, + DependencySpec { + key: "dep_edgezero_adapter_cloudflare_wasm", + repo_crate: "crates/edgezero-adapter-cloudflare", + fallback: + "edgezero-adapter-cloudflare = { git = \"https://git@github.com/stackpop/edgezero.git\", package = \"edgezero-adapter-cloudflare\", default-features = false, features = [\"cloudflare\"] }", + features: &["cloudflare"], }, ]; @@ -156,92 +96,118 @@ static CLOUDFLARE_FILE_SPECS: &[AdapterFileSpec] = &[ }, ]; -static CLOUDFLARE_DEPENDENCIES: &[DependencySpec] = &[ - DependencySpec { - key: "dep_edgezero_core_cloudflare", - repo_crate: "crates/edgezero-core", - fallback: "edgezero-core = { git = \"https://git@github.com/stackpop/edgezero.git\", package = \"edgezero-core\", default-features = false }", - features: &[], - }, - DependencySpec { - key: "dep_edgezero_adapter_cloudflare", - repo_crate: "crates/edgezero-adapter-cloudflare", - fallback: - "edgezero-adapter-cloudflare = { git = \"https://git@github.com/stackpop/edgezero.git\", package = 
\"edgezero-adapter-cloudflare\", default-features = false }", - features: &[], - }, - DependencySpec { - key: "dep_edgezero_adapter_cloudflare_wasm", - repo_crate: "crates/edgezero-adapter-cloudflare", - fallback: - "edgezero-adapter-cloudflare = { git = \"https://git@github.com/stackpop/edgezero.git\", package = \"edgezero-adapter-cloudflare\", default-features = false, features = [\"cloudflare\"] }", - features: &["cloudflare"], +static CLOUDFLARE_TEMPLATE_REGISTRATIONS: &[TemplateRegistration] = &[ + TemplateRegistration { + name: "cf_Cargo_toml", + contents: include_str!("templates/Cargo.toml.hbs"), }, -]; - -static CLOUDFLARE_BLUEPRINT: AdapterBlueprint = AdapterBlueprint { - id: "cloudflare", - display_name: "Cloudflare Workers", - crate_suffix: "adapter-cloudflare", - dependency_crate: "edgezero-adapter-cloudflare", - dependency_repo_path: "crates/edgezero-adapter-cloudflare", - template_registrations: CLOUDFLARE_TEMPLATE_REGISTRATIONS, - files: CLOUDFLARE_FILE_SPECS, - extra_dirs: &["src", ".cargo"], - dependencies: CLOUDFLARE_DEPENDENCIES, - manifest: ManifestSpec { - manifest_filename: "wrangler.toml", - build_target: "wasm32-unknown-unknown", - build_profile: "release", - build_features: &["cloudflare"], + TemplateRegistration { + name: "cf_src_lib_rs", + contents: include_str!("templates/src/lib.rs.hbs"), }, - commands: CommandTemplates { - build: "wrangler build --cwd {crate_dir}", - deploy: "wrangler deploy --cwd {crate_dir}", - serve: "wrangler dev --cwd {crate_dir}", + TemplateRegistration { + name: "cf_src_main_rs", + contents: include_str!("templates/src/main.rs.hbs"), }, - logging: LoggingDefaults { - endpoint: None, - level: "info", - echo_stdout: None, + TemplateRegistration { + name: "cf_cargo_config_toml", + contents: include_str!("templates/.cargo/config.toml.hbs"), }, - readme: ReadmeInfo { - description: "{display} entrypoint.", - dev_heading: "{display} (local)", - dev_steps: &["`edgezero-cli serve --adapter cloudflare`"], + 
TemplateRegistration { + name: "cf_wrangler_toml", + contents: include_str!("templates/wrangler.toml.hbs"), }, - run_module: "edgezero_adapter_cloudflare", -}; +]; -static CLOUDFLARE_ADAPTER: CloudflareCliAdapter = CloudflareCliAdapter; +const TARGET_TRIPLE: &str = "wasm32-unknown-unknown"; -impl Adapter for CloudflareCliAdapter { - fn name(&self) -> &'static str { - "cloudflare" - } +struct CloudflareCliAdapter; +impl Adapter for CloudflareCliAdapter { fn execute(&self, action: AdapterAction, args: &[String]) -> Result<(), String> { match action { AdapterAction::Build => build().map(|artifact| { - println!( + log::info!( "[edgezero] Cloudflare build artifact -> {}", artifact.display() ); }), AdapterAction::Deploy => deploy(args), AdapterAction::Serve => serve(args), + other => Err(format!("cloudflare adapter does not support {other:?}")), } } + + fn name(&self) -> &'static str { + "cloudflare" + } } -pub fn register() { - register_adapter(&CLOUDFLARE_ADAPTER); - register_adapter_blueprint(&CLOUDFLARE_BLUEPRINT); +/// # Errors +/// Returns an error if the Cloudflare wrangler build command fails. 
+#[inline] +pub fn build() -> Result { + let manifest = + find_wrangler_manifest(env::current_dir().map_err(|err| err.to_string())?.as_path())?; + let manifest_dir = manifest + .parent() + .ok_or_else(|| "wrangler manifest has no parent directory".to_owned())?; + let cargo_manifest = manifest_dir.join("Cargo.toml"); + let crate_name = read_package_name(&cargo_manifest)?; + + let status = Command::new("cargo") + .args([ + "build", + "--release", + "--target", + TARGET_TRIPLE, + "--manifest-path", + cargo_manifest + .to_str() + .ok_or("invalid Cargo manifest path")?, + ]) + .status() + .map_err(|err| format!("failed to run cargo build: {err}"))?; + if !status.success() { + return Err(format!("cargo build failed with status {status}")); + } + + let workspace_root = find_workspace_root(manifest_dir); + let artifact = locate_artifact(&workspace_root, manifest_dir, &crate_name)?; + let pkg_dir = workspace_root.join("pkg"); + fs::create_dir_all(&pkg_dir) + .map_err(|err| format!("failed to create {}: {err}", pkg_dir.display()))?; + let dest = pkg_dir.join(format!("{}.wasm", crate_name.replace('-', "_"))); + fs::copy(&artifact, &dest) + .map_err(|err| format!("failed to copy artifact to {}: {err}", dest.display()))?; + + Ok(dest) } -#[ctor] -fn register_ctor() { - register(); +/// # Errors +/// Returns an error if the Cloudflare wrangler deploy command fails. 
+#[inline] +pub fn deploy(extra_args: &[String]) -> Result<(), String> { + let manifest = + find_wrangler_manifest(env::current_dir().map_err(|err| err.to_string())?.as_path())?; + let manifest_dir = manifest + .parent() + .ok_or_else(|| "wrangler manifest has no parent directory".to_owned())?; + let config = manifest + .to_str() + .ok_or_else(|| "invalid wrangler config path".to_owned())?; + + let status = Command::new("wrangler") + .args(["deploy", "--config", config]) + .args(extra_args) + .current_dir(manifest_dir) + .status() + .map_err(|err| format!("failed to run wrangler CLI: {err}"))?; + if !status.success() { + return Err(format!("wrangler deploy failed with status {status}")); + } + + Ok(()) } fn find_wrangler_manifest(start: &Path) -> Result { @@ -257,18 +223,15 @@ fn find_wrangler_manifest(start: &Path) -> Result { .filter_map(Result::ok) .map(|entry| entry.path().to_path_buf()) .filter(|path| { - path.file_name() - .map(|n| n == "wrangler.toml") - .unwrap_or(false) + path.file_name().is_some_and(|n| n == "wrangler.toml") && path .parent() - .map(|dir| dir.join("Cargo.toml").exists()) - .unwrap_or(false) + .is_some_and(|dir| dir.join("Cargo.toml").exists()) }) .collect(); if candidates.is_empty() { - return Err("could not locate wrangler.toml".to_string()); + return Err("could not locate wrangler.toml".to_owned()); } candidates.sort_by_key(|path| { @@ -286,7 +249,7 @@ fn locate_artifact( ) -> Result { let release_name = format!("{}.wasm", crate_name.replace('-', "_")); - if let Some(custom) = std::env::var_os("CARGO_TARGET_DIR") { + if let Some(custom) = env::var_os("CARGO_TARGET_DIR") { let candidate = PathBuf::from(custom) .join(TARGET_TRIPLE) .join("release") @@ -315,7 +278,43 @@ fn locate_artifact( } Err(format!( - "compiled artifact not found for {} (looked in manifest and workspace target directories)", - crate_name + "compiled artifact not found for {crate_name} (looked in manifest and workspace target directories)" )) } + +#[inline] +pub fn 
register() { + register_adapter(&CLOUDFLARE_ADAPTER); + register_adapter_blueprint(&CLOUDFLARE_BLUEPRINT); +} + +#[ctor] +fn register_ctor() { + register(); +} + +/// # Errors +/// Returns an error if the Cloudflare wrangler dev command fails. +#[inline] +pub fn serve(extra_args: &[String]) -> Result<(), String> { + let manifest = + find_wrangler_manifest(env::current_dir().map_err(|err| err.to_string())?.as_path())?; + let manifest_dir = manifest + .parent() + .ok_or_else(|| "wrangler manifest has no parent directory".to_owned())?; + let config = manifest + .to_str() + .ok_or_else(|| "invalid wrangler config path".to_owned())?; + + let status = Command::new("wrangler") + .args(["dev", "--config", config]) + .args(extra_args) + .current_dir(manifest_dir) + .status() + .map_err(|err| format!("failed to run wrangler CLI: {err}"))?; + if !status.success() { + return Err(format!("wrangler dev failed with status {status}")); + } + + Ok(()) +} diff --git a/crates/edgezero-adapter-cloudflare/src/key_value_store.rs b/crates/edgezero-adapter-cloudflare/src/key_value_store.rs index 2256691..d94466d 100644 --- a/crates/edgezero-adapter-cloudflare/src/key_value_store.rs +++ b/crates/edgezero-adapter-cloudflare/src/key_value_store.rs @@ -116,6 +116,10 @@ impl KvStore for CloudflareKvStore { .filter(|cursor| !cursor.is_empty()), }) } + + async fn exists(&self, key: &str) -> Result { + Ok(self.get_bytes(key).await?.is_some()) + } } // TODO: integration tests require a wasm32 target + wrangler. 
diff --git a/crates/edgezero-adapter-cloudflare/src/lib.rs b/crates/edgezero-adapter-cloudflare/src/lib.rs index d60b8ef..c4e1e22 100644 --- a/crates/edgezero-adapter-cloudflare/src/lib.rs +++ b/crates/edgezero-adapter-cloudflare/src/lib.rs @@ -6,39 +6,29 @@ pub mod cli; #[cfg(all(feature = "cloudflare", target_arch = "wasm32"))] pub mod config_store; #[cfg(all(feature = "cloudflare", target_arch = "wasm32"))] -mod context; +pub mod context; #[cfg(all(feature = "cloudflare", target_arch = "wasm32"))] pub mod key_value_store; #[cfg(all(feature = "cloudflare", target_arch = "wasm32"))] -mod proxy; +pub mod proxy; #[cfg(all(feature = "cloudflare", target_arch = "wasm32"))] -mod request; +pub mod request; #[cfg(all(feature = "cloudflare", target_arch = "wasm32"))] -mod response; +pub mod response; #[cfg(all(feature = "cloudflare", target_arch = "wasm32"))] pub mod secret_store; -#[cfg(all(feature = "cloudflare", target_arch = "wasm32"))] -pub use config_store::CloudflareConfigStore; -#[cfg(all(feature = "cloudflare", target_arch = "wasm32"))] -pub use context::CloudflareRequestContext; -#[cfg(all(feature = "cloudflare", target_arch = "wasm32"))] -pub use proxy::CloudflareProxyClient; -#[cfg(all(feature = "cloudflare", target_arch = "wasm32"))] -#[allow(deprecated)] -pub use request::{ - dispatch, dispatch_with_config, dispatch_with_config_handle, dispatch_with_kv, - dispatch_with_kv_and_secrets, dispatch_with_secrets, into_core_request, DEFAULT_KV_BINDING, -}; -#[cfg(all(feature = "cloudflare", target_arch = "wasm32"))] -pub use response::from_core_response; - +/// # Errors +/// Returns [`log::SetLoggerError`] if a global logger is already installed. #[cfg(all(feature = "cloudflare", target_arch = "wasm32"))] pub fn init_logger() -> Result<(), log::SetLoggerError> { Ok(()) } +/// # Errors +/// Never; this is a no-op stub on non-wasm targets. 
#[cfg(not(all(feature = "cloudflare", target_arch = "wasm32")))] +#[inline] pub fn init_logger() -> Result<(), log::SetLoggerError> { Ok(()) } @@ -60,7 +50,6 @@ pub trait AppExt { #[cfg(all(feature = "cloudflare", target_arch = "wasm32"))] impl AppExt for edgezero_core::app::App { - #[allow(deprecated)] fn dispatch<'a>( &'a self, req: worker::Request, @@ -86,7 +75,8 @@ pub async fn run_app( ctx: worker::Context, ) -> Result { init_logger().expect("init cloudflare logger"); - let manifest_loader = edgezero_core::manifest::ManifestLoader::load_from_str(manifest_src); + let manifest_loader = edgezero_core::manifest::ManifestLoader::try_load_from_str(manifest_src) + .map_err(|err| worker::Error::RustError(err.to_string()))?; let manifest = manifest_loader.manifest(); let kv_binding = manifest.kv_store_name(edgezero_core::app::CLOUDFLARE_ADAPTER); let kv_required = manifest.stores.kv.is_some(); diff --git a/crates/edgezero-adapter-cloudflare/src/request.rs b/crates/edgezero-adapter-cloudflare/src/request.rs index 3575a96..7228362 100644 --- a/crates/edgezero-adapter-cloudflare/src/request.rs +++ b/crates/edgezero-adapter-cloudflare/src/request.rs @@ -2,9 +2,9 @@ use std::collections::BTreeSet; use std::sync::{Arc, Mutex, OnceLock}; use crate::config_store::CloudflareConfigStore; +use crate::context::CloudflareRequestContext; use crate::proxy::CloudflareProxyClient; use crate::response::from_core_response; -use crate::CloudflareRequestContext; use edgezero_core::app::App; use edgezero_core::body::Body; use edgezero_core::config_store::ConfigStoreHandle; @@ -307,7 +307,10 @@ async fn dispatch_core_request( core_request.extensions_mut().insert(handle); } let svc = app.router().clone(); - let response = svc.oneshot(core_request).await; + let response = svc + .oneshot(core_request) + .await + .map_err(edge_error_to_worker)?; from_core_response(response).map_err(edge_error_to_worker) } diff --git a/crates/edgezero-adapter-cloudflare/src/templates/Cargo.toml.hbs 
b/crates/edgezero-adapter-cloudflare/src/templates/Cargo.toml.hbs index 1b9bd7c..f1b4076 100644 --- a/crates/edgezero-adapter-cloudflare/src/templates/Cargo.toml.hbs +++ b/crates/edgezero-adapter-cloudflare/src/templates/Cargo.toml.hbs @@ -4,6 +4,9 @@ version = "0.1.0" edition = "2021" publish = false +[lints] +workspace = true + [[bin]] name = "{{proj_cloudflare}}" path = "src/main.rs" diff --git a/crates/edgezero-adapter-cloudflare/tests/contract.rs b/crates/edgezero-adapter-cloudflare/tests/contract.rs index e74b50d..7fff3c7 100644 --- a/crates/edgezero-adapter-cloudflare/tests/contract.rs +++ b/crates/edgezero-adapter-cloudflare/tests/contract.rs @@ -3,10 +3,11 @@ #![allow(deprecated)] use bytes::Bytes; -use edgezero_adapter_cloudflare::{ - dispatch, dispatch_with_config, dispatch_with_config_handle, from_core_response, - into_core_request, CloudflareRequestContext, +use edgezero_adapter_cloudflare::context::CloudflareRequestContext; +use edgezero_adapter_cloudflare::request::{ + dispatch, dispatch_with_config, dispatch_with_config_handle, into_core_request, }; +use edgezero_adapter_cloudflare::response::from_core_response; use edgezero_core::{ app::App, body::Body, @@ -43,7 +44,7 @@ fn build_test_app() -> App { } async fn mirror_body(ctx: RequestContext) -> Result { - let bytes = ctx.request().body().as_bytes().to_vec(); + let bytes = ctx.request().body().as_bytes().expect("buffered").to_vec(); let response = response_builder() .status(StatusCode::OK) .body(Body::from(bytes)) @@ -145,7 +146,10 @@ async fn into_core_request_preserves_method_uri_headers_body_and_context() { .and_then(|value| value.to_str().ok()); assert_eq!(header, Some("1")); - assert_eq!(core_request.body().as_bytes(), b"payload"); + assert_eq!( + core_request.body().as_bytes().expect("buffered"), + b"payload" + ); assert!(CloudflareRequestContext::get(&core_request).is_some()); } diff --git a/crates/edgezero-adapter-fastly/Cargo.toml b/crates/edgezero-adapter-fastly/Cargo.toml index 
f052e57..3b92303 100644 --- a/crates/edgezero-adapter-fastly/Cargo.toml +++ b/crates/edgezero-adapter-fastly/Cargo.toml @@ -5,6 +5,9 @@ version = { workspace = true } authors = { workspace = true } license = { workspace = true } +[lints] +workspace = true + [features] default = [] cli = [ @@ -34,6 +37,7 @@ log = { workspace = true } log-fastly = { workspace = true, optional = true } fern = { workspace = true } chrono = { workspace = true } +thiserror = { workspace = true } walkdir = { workspace = true, optional = true } [dev-dependencies] diff --git a/crates/edgezero-adapter-fastly/src/cli.rs b/crates/edgezero-adapter-fastly/src/cli.rs index 8678780..61683c1 100644 --- a/crates/edgezero-adapter-fastly/src/cli.rs +++ b/crates/edgezero-adapter-fastly/src/cli.rs @@ -1,3 +1,4 @@ +use std::env; use std::fs; use std::path::{Path, PathBuf}; use std::process::Command; @@ -6,163 +7,14 @@ use ctor::ctor; use edgezero_adapter::cli_support::{ find_manifest_upwards, find_workspace_root, path_distance, read_package_name, }; +use edgezero_adapter::registry::{register_adapter, Adapter, AdapterAction}; use edgezero_adapter::scaffold::{ register_adapter_blueprint, AdapterBlueprint, AdapterFileSpec, CommandTemplates, DependencySpec, LoggingDefaults, ManifestSpec, ReadmeInfo, TemplateRegistration, }; -use edgezero_adapter::{register_adapter, Adapter, AdapterAction}; use walkdir::WalkDir; -pub fn build(extra_args: &[String]) -> Result { - let manifest = find_fastly_manifest( - std::env::current_dir() - .map_err(|e| e.to_string())? 
- .as_path(), - )?; - let manifest_dir = manifest - .parent() - .ok_or_else(|| "fastly manifest has no parent directory".to_string())?; - let cargo_manifest = manifest_dir.join("Cargo.toml"); - let crate_name = read_package_name(&cargo_manifest)?; - - let status = Command::new("cargo") - .args([ - "build", - "--release", - "--target", - "wasm32-wasip1", - "--manifest-path", - cargo_manifest - .to_str() - .ok_or("invalid Cargo manifest path")?, - ]) - .args(extra_args) - .status() - .map_err(|e| format!("failed to run cargo build: {e}"))?; - if !status.success() { - return Err(format!("cargo build failed with status {status}")); - } - - let workspace_root = find_workspace_root(manifest_dir); - let artifact = locate_artifact(&workspace_root, manifest_dir, &crate_name)?; - let pkg_dir = workspace_root.join("pkg"); - fs::create_dir_all(&pkg_dir) - .map_err(|e| format!("failed to create {}: {e}", pkg_dir.display()))?; - let dest = pkg_dir.join(format!("{}.wasm", crate_name.replace('-', "_"))); - fs::copy(&artifact, &dest) - .map_err(|e| format!("failed to copy artifact to {}: {e}", dest.display()))?; - - Ok(dest) -} - -pub fn deploy(extra_args: &[String]) -> Result<(), String> { - let manifest = find_fastly_manifest( - std::env::current_dir() - .map_err(|e| e.to_string())? - .as_path(), - )?; - let manifest_dir = manifest - .parent() - .ok_or_else(|| "fastly manifest has no parent directory".to_string())?; - - let status = Command::new("fastly") - .args(["compute", "deploy"]) - .args(extra_args) - .current_dir(manifest_dir) - .status() - .map_err(|e| format!("failed to run fastly CLI: {e}"))?; - if !status.success() { - return Err(format!("fastly compute deploy failed with status {status}")); - } - - Ok(()) -} - -pub fn serve(extra_args: &[String]) -> Result<(), String> { - let manifest = find_fastly_manifest( - std::env::current_dir() - .map_err(|e| e.to_string())? 
- .as_path(), - )?; - let manifest_dir = manifest - .parent() - .ok_or_else(|| "fastly manifest has no parent directory".to_string())?; - - let status = Command::new("fastly") - .args(["compute", "serve"]) - .args(extra_args) - .current_dir(manifest_dir) - .status() - .map_err(|e| format!("failed to run fastly CLI: {e}"))?; - if !status.success() { - return Err(format!("fastly compute serve failed with status {status}")); - } - - Ok(()) -} - -struct FastlyCliAdapter; - -static FASTLY_TEMPLATE_REGISTRATIONS: &[TemplateRegistration] = &[ - TemplateRegistration { - name: "fastly_Cargo_toml", - contents: include_str!("templates/Cargo.toml.hbs"), - }, - TemplateRegistration { - name: "fastly_src_main_rs", - contents: include_str!("templates/src/main.rs.hbs"), - }, - TemplateRegistration { - name: "fastly_cargo_config_toml", - contents: include_str!("templates/.cargo/config.toml.hbs"), - }, - TemplateRegistration { - name: "fastly_fastly_toml", - contents: include_str!("templates/fastly.toml.hbs"), - }, -]; - -static FASTLY_FILE_SPECS: &[AdapterFileSpec] = &[ - AdapterFileSpec { - template: "fastly_Cargo_toml", - output: "Cargo.toml", - }, - AdapterFileSpec { - template: "fastly_src_main_rs", - output: "src/main.rs", - }, - AdapterFileSpec { - template: "fastly_cargo_config_toml", - output: ".cargo/config.toml", - }, - AdapterFileSpec { - template: "fastly_fastly_toml", - output: "fastly.toml", - }, -]; - -static FASTLY_DEPENDENCIES: &[DependencySpec] = &[ - DependencySpec { - key: "dep_edgezero_core_fastly", - repo_crate: "crates/edgezero-core", - fallback: "edgezero-core = { git = \"https://git@github.com/stackpop/edgezero.git\", package = \"edgezero-core\", default-features = false }", - features: &[], - }, - DependencySpec { - key: "dep_edgezero_adapter_fastly", - repo_crate: "crates/edgezero-adapter-fastly", - fallback: - "edgezero-adapter-fastly = { git = \"https://git@github.com/stackpop/edgezero.git\", package = \"edgezero-adapter-fastly\", default-features = 
false }", - features: &[], - }, - DependencySpec { - key: "dep_edgezero_adapter_fastly_wasm", - repo_crate: "crates/edgezero-adapter-fastly", - fallback: - "edgezero-adapter-fastly = { git = \"https://git@github.com/stackpop/edgezero.git\", package = \"edgezero-adapter-fastly\", default-features = false, features = [\"fastly\"] }", - features: &["fastly"], - }, -]; +static FASTLY_ADAPTER: FastlyCliAdapter = FastlyCliAdapter; static FASTLY_BLUEPRINT: AdapterBlueprint = AdapterBlueprint { id: "fastly", @@ -198,34 +50,151 @@ static FASTLY_BLUEPRINT: AdapterBlueprint = AdapterBlueprint { run_module: "edgezero_adapter_fastly", }; -static FASTLY_ADAPTER: FastlyCliAdapter = FastlyCliAdapter; +static FASTLY_DEPENDENCIES: &[DependencySpec] = &[ + DependencySpec { + key: "dep_edgezero_core_fastly", + repo_crate: "crates/edgezero-core", + fallback: "edgezero-core = { git = \"https://git@github.com/stackpop/edgezero.git\", package = \"edgezero-core\", default-features = false }", + features: &[], + }, + DependencySpec { + key: "dep_edgezero_adapter_fastly", + repo_crate: "crates/edgezero-adapter-fastly", + fallback: + "edgezero-adapter-fastly = { git = \"https://git@github.com/stackpop/edgezero.git\", package = \"edgezero-adapter-fastly\", default-features = false }", + features: &[], + }, + DependencySpec { + key: "dep_edgezero_adapter_fastly_wasm", + repo_crate: "crates/edgezero-adapter-fastly", + fallback: + "edgezero-adapter-fastly = { git = \"https://git@github.com/stackpop/edgezero.git\", package = \"edgezero-adapter-fastly\", default-features = false, features = [\"fastly\"] }", + features: &["fastly"], + }, +]; -impl Adapter for FastlyCliAdapter { - fn name(&self) -> &'static str { - "fastly" - } +static FASTLY_FILE_SPECS: &[AdapterFileSpec] = &[ + AdapterFileSpec { + template: "fastly_Cargo_toml", + output: "Cargo.toml", + }, + AdapterFileSpec { + template: "fastly_src_main_rs", + output: "src/main.rs", + }, + AdapterFileSpec { + template: "fastly_cargo_config_toml", 
+ output: ".cargo/config.toml", + }, + AdapterFileSpec { + template: "fastly_fastly_toml", + output: "fastly.toml", + }, +]; + +static FASTLY_TEMPLATE_REGISTRATIONS: &[TemplateRegistration] = &[ + TemplateRegistration { + name: "fastly_Cargo_toml", + contents: include_str!("templates/Cargo.toml.hbs"), + }, + TemplateRegistration { + name: "fastly_src_main_rs", + contents: include_str!("templates/src/main.rs.hbs"), + }, + TemplateRegistration { + name: "fastly_cargo_config_toml", + contents: include_str!("templates/.cargo/config.toml.hbs"), + }, + TemplateRegistration { + name: "fastly_fastly_toml", + contents: include_str!("templates/fastly.toml.hbs"), + }, +]; +struct FastlyCliAdapter; + +impl Adapter for FastlyCliAdapter { fn execute(&self, action: AdapterAction, args: &[String]) -> Result<(), String> { match action { AdapterAction::Build => { let artifact = build(args)?; - println!("[edgezero] Fastly build complete -> {}", artifact.display()); + log::info!("[edgezero] Fastly build complete -> {}", artifact.display()); Ok(()) } AdapterAction::Deploy => deploy(args), AdapterAction::Serve => serve(args), + other => Err(format!("fastly adapter does not support {other:?}")), } } + + fn name(&self) -> &'static str { + "fastly" + } } -pub fn register() { - register_adapter(&FASTLY_ADAPTER); - register_adapter_blueprint(&FASTLY_BLUEPRINT); +/// # Errors +/// Returns an error if the Fastly CLI build command fails. 
+#[inline] +pub fn build(extra_args: &[String]) -> Result { + let manifest = + find_fastly_manifest(env::current_dir().map_err(|err| err.to_string())?.as_path())?; + let manifest_dir = manifest + .parent() + .ok_or_else(|| "fastly manifest has no parent directory".to_owned())?; + let cargo_manifest = manifest_dir.join("Cargo.toml"); + let crate_name = read_package_name(&cargo_manifest)?; + + let status = Command::new("cargo") + .args([ + "build", + "--release", + "--target", + "wasm32-wasip1", + "--manifest-path", + cargo_manifest + .to_str() + .ok_or("invalid Cargo manifest path")?, + ]) + .args(extra_args) + .status() + .map_err(|err| format!("failed to run cargo build: {err}"))?; + if !status.success() { + return Err(format!("cargo build failed with status {status}")); + } + + let workspace_root = find_workspace_root(manifest_dir); + let artifact = locate_artifact(&workspace_root, manifest_dir, &crate_name)?; + let pkg_dir = workspace_root.join("pkg"); + fs::create_dir_all(&pkg_dir) + .map_err(|err| format!("failed to create {}: {err}", pkg_dir.display()))?; + let dest = pkg_dir.join(format!("{}.wasm", crate_name.replace('-', "_"))); + fs::copy(&artifact, &dest) + .map_err(|err| format!("failed to copy artifact to {}: {err}", dest.display()))?; + + Ok(dest) } -#[ctor] -fn register_ctor() { - register(); +/// # Errors +/// Returns an error if the Fastly CLI deploy command fails. 
+#[inline] +pub fn deploy(extra_args: &[String]) -> Result<(), String> { + let manifest = + find_fastly_manifest(env::current_dir().map_err(|err| err.to_string())?.as_path())?; + let manifest_dir = manifest + .parent() + .ok_or_else(|| "fastly manifest has no parent directory".to_owned())?; + + let status = Command::new("fastly") + .args(["compute", "deploy"]) + .args(extra_args) + .current_dir(manifest_dir) + .status() + .map_err(|err| format!("failed to run fastly CLI: {err}"))?; + if !status.success() { + return Err(format!("fastly compute deploy failed with status {status}")); + } + + Ok(()) } fn find_fastly_manifest(start: &Path) -> Result { @@ -241,18 +210,15 @@ fn find_fastly_manifest(start: &Path) -> Result { .filter_map(Result::ok) .map(|entry| entry.path().to_path_buf()) .filter(|path| { - path.file_name() - .map(|n| n == "fastly.toml") - .unwrap_or(false) + path.file_name().is_some_and(|n| n == "fastly.toml") && path .parent() - .map(|dir| dir.join("Cargo.toml").exists()) - .unwrap_or(false) + .is_some_and(|dir| dir.join("Cargo.toml").exists()) }) .collect(); if candidates.is_empty() { - return Err("could not locate fastly.toml".to_string()); + return Err("could not locate fastly.toml".to_owned()); } candidates.sort_by_key(|path| { @@ -271,7 +237,7 @@ fn locate_artifact( let target_triple = "wasm32-wasip1"; let release_name = format!("{}.wasm", crate_name.replace('-', "_")); - if let Some(custom) = std::env::var_os("CARGO_TARGET_DIR") { + if let Some(custom) = env::var_os("CARGO_TARGET_DIR") { let candidate = PathBuf::from(custom) .join(target_triple) .join("release") @@ -305,6 +271,40 @@ fn locate_artifact( )) } +#[inline] +pub fn register() { + register_adapter(&FASTLY_ADAPTER); + register_adapter_blueprint(&FASTLY_BLUEPRINT); +} + +#[ctor] +fn register_ctor() { + register(); +} + +/// # Errors +/// Returns an error if the Fastly CLI serve command (Viceroy) fails. 
+#[inline] +pub fn serve(extra_args: &[String]) -> Result<(), String> { + let manifest = + find_fastly_manifest(env::current_dir().map_err(|err| err.to_string())?.as_path())?; + let manifest_dir = manifest + .parent() + .ok_or_else(|| "fastly manifest has no parent directory".to_owned())?; + + let status = Command::new("fastly") + .args(["compute", "serve"]) + .args(extra_args) + .current_dir(manifest_dir) + .status() + .map_err(|err| format!("failed to run fastly CLI: {err}"))?; + if !status.success() { + return Err(format!("fastly compute serve failed with status {status}")); + } + + Ok(()) +} + #[cfg(test)] mod tests { use super::*; @@ -312,32 +312,34 @@ mod tests { use tempfile::tempdir; #[test] - fn finds_manifest_in_current_directory() { + fn finds_closest_manifest_when_multiple_exist() { let dir = tempdir().unwrap(); let root = dir.path(); fs::write(root.join("Cargo.toml"), "[workspace]").unwrap(); - fs::write(root.join("fastly.toml"), "name = \"demo\"").unwrap(); - let manifest = find_fastly_manifest(root).expect("should find manifest"); - assert_eq!(manifest, root.join("fastly.toml")); - } + let first = root.join("crates/first"); + fs::create_dir_all(&first).unwrap(); + fs::write(first.join("Cargo.toml"), "[package]\nname=\"first\"").unwrap(); + fs::write(first.join("fastly.toml"), "name=\"first\"").unwrap(); - #[test] - fn read_package_prefers_package_table() { - let dir = tempdir().unwrap(); - let manifest = dir.path().join("Cargo.toml"); - fs::write(&manifest, "[package]\nname = \"demo\"\n").unwrap(); - let name = read_package_name(&manifest).unwrap(); - assert_eq!(name, "demo"); + let second = root.join("examples/second"); + fs::create_dir_all(&second).unwrap(); + fs::write(second.join("Cargo.toml"), "[package]\nname=\"second\"").unwrap(); + fs::write(second.join("fastly.toml"), "name=\"second\"").unwrap(); + + let found = find_fastly_manifest(&second).unwrap(); + assert_eq!(found, second.join("fastly.toml")); } #[test] - fn 
read_package_falls_back_to_name() { + fn finds_manifest_in_current_directory() { let dir = tempdir().unwrap(); - let manifest = dir.path().join("Cargo.toml"); - fs::write(&manifest, "name = \"demo\"").unwrap(); - let name = read_package_name(&manifest).unwrap(); - assert_eq!(name, "demo"); + let root = dir.path(); + fs::write(root.join("Cargo.toml"), "[workspace]").unwrap(); + fs::write(root.join("fastly.toml"), "name = \"demo\"").unwrap(); + + let manifest = find_fastly_manifest(root).expect("should find manifest"); + assert_eq!(manifest, root.join("fastly.toml")); } #[test] @@ -355,22 +357,20 @@ mod tests { } #[test] - fn finds_closest_manifest_when_multiple_exist() { + fn read_package_falls_back_to_name() { let dir = tempdir().unwrap(); - let root = dir.path(); - fs::write(root.join("Cargo.toml"), "[workspace]").unwrap(); - - let first = root.join("crates/first"); - fs::create_dir_all(&first).unwrap(); - fs::write(first.join("Cargo.toml"), "[package]\nname=\"first\"").unwrap(); - fs::write(first.join("fastly.toml"), "name=\"first\"").unwrap(); - - let second = root.join("examples/second"); - fs::create_dir_all(&second).unwrap(); - fs::write(second.join("Cargo.toml"), "[package]\nname=\"second\"").unwrap(); - fs::write(second.join("fastly.toml"), "name=\"second\"").unwrap(); + let manifest = dir.path().join("Cargo.toml"); + fs::write(&manifest, "name = \"demo\"").unwrap(); + let name = read_package_name(&manifest).unwrap(); + assert_eq!(name, "demo"); + } - let found = find_fastly_manifest(&second).unwrap(); - assert_eq!(found, second.join("fastly.toml")); + #[test] + fn read_package_prefers_package_table() { + let dir = tempdir().unwrap(); + let manifest = dir.path().join("Cargo.toml"); + fs::write(&manifest, "[package]\nname = \"demo\"\n").unwrap(); + let name = read_package_name(&manifest).unwrap(); + assert_eq!(name, "demo"); } } diff --git a/crates/edgezero-adapter-fastly/src/config_store.rs b/crates/edgezero-adapter-fastly/src/config_store.rs index 
b7affd0..e6834f9 100644 --- a/crates/edgezero-adapter-fastly/src/config_store.rs +++ b/crates/edgezero-adapter-fastly/src/config_store.rs @@ -4,6 +4,8 @@ use std::collections::HashMap; use edgezero_core::config_store::{ConfigStore, ConfigStoreError}; +use fastly::config_store::{LookupError, OpenError}; +use fastly::ConfigStore as FastlyConfigStoreInner; /// Config store backed by a Fastly Config Store resource link. pub struct FastlyConfigStore { @@ -11,43 +13,56 @@ pub struct FastlyConfigStore { } enum FastlyConfigStoreBackend { - Fastly(fastly::ConfigStore), + Fastly(FastlyConfigStoreInner), #[cfg(test)] InMemory(HashMap), } impl FastlyConfigStore { - /// Open a Fastly Config Store by resource link name. - /// - /// Returns an error if the configured store cannot be opened. - pub fn try_open(name: &str) -> Result { - fastly::ConfigStore::try_open(name).map(|inner| Self { - inner: FastlyConfigStoreBackend::Fastly(inner), - }) - } - #[cfg(test)] fn from_entries(entries: impl IntoIterator) -> Self { Self { inner: FastlyConfigStoreBackend::InMemory(entries.into_iter().collect()), } } + + /// Open a Fastly Config Store by resource link name. + /// + /// Returns an error if the configured store cannot be opened. + /// + /// # Errors + /// Returns the underlying [`fastly::config_store::OpenError`] when the named store does not exist or cannot be opened. 
+ #[inline] + pub fn try_open(name: &str) -> Result { + FastlyConfigStoreInner::try_open(name).map(|inner| Self { + inner: FastlyConfigStoreBackend::Fastly(inner), + }) + } } impl ConfigStore for FastlyConfigStore { + #[inline] fn get(&self, key: &str) -> Result, ConfigStoreError> { match &self.inner { - FastlyConfigStoreBackend::Fastly(inner) => inner.try_get(key).map_err(map_lookup_error), + FastlyConfigStoreBackend::Fastly(inner) => { + inner.try_get(key).map_err(|err| map_lookup_error(&err)) + } #[cfg(test)] FastlyConfigStoreBackend::InMemory(data) => Ok(data.get(key).cloned()), } } } -fn map_lookup_error(err: fastly::config_store::LookupError) -> ConfigStoreError { +fn map_lookup_error(err: &LookupError) -> ConfigStoreError { + // `LookupError` is from the `fastly` crate; using a wildcard arm guards + // against new variants being added in upstream point releases without + // forcing us into a breaking match every bump. + #[expect( + clippy::wildcard_enum_match_arm, + reason = "external enum; new variants must remain unavailable→unavailable" + )] match err { - fastly::config_store::LookupError::KeyInvalid - | fastly::config_store::LookupError::KeyTooLong => { + LookupError::KeyInvalid | LookupError::KeyTooLong => { ConfigStoreError::invalid_key("invalid config key") } _ => { @@ -63,20 +78,20 @@ mod tests { edgezero_core::config_store_contract_tests!(fastly_config_store_contract, { FastlyConfigStore::from_entries([ - ("contract.key.a".to_string(), "value_a".to_string()), - ("contract.key.b".to_string(), "value_b".to_string()), + ("contract.key.a".to_owned(), "value_a".to_owned()), + ("contract.key.b".to_owned(), "value_b".to_owned()), ]) }); #[test] fn key_invalid_maps_to_invalid_key_error() { - let err = map_lookup_error(fastly::config_store::LookupError::KeyInvalid); + let err = map_lookup_error(&LookupError::KeyInvalid); assert!(matches!(err, ConfigStoreError::InvalidKey { .. 
})); } #[test] fn key_too_long_maps_to_invalid_key_error() { - let err = map_lookup_error(fastly::config_store::LookupError::KeyTooLong); + let err = map_lookup_error(&LookupError::KeyTooLong); assert!(matches!(err, ConfigStoreError::InvalidKey { .. })); } } diff --git a/crates/edgezero-adapter-fastly/src/context.rs b/crates/edgezero-adapter-fastly/src/context.rs index 54f0708..07b4620 100644 --- a/crates/edgezero-adapter-fastly/src/context.rs +++ b/crates/edgezero-adapter-fastly/src/context.rs @@ -9,13 +9,15 @@ pub struct FastlyRequestContext { } impl FastlyRequestContext { - pub fn insert(request: &mut Request, context: FastlyRequestContext) { - request.extensions_mut().insert(context); - } - + #[inline] pub fn get(request: &Request) -> Option<&FastlyRequestContext> { request.extensions().get::() } + + #[inline] + pub fn insert(request: &mut Request, context: FastlyRequestContext) { + request.extensions_mut().insert(context); + } } #[cfg(test)] @@ -24,7 +26,7 @@ mod tests { use edgezero_core::body::Body; use edgezero_core::http::request_builder; use std::net::IpAddr; - use std::str::FromStr; + use std::str::FromStr as _; #[test] fn inserts_and_retrieves_client_ip() { diff --git a/crates/edgezero-adapter-fastly/src/key_value_store.rs b/crates/edgezero-adapter-fastly/src/key_value_store.rs index 98d7d47..111d18f 100644 --- a/crates/edgezero-adapter-fastly/src/key_value_store.rs +++ b/crates/edgezero-adapter-fastly/src/key_value_store.rs @@ -13,6 +13,8 @@ use bytes::Bytes; #[cfg(feature = "fastly")] use edgezero_core::key_value_store::{KvError, KvPage, KvStore}; #[cfg(feature = "fastly")] +use fastly::kv_store::{KVStore, KVStoreError}; +#[cfg(feature = "fastly")] use std::time::Duration; /// KV store backed by Fastly's KV Store API. @@ -20,7 +22,7 @@ use std::time::Duration; /// Wraps a `fastly::kv_store::KVStore` handle obtained via `KVStore::open(name)`. 
#[cfg(feature = "fastly")] pub struct FastlyKvStore { - store: fastly::kv_store::KVStore, + store: KVStore, } #[cfg(feature = "fastly")] @@ -28,9 +30,13 @@ impl FastlyKvStore { /// Open a Fastly KV Store by name. /// /// Returns `KvError::Unavailable` if the store does not exist. + /// + /// # Errors + /// Returns [`KvError::Internal`] if the named KV store cannot be opened. + #[inline] pub fn open(name: &str) -> Result { - let store = fastly::kv_store::KVStore::open(name) - .map_err(|e| KvError::Internal(anyhow::anyhow!("failed to open kv store: {e}")))? + let store = KVStore::open(name) + .map_err(|err| KvError::Internal(anyhow::anyhow!("failed to open kv store: {err}")))? .ok_or(KvError::Unavailable)?; Ok(Self { store }) } @@ -39,70 +45,80 @@ impl FastlyKvStore { #[cfg(feature = "fastly")] #[async_trait(?Send)] impl KvStore for FastlyKvStore { + #[inline] + async fn delete(&self, key: &str) -> Result<(), KvError> { + self.store + .delete(key) + .map_err(|err| KvError::Internal(anyhow::anyhow!("delete failed: {err}"))) + } + + #[inline] + async fn exists(&self, key: &str) -> Result { + Ok(self.get_bytes(key).await?.is_some()) + } + + #[inline] async fn get_bytes(&self, key: &str) -> Result, KvError> { match self.store.lookup(key) { Ok(mut response) => { let bytes = response.take_body_bytes(); Ok(Some(Bytes::from(bytes))) } - Err(fastly::kv_store::KVStoreError::ItemNotFound) => Ok(None), - Err(e) => Err(KvError::Internal(anyhow::anyhow!("lookup failed: {e}"))), + Err(KVStoreError::ItemNotFound) => Ok(None), + Err(err) => Err(KvError::Internal(anyhow::anyhow!("lookup failed: {err}"))), } } - async fn put_bytes(&self, key: &str, value: Bytes) -> Result<(), KvError> { - self.store - .insert(key, value.as_ref()) - .map_err(|e| KvError::Internal(anyhow::anyhow!("insert failed: {e}"))) - } - - async fn put_bytes_with_ttl( - &self, - key: &str, - value: Bytes, - ttl: Duration, - ) -> Result<(), KvError> { - self.store - .build_insert() - .time_to_live(ttl) - 
.execute(key, value.as_ref()) - .map_err(|e| KvError::Internal(anyhow::anyhow!("insert with ttl failed: {e}"))) - } - - async fn delete(&self, key: &str) -> Result<(), KvError> { - self.store - .delete(key) - .map_err(|e| KvError::Internal(anyhow::anyhow!("delete failed: {e}"))) - } - + #[inline] async fn list_keys_page( &self, prefix: &str, cursor: Option<&str>, limit: usize, ) -> Result { - let limit = u32::try_from(limit) - .map_err(|_| KvError::Validation("list limit exceeds u32".to_string()))?; + let limit_u32 = u32::try_from(limit) + .map_err(|_e| KvError::Validation("list limit exceeds u32".to_owned()))?; - let mut request = self.store.build_list().limit(limit); + let mut request = self.store.build_list().limit(limit_u32); if !prefix.is_empty() { request = request.prefix(prefix); } - if let Some(cursor) = cursor.filter(|cursor| !cursor.is_empty()) { - request = request.cursor(cursor); + if let Some(token) = cursor.filter(|token| !token.is_empty()) { + request = request.cursor(token); } let page = request .execute() - .map_err(|e| KvError::Internal(anyhow::anyhow!("list failed: {e}")))?; - let cursor = page.next_cursor().filter(|cursor| !cursor.is_empty()); + .map_err(|err| KvError::Internal(anyhow::anyhow!("list failed: {err}")))?; + let next_cursor = page.next_cursor().filter(|token| !token.is_empty()); Ok(KvPage { + cursor: next_cursor, keys: page.into_keys(), - cursor, }) } + + #[inline] + async fn put_bytes(&self, key: &str, value: Bytes) -> Result<(), KvError> { + self.store + .insert(key, value.as_ref()) + .map_err(|err| KvError::Internal(anyhow::anyhow!("insert failed: {err}"))) + } + + #[inline] + async fn put_bytes_with_ttl( + &self, + key: &str, + value: Bytes, + ttl: Duration, + ) -> Result<(), KvError> { + self.store + .build_insert() + .time_to_live(ttl) + .execute(key, value.as_ref()) + .map_err(|err| KvError::Internal(anyhow::anyhow!("insert with ttl failed: {err}"))) + } } // TODO: integration tests require the Fastly compute environment. 
diff --git a/crates/edgezero-adapter-fastly/src/lib.rs b/crates/edgezero-adapter-fastly/src/lib.rs index e64a6fe..a8cec40 100644 --- a/crates/edgezero-adapter-fastly/src/lib.rs +++ b/crates/edgezero-adapter-fastly/src/lib.rs @@ -5,66 +5,95 @@ pub mod cli; #[cfg(feature = "fastly")] pub mod config_store; -mod context; +pub mod context; #[cfg(feature = "fastly")] pub mod key_value_store; #[cfg(feature = "fastly")] -mod logger; +pub mod logger; #[cfg(feature = "fastly")] -mod proxy; +pub mod proxy; #[cfg(feature = "fastly")] -mod request; +pub mod request; #[cfg(feature = "fastly")] -mod response; +pub mod response; #[cfg(feature = "fastly")] pub mod secret_store; #[cfg(feature = "fastly")] -pub use config_store::FastlyConfigStore; -pub use context::FastlyRequestContext; +use edgezero_core::app::{App, Hooks, FASTLY_ADAPTER}; #[cfg(feature = "fastly")] -pub use proxy::FastlyProxyClient; +use edgezero_core::manifest::{ManifestLoader, ResolvedLoggingConfig}; #[cfg(feature = "fastly")] -#[allow(deprecated)] -pub use request::{ - dispatch, dispatch_with_config, dispatch_with_config_handle, dispatch_with_kv, - dispatch_with_kv_and_secrets, dispatch_with_secrets, into_core_request, DEFAULT_KV_STORE_NAME, -}; +use request::DEFAULT_KV_STORE_NAME; + #[cfg(feature = "fastly")] -pub use response::from_core_response; +pub trait AppExt { + #[deprecated( + note = "AppExt::dispatch() is the low-level manual path and does not inject config-store metadata; prefer run_app(), dispatch_with_config(), or dispatch_with_config_handle()" + )] + /// # Errors + /// Returns an error if the underlying handler returns an error or the response cannot be converted into a Fastly response. 
+ fn dispatch(&self, req: fastly::Request) -> Result; +} + #[cfg(feature = "fastly")] -pub use secret_store::FastlySecretStore; +impl AppExt for App { + #[inline] + fn dispatch(&self, req: fastly::Request) -> Result { + request::dispatch_raw(self, req) + } +} #[cfg(feature = "fastly")] #[derive(Debug, Clone)] pub struct FastlyLogging { + pub echo_stdout: bool, pub endpoint: Option, pub level: log::LevelFilter, - pub echo_stdout: bool, pub use_fastly_logger: bool, } #[cfg(feature = "fastly")] -impl From for FastlyLogging { - fn from(config: edgezero_core::manifest::ResolvedLoggingConfig) -> Self { +impl From for FastlyLogging { + #[inline] + fn from(config: ResolvedLoggingConfig) -> Self { Self { + echo_stdout: config.echo_stdout.unwrap_or(true), endpoint: config.endpoint, level: config.level.into(), - echo_stdout: config.echo_stdout.unwrap_or(true), use_fastly_logger: true, } } } +/// Whether each optional store is required to be present at startup. +/// +/// Using a named struct instead of positional `bool` arguments prevents +/// accidental parameter swaps between `kv_required` and `secrets_required`. +#[cfg(feature = "fastly")] +#[derive(Default)] +struct StoreRequirements { + kv_required: bool, + secrets_required: bool, +} + +/// # Errors +/// Returns [`logger::InitLoggerError::Build`] if the underlying logger +/// builder rejects its inputs (e.g. an empty endpoint), or +/// [`logger::InitLoggerError::SetLogger`] if a global logger is already +/// installed. #[cfg(feature = "fastly")] +#[inline] pub fn init_logger( endpoint: &str, level: log::LevelFilter, echo_stdout: bool, -) -> Result<(), log::SetLoggerError> { +) -> Result<(), logger::InitLoggerError> { logger::init_logger(endpoint, level, echo_stdout) } +/// # Errors +/// Never; this is a no-op stub on builds without the `fastly` feature. 
#[cfg(not(feature = "fastly"))] pub fn init_logger( _endpoint: &str, @@ -74,69 +103,59 @@ pub fn init_logger( Ok(()) } -#[cfg(feature = "fastly")] -pub trait AppExt { - #[deprecated( - note = "AppExt::dispatch() is the low-level manual path and does not inject config-store metadata; prefer run_app(), dispatch_with_config(), or dispatch_with_config_handle()" - )] - fn dispatch(&self, req: fastly::Request) -> Result; -} - -#[cfg(feature = "fastly")] -impl AppExt for edgezero_core::app::App { - #[allow(deprecated)] - fn dispatch(&self, req: fastly::Request) -> Result { - crate::request::dispatch_raw(self, req) - } -} - /// Entry point for a Fastly Compute application. /// /// **Breaking change (pre-1.0):** `manifest_src` is now a required parameter. +/// +/// # Errors +/// Returns an error if the manifest is invalid or any required store cannot be opened. #[cfg(feature = "fastly")] -pub fn run_app( +#[inline] +pub fn run_app( manifest_src: &str, req: fastly::Request, ) -> Result { - let manifest_loader = edgezero_core::manifest::ManifestLoader::load_from_str(manifest_src); + let manifest_loader = ManifestLoader::try_load_from_str(manifest_src) + .map_err(|err| fastly::Error::msg(err.to_string()))?; let manifest = manifest_loader.manifest(); - let logging = manifest.logging_or_default(edgezero_core::app::FASTLY_ADAPTER); + let resolved_logging = manifest.logging_or_default(FASTLY_ADAPTER); // Two-path resolution: `A::config_store()` is set at compile time by the // `#[app]` macro and is the common case. The manifest fallback handles // callers that implement `Hooks` manually without the macro — in that case // `A::config_store()` returns `None` while `[stores.config]` in // `edgezero.toml` may still be present. 
let config_name = A::config_store() - .map(|cfg| { - cfg.name_for_adapter(edgezero_core::app::FASTLY_ADAPTER) - .to_string() - }) + .map(|cfg| cfg.name_for_adapter(FASTLY_ADAPTER).to_owned()) .or_else(|| { - manifest.stores.config.as_ref().map(|cfg| { - cfg.config_store_name(edgezero_core::app::FASTLY_ADAPTER) - .to_string() - }) + manifest + .stores + .config + .as_ref() + .map(|cfg| cfg.config_store_name(FASTLY_ADAPTER).to_owned()) }); - let kv_name = manifest - .kv_store_name(edgezero_core::app::FASTLY_ADAPTER) - .to_string(); + let kv_name = manifest.kv_store_name(FASTLY_ADAPTER).to_owned(); let requirements = StoreRequirements { kv_required: manifest.stores.kv.is_some(), secrets_required: manifest.secret_store_enabled("fastly"), }; + let logging: FastlyLogging = resolved_logging.into(); run_app_with_stores::( - logging.into(), + &logging, req, config_name.as_deref(), &kv_name, - requirements, + &requirements, ) } /// Dispatch with a config store. Prefer this over `run_app_with_logging` for new code. +/// +/// # Errors +/// Returns an error if logger setup fails or the underlying handler returns an error. #[cfg(feature = "fastly")] -pub fn run_app_with_config( - logging: FastlyLogging, +#[inline] +pub fn run_app_with_config( + logging: &FastlyLogging, req: fastly::Request, config_store_name: Option<&str>, ) -> Result { @@ -145,14 +164,18 @@ pub fn run_app_with_config( req, config_store_name, DEFAULT_KV_STORE_NAME, - StoreRequirements::default(), + &StoreRequirements::default(), ) } /// Compatibility wrapper for callers that do not use a config store. +/// +/// # Errors +/// Returns an error if logger setup fails or the underlying handler returns an error. 
#[cfg(feature = "fastly")] -pub fn run_app_with_logging( - logging: FastlyLogging, +#[inline] +pub fn run_app_with_logging( + logging: &FastlyLogging, req: fastly::Request, ) -> Result { run_app_with_stores::( @@ -160,36 +183,25 @@ pub fn run_app_with_logging( req, None, DEFAULT_KV_STORE_NAME, - StoreRequirements::default(), + &StoreRequirements::default(), ) } -/// Whether each optional store is required to be present at startup. -/// -/// Using a named struct instead of positional `bool` arguments prevents -/// accidental parameter swaps between `kv_required` and `secrets_required`. #[cfg(feature = "fastly")] -#[derive(Default)] -struct StoreRequirements { - kv_required: bool, - secrets_required: bool, -} - -#[cfg(feature = "fastly")] -fn run_app_with_stores( - logging: FastlyLogging, +fn run_app_with_stores( + logging: &FastlyLogging, req: fastly::Request, config_store_name: Option<&str>, kv_store_name: &str, - requirements: StoreRequirements, + requirements: &StoreRequirements, ) -> Result { if logging.use_fastly_logger { let endpoint = logging.endpoint.as_deref().unwrap_or("stdout"); - init_logger(endpoint, logging.level, logging.echo_stdout).expect("init fastly logger"); + init_logger(endpoint, logging.level, logging.echo_stdout)?; } let app = A::build_app(); - crate::request::dispatch_with_store_names( + request::dispatch_with_store_names( &app, req, config_store_name, @@ -199,16 +211,18 @@ fn run_app_with_stores( ) } -#[cfg(all(test, feature = "fastly"))] +#[cfg(test)] +#[cfg(feature = "fastly")] mod tests { use super::*; + use edgezero_core::manifest::LogLevel; #[test] fn fastly_logging_from_manifest_converts_defaults() { - let config = edgezero_core::manifest::ResolvedLoggingConfig { - endpoint: Some("endpoint".to_string()), + let config = ResolvedLoggingConfig { echo_stdout: Some(false), - level: edgezero_core::manifest::LogLevel::Debug, + endpoint: Some("endpoint".to_owned()), + level: LogLevel::Debug, }; let logging: FastlyLogging = config.into(); diff 
--git a/crates/edgezero-adapter-fastly/src/logger.rs b/crates/edgezero-adapter-fastly/src/logger.rs index 1fe4716..1b040ea 100644 --- a/crates/edgezero-adapter-fastly/src/logger.rs +++ b/crates/edgezero-adapter-fastly/src/logger.rs @@ -1,18 +1,37 @@ use log::LevelFilter; +/// Errors that can occur when initialising the Fastly logger. +#[derive(Debug, thiserror::Error)] +pub enum InitLoggerError { + /// The `log_fastly::Logger::builder()` rejected its inputs (e.g. the + /// endpoint string is empty). + #[error("failed to build Fastly logger: {0}")] + Build(String), + /// `log::set_boxed_logger` (via `fern`) failed because a global logger + /// was already installed. + #[error(transparent)] + SetLogger(#[from] log::SetLoggerError), +} + /// Initialize logging (opinionated): formatted timestamps using `fern`, /// chained to the Fastly logger. +/// +/// # Errors +/// Returns [`InitLoggerError::Build`] if the underlying logger builder +/// rejects its inputs (e.g. an empty endpoint), or +/// [`InitLoggerError::SetLogger`] if a global logger is already installed. +#[inline] pub fn init_logger( endpoint: &str, level: LevelFilter, echo_stdout: bool, -) -> Result<(), log::SetLoggerError> { +) -> Result<(), InitLoggerError> { let logger = log_fastly::Logger::builder() .default_endpoint(endpoint) .echo_stdout(echo_stdout) .max_level(level) .build() - .expect("failed to build Fastly logger"); + .map_err(|err| InitLoggerError::Build(err.to_string()))?; // Format timestamps in RFC3339 with milliseconds using UTC to avoid TZ issues in WASM. 
let dispatch = fern::Dispatch::new() @@ -23,9 +42,12 @@ pub fn init_logger( chrono::Utc::now().to_rfc3339_opts(chrono::SecondsFormat::Millis, true), record.level(), message - )) + )); }) - .chain(Box::new(logger) as Box); + .chain({ + let boxed: Box = Box::new(logger); + boxed + }); dispatch.apply()?; log::set_max_level(level); diff --git a/crates/edgezero-adapter-fastly/src/proxy.rs b/crates/edgezero-adapter-fastly/src/proxy.rs index daef275..2947f33 100644 --- a/crates/edgezero-adapter-fastly/src/proxy.rs +++ b/crates/edgezero-adapter-fastly/src/proxy.rs @@ -5,25 +5,28 @@ use edgezero_core::body::Body; use edgezero_core::compression::{decode_brotli_stream, decode_gzip_stream}; use edgezero_core::error::EdgeError; use edgezero_core::http::{header, HeaderMap, HeaderValue, Method, Uri}; -use edgezero_core::proxy::{ProxyClient, ProxyRequest, ProxyResponse}; +use edgezero_core::proxy::{ProxyClient, ProxyRequest, ProxyResponse, PROXY_HEADER}; use fastly::{ error::anyhow, http::body::StreamingBody, Backend, Request as FastlyRequest, Response as FastlyResponse, }; -use futures_util::stream::{BoxStream, StreamExt}; -use std::io::{self, Write}; +use futures_util::stream::{BoxStream, StreamExt as _}; +use std::io::{self, Write as _}; use std::time::Duration; const BACKEND_PREFIX: &str = "edgezero-dynamic-"; +type ChunkStream = BoxStream<'static, Result, io::Error>>; + pub struct FastlyProxyClient; #[async_trait(?Send)] impl ProxyClient for FastlyProxyClient { + #[inline] async fn send(&self, request: ProxyRequest) -> Result { let (method, uri, headers, body, _ext) = request.into_parts(); let backend_name = ensure_backend(&uri)?; - let fastly_request = build_fastly_request(method, &uri, headers)?; + let fastly_request = build_fastly_request(method, &uri, &headers); let (mut streaming_body, pending_request) = fastly_request .send_async_streaming(&backend_name) .map_err(EdgeError::internal)?; @@ -31,24 +34,19 @@ impl ProxyClient for FastlyProxyClient { 
streaming_body.finish().map_err(EdgeError::internal)?; let mut fastly_response = pending_request.wait().map_err(EdgeError::internal)?; - let mut proxy_response = convert_response(&mut fastly_response)?; - proxy_response.headers_mut().insert( - edgezero_core::proxy::PROXY_HEADER, - HeaderValue::from_static("fastly"), - ); + let mut proxy_response = convert_response(&mut fastly_response); + proxy_response + .headers_mut() + .insert(PROXY_HEADER, HeaderValue::from_static("fastly")); Ok(proxy_response) } } -fn build_fastly_request( - method: Method, - uri: &Uri, - headers: HeaderMap, -) -> Result { +fn build_fastly_request(method: Method, uri: &Uri, headers: &HeaderMap) -> FastlyRequest { let mut fastly_request = FastlyRequest::new(method.clone(), uri.to_string()); fastly_request.set_method(method); - for (name, value) in headers.iter() { + for (name, value) in headers { if name.as_str().eq_ignore_ascii_case("host") { continue; } @@ -59,34 +57,38 @@ fn build_fastly_request( fastly_request.set_header("Host", host); } - Ok(fastly_request) + fastly_request } -async fn forward_request_body( - body: Body, - streaming_body: &mut StreamingBody, -) -> Result<(), EdgeError> { - match body { - Body::Once(bytes) => { - if !bytes.is_empty() { - streaming_body - .write_all(bytes.as_ref()) - .map_err(EdgeError::internal)?; - } - } - Body::Stream(mut stream) => { - while let Some(chunk) = stream.next().await { - let chunk = chunk.map_err(EdgeError::internal)?; - streaming_body - .write_all(&chunk) - .map_err(EdgeError::internal)?; - } +fn convert_response(fastly_response: &mut FastlyResponse) -> ProxyResponse { + let status = fastly_response.get_status(); + let mut proxy_response = ProxyResponse::new(status, Body::empty()); + + for header in fastly_response.get_header_names() { + if let Some(value) = fastly_response.get_header(header) { + proxy_response.headers_mut().insert(header, value.clone()); } } - streaming_body.flush().map_err(EdgeError::internal)?; + let encoding = 
proxy_response + .headers() + .get(header::CONTENT_ENCODING) + .and_then(|value| value.to_str().ok()) + .map(str::to_ascii_lowercase); - Ok(()) + let body = fastly_response.take_body(); + + let chunk_stream = fastly_body_stream(body); + let body_stream = transform_stream(chunk_stream, encoding.as_deref()); + *proxy_response.body_mut() = Body::from_stream(body_stream); + if encoding.as_deref() == Some("gzip") || encoding.as_deref() == Some("br") { + proxy_response + .headers_mut() + .remove(header::CONTENT_ENCODING); + proxy_response.headers_mut().remove(header::CONTENT_LENGTH); + } + + proxy_response } fn ensure_backend(uri: &Uri) -> Result { @@ -98,15 +100,15 @@ fn ensure_backend(uri: &Uri) -> Result { let is_https = scheme.eq_ignore_ascii_case("https"); let target_port = match (uri.port_u16(), is_https) { - (Some(p), _) => p, + (Some(port), _) => port, (None, true) => 443, (None, false) => 80, }; - let host_with_port = format!("{}:{}", host, target_port); + let host_with_port = format!("{host}:{target_port}"); // Human-readable name: backend_{scheme}_{host}_{port} with dots/colons sanitised - let name_base = format!("{}_{}_{}", scheme, host, target_port); + let name_base = format!("{scheme}_{host}_{target_port}"); let backend_name = format!("{}{}", BACKEND_PREFIX, name_base.replace(['.', ':'], "_")); let mut builder = Backend::builder(&backend_name, &host_with_port) @@ -120,78 +122,65 @@ fn ensure_backend(uri: &Uri) -> Result { .enable_ssl() .sni_hostname(host) .check_certificate(host); - log::debug!("enable ssl for backend: {}", backend_name); + log::debug!("enable ssl for backend: {backend_name}"); } match builder.finish() { Ok(_) => { - log::debug!( - "created dynamic backend: {} -> {}", - backend_name, - host_with_port - ); + log::debug!("created dynamic backend: {backend_name} -> {host_with_port}"); Ok(backend_name) } - Err(e) => { - let msg = e.to_string(); + Err(err) => { + let msg = err.to_string(); if msg.contains("NameInUse") || msg.contains("already in 
use") { - log::debug!("reusing existing dynamic backend: {}", backend_name); + log::debug!("reusing existing dynamic backend: {backend_name}"); Ok(backend_name) } else { Err(EdgeError::internal(anyhow!( - "dynamic backend creation failed ({} -> {}): {}", - backend_name, - host_with_port, - msg + "dynamic backend creation failed ({backend_name} -> {host_with_port}): {msg}" ))) } } } } -fn convert_response(fastly_response: &mut FastlyResponse) -> Result { - let status = fastly_response.get_status(); - let mut proxy_response = ProxyResponse::new(status, Body::empty()); - - for header in fastly_response.get_header_names() { - if let Some(value) = fastly_response.get_header(header) { - proxy_response.headers_mut().insert(header, value.clone()); - } - } - - let encoding = proxy_response - .headers() - .get(header::CONTENT_ENCODING) - .and_then(|value| value.to_str().ok()) - .map(|value| value.to_ascii_lowercase()); - - let body = fastly_response.take_body(); - - let chunk_stream = fastly_body_stream(body); - let body_stream = transform_stream(chunk_stream, encoding.as_deref()); - *proxy_response.body_mut() = Body::from_stream(body_stream); - if encoding.as_deref() == Some("gzip") || encoding.as_deref() == Some("br") { - proxy_response - .headers_mut() - .remove(header::CONTENT_ENCODING); - proxy_response.headers_mut().remove(header::CONTENT_LENGTH); - } - - Ok(proxy_response) -} - -type ChunkStream = BoxStream<'static, Result, io::Error>>; - fn fastly_body_stream(mut body: fastly::Body) -> ChunkStream { try_stream! 
{ - for chunk in body.read_chunks(8 * 1024) { - let chunk = chunk?; + for result in body.read_chunks(8 * 1024) { + let chunk = result?; yield chunk; } } .boxed() } +async fn forward_request_body( + body: Body, + streaming_body: &mut StreamingBody, +) -> Result<(), EdgeError> { + match body { + Body::Once(bytes) => { + if !bytes.is_empty() { + streaming_body + .write_all(bytes.as_ref()) + .map_err(EdgeError::internal)?; + } + } + Body::Stream(mut stream) => { + while let Some(result) = stream.next().await { + let chunk = result.map_err(EdgeError::internal)?; + streaming_body + .write_all(&chunk) + .map_err(EdgeError::internal)?; + } + } + } + + streaming_body.flush().map_err(EdgeError::internal)?; + + Ok(()) +} + fn transform_stream( stream: ChunkStream, encoding: Option<&str>, @@ -209,33 +198,26 @@ mod tests { use brotli::CompressorWriter; use flate2::{write::GzEncoder, Compression}; use futures::executor::block_on; - use std::io::Write; - #[test] - fn stream_handles_identity_and_gzip() { - let mut plain = fastly::Body::new(); - plain.write_all(b"plain").unwrap(); - let body = Body::from_stream(transform_stream(fastly_body_stream(plain), None)); - let collected = collect_body(body); - assert_eq!(collected, b"plain"); - - let mut encoder = GzEncoder::new(Vec::new(), Compression::default()); - encoder.write_all(b"hello gzip").unwrap(); - let compressed = encoder.finish().unwrap(); - let mut gz_body = fastly::Body::new(); - gz_body.write_all(&compressed).unwrap(); - let body = Body::from_stream(transform_stream(fastly_body_stream(gz_body), Some("gzip"))); - let collected = collect_body(body); - assert_eq!(collected, b"hello gzip"); + fn collect_body(body: Body) -> Vec { + match body { + Body::Once(bytes) => bytes.to_vec(), + Body::Stream(mut stream) => block_on(async { + let mut out = Vec::new(); + while let Some(chunk) = stream.next().await { + out.extend_from_slice(&chunk.expect("chunk")); + } + out + }), + } } #[test] fn stream_handles_brotli() { let mut compressed 
= Vec::new(); - { - let mut compressor = CompressorWriter::new(&mut compressed, 4096, 5, 21); - compressor.write_all(b"hello brotli").unwrap(); - } + let mut compressor = CompressorWriter::new(&mut compressed, 4096, 5, 21); + compressor.write_all(b"hello brotli").unwrap(); + drop(compressor); let mut br_body = fastly::Body::new(); br_body.write_all(&compressed).unwrap(); @@ -244,16 +226,20 @@ mod tests { assert_eq!(collected, b"hello brotli"); } - fn collect_body(body: Body) -> Vec { - match body { - Body::Once(bytes) => bytes.to_vec(), - Body::Stream(mut stream) => block_on(async { - let mut out = Vec::new(); - while let Some(chunk) = stream.next().await { - out.extend_from_slice(&chunk.expect("chunk")); - } - out - }), - } + #[test] + fn stream_handles_identity_and_gzip() { + let mut plain = fastly::Body::new(); + plain.write_all(b"plain").unwrap(); + let plain_body = Body::from_stream(transform_stream(fastly_body_stream(plain), None)); + assert_eq!(collect_body(plain_body), b"plain"); + + let mut encoder = GzEncoder::new(Vec::new(), Compression::default()); + encoder.write_all(b"hello gzip").unwrap(); + let compressed = encoder.finish().unwrap(); + let mut gz_body = fastly::Body::new(); + gz_body.write_all(&compressed).unwrap(); + let gzip_body = + Body::from_stream(transform_stream(fastly_body_stream(gz_body), Some("gzip"))); + assert_eq!(collect_body(gzip_body), b"hello gzip"); } } diff --git a/crates/edgezero-adapter-fastly/src/request.rs b/crates/edgezero-adapter-fastly/src/request.rs index 59f7f97..84fae3f 100644 --- a/crates/edgezero-adapter-fastly/src/request.rs +++ b/crates/edgezero-adapter-fastly/src/request.rs @@ -1,6 +1,7 @@ use std::collections::{HashSet, VecDeque}; -use std::io::Read; -use std::sync::{Arc, Mutex, OnceLock}; +use std::fmt::Display; +use std::io::Read as _; +use std::sync::{Arc, Mutex, OnceLock, PoisonError}; use edgezero_core::app::App; use edgezero_core::body::Body; @@ -8,19 +9,49 @@ use 
edgezero_core::config_store::ConfigStoreHandle; use edgezero_core::error::EdgeError; use edgezero_core::http::{request_builder, Request}; use edgezero_core::key_value_store::KvHandle; +use edgezero_core::manifest::DEFAULT_KV_STORE_NAME as CORE_DEFAULT_KV_STORE_NAME; use edgezero_core::proxy::ProxyHandle; use edgezero_core::secret_store::SecretHandle; use fastly::{Error as FastlyError, Request as FastlyRequest, Response as FastlyResponse}; use futures::executor; use crate::config_store::FastlyConfigStore; +use crate::context::FastlyRequestContext; use crate::key_value_store::FastlyKvStore; use crate::proxy::FastlyProxyClient; use crate::response::{from_core_response, parse_uri}; -use crate::FastlyRequestContext; +use crate::secret_store::FastlySecretStore; + +/// Default Fastly KV Store name. +/// +/// If a KV Store with this name exists in your Fastly service, it will +/// be automatically available to handlers via the `Kv` extractor. +pub const DEFAULT_KV_STORE_NAME: &str = CORE_DEFAULT_KV_STORE_NAME; const WARNED_STORE_CACHE_LIMIT: usize = 64; +#[derive(Default)] +struct RecentStringSet { + keys: HashSet, + order: VecDeque, +} + +impl RecentStringSet { + fn insert(&mut self, key: &str, limit: usize) -> bool { + let owned = key.to_owned(); + if !self.keys.insert(owned.clone()) { + return false; + } + self.order.push_back(owned); + while limit > 0 && self.order.len() > limit { + if let Some(oldest) = self.order.pop_front() { + self.keys.remove(&oldest); + } + } + true + } +} + /// Groups the optional per-request store handles injected at dispatch time. /// /// Use `..Default::default()` for fields you do not need: @@ -29,48 +60,10 @@ const WARNED_STORE_CACHE_LIMIT: usize = 64; /// let stores = Stores { kv: Some(kv_handle), ..Default::default() }; /// ``` #[derive(Default)] -pub(crate) struct Stores { - pub(crate) config_store: Option, - pub(crate) kv: Option, - pub(crate) secrets: Option, -} - -/// Default Fastly KV Store name. 
-/// -/// If a KV Store with this name exists in your Fastly service, it will -/// be automatically available to handlers via the `Kv` extractor. -pub const DEFAULT_KV_STORE_NAME: &str = edgezero_core::manifest::DEFAULT_KV_STORE_NAME; - -pub fn into_core_request(mut req: FastlyRequest) -> Result { - let method = req.get_method().clone(); - let uri = parse_uri(req.get_url_str())?; - - let mut builder = request_builder().method(method).uri(uri); - for (name, value) in req.get_headers() { - builder = builder.header(name.as_str(), value.as_bytes()); - } - - let mut body = req.take_body(); - let mut bytes = Vec::new(); - body.read_to_end(&mut bytes).map_err(EdgeError::internal)?; - - let mut request = builder - .body(Body::from(bytes)) - .map_err(EdgeError::internal)?; - - let context = FastlyRequestContext { - client_ip: req.get_client_ip_addr(), - }; - FastlyRequestContext::insert(&mut request, context); - request - .extensions_mut() - .insert(ProxyHandle::with_client(FastlyProxyClient)); - - Ok(request) -} - -pub(crate) fn dispatch_raw(app: &App, req: FastlyRequest) -> Result { - dispatch_with_kv(app, req, DEFAULT_KV_STORE_NAME, false) +struct Stores { + config_store: Option, + kv: Option, + secrets: Option, } /// Low-level manual dispatch. @@ -82,70 +75,117 @@ pub(crate) fn dispatch_raw(app: &App, req: FastlyRequest) -> Result Result { dispatch_raw(app, req) } -/// Dispatch a request with a prepared config-store handle injected into extensions. 
+fn dispatch_core_request( + app: &App, + mut core_request: Request, + stores: Stores, +) -> Result { + if let Some(handle) = stores.config_store { + core_request.extensions_mut().insert(handle); + } + if let Some(handle) = stores.kv { + core_request.extensions_mut().insert(handle); + } + if let Some(handle) = stores.secrets { + core_request.extensions_mut().insert(handle); + } + let response = executor::block_on(app.router().oneshot(core_request)) + .map_err(|err| map_edge_error(&err))?; + from_core_response(response).map_err(|err| map_edge_error(&err)) +} + +pub(crate) fn dispatch_raw(app: &App, req: FastlyRequest) -> Result { + dispatch_with_kv(app, req, DEFAULT_KV_STORE_NAME, false) +} + +/// Dispatch a request with a Fastly Config Store injected into extensions. /// -/// This is the advanced/manual path. Prefer `dispatch_with_config` when you -/// want the adapter to resolve the configured backend for you. +/// If the named store is not available, suppresses repeated warnings for +/// recently seen store names and dispatches without it. /// /// The KV store named [`DEFAULT_KV_STORE_NAME`] is also resolved and injected /// (non-required: unavailable stores are silently skipped). -pub fn dispatch_with_config_handle( +/// +/// # Errors +/// Returns an error if the named config store cannot be opened or the underlying handler returns an error. 
+#[inline] +pub fn dispatch_with_config( app: &App, req: FastlyRequest, - config_store_handle: ConfigStoreHandle, + store_name: &str, ) -> Result { + let config_store_handle = match FastlyConfigStore::try_open(store_name) { + Ok(store) => Some(ConfigStoreHandle::new(Arc::new(store))), + Err(err) => { + warn_missing_store_once(store_name, &err.to_string()); + None + } + }; let kv = resolve_kv_handle(DEFAULT_KV_STORE_NAME, false)?; dispatch_with_handles( app, req, Stores { - config_store: Some(config_store_handle), + config_store: config_store_handle, kv, ..Default::default() }, ) } -/// Dispatch a request with a Fastly Config Store injected into extensions. +/// Dispatch a request with a prepared config-store handle injected into extensions. /// -/// If the named store is not available, suppresses repeated warnings for -/// recently seen store names and dispatches without it. +/// This is the advanced/manual path. Prefer `dispatch_with_config` when you +/// want the adapter to resolve the configured backend for you. /// /// The KV store named [`DEFAULT_KV_STORE_NAME`] is also resolved and injected /// (non-required: unavailable stores are silently skipped). -pub fn dispatch_with_config( +/// +/// # Errors +/// Returns an error if request conversion fails or the underlying handler returns an error. 
+#[inline] +pub fn dispatch_with_config_handle( app: &App, req: FastlyRequest, - store_name: &str, + config_store_handle: ConfigStoreHandle, ) -> Result { - let config_store_handle = match FastlyConfigStore::try_open(store_name) { - Ok(store) => Some(ConfigStoreHandle::new(Arc::new(store))), - Err(err) => { - warn_missing_store_once(store_name, &err.to_string()); - None - } - }; let kv = resolve_kv_handle(DEFAULT_KV_STORE_NAME, false)?; dispatch_with_handles( app, req, Stores { - config_store: config_store_handle, + config_store: Some(config_store_handle), kv, ..Default::default() }, ) } +fn dispatch_with_handles( + app: &App, + req: FastlyRequest, + stores: Stores, +) -> Result { + let core_request = into_core_request(req).map_err(|err| map_edge_error(&err))?; + dispatch_core_request(app, core_request, stores) +} + /// Dispatch a Fastly request with a custom KV store name. /// /// `kv_required` should be `true` when `[stores.kv]` is explicitly present /// in the manifest, causing the request to fail if the store is unavailable /// rather than silently degrading. +/// +/// # Errors +/// Returns an error if the named KV store cannot be opened or the underlying handler returns an error. +#[inline] pub fn dispatch_with_kv( app: &App, req: FastlyRequest, @@ -163,96 +203,44 @@ pub fn dispatch_with_kv( ) } -pub(crate) fn dispatch_with_store_names( +/// Dispatch a Fastly request with both KV and secret stores attached. +/// +/// For most applications, prefer [`crate::run_app`] which resolves all stores +/// from the manifest automatically. Use `dispatch_with_kv_and_secrets` only +/// when you need direct control over the dispatch lifecycle without a manifest. +/// +/// # Errors +/// Returns an error if a required store cannot be opened or the underlying handler returns an error. 
+#[inline] +pub fn dispatch_with_kv_and_secrets( app: &App, req: FastlyRequest, - config_store_name: Option<&str>, kv_store_name: &str, kv_required: bool, secrets_required: bool, ) -> Result { - let config_store_handle = match config_store_name { - Some(store_name) => match FastlyConfigStore::try_open(store_name) { - Ok(store) => Some(ConfigStoreHandle::new(Arc::new(store))), - Err(err) => { - warn_missing_store_once(store_name, &err.to_string()); - None - } - }, - None => None, - }; let kv = resolve_kv_handle(kv_store_name, kv_required)?; let secrets = resolve_secret_handle(secrets_required); dispatch_with_handles( app, req, Stores { - config_store: config_store_handle, kv, secrets, + ..Default::default() }, ) } -fn warn_missing_once( - cache: &'static OnceLock>, - item_type: &str, - name: &str, - detail: &impl std::fmt::Display, -) { - let set = cache.get_or_init(|| Mutex::new(RecentStringSet::default())); - let mut guard = set.lock().unwrap_or_else(|poisoned| poisoned.into_inner()); - if guard.insert(name, WARNED_STORE_CACHE_LIMIT) { - log::warn!("{} '{}' not available: {}", item_type, name, detail); - } -} - -fn warn_missing_store_once(store_name: &str, detail: &str) { - static WARNED_STORES: OnceLock> = OnceLock::new(); - warn_missing_once( - &WARNED_STORES, - "configured Fastly config store", - store_name, - &format!("{}; skipping config-store injection", detail), - ); -} - -#[derive(Default)] -struct RecentStringSet { - keys: HashSet, - order: VecDeque, -} - -impl RecentStringSet { - fn insert(&mut self, key: &str, limit: usize) -> bool { - let owned = key.to_string(); - if !self.keys.insert(owned.clone()) { - return false; - } - self.order.push_back(owned); - while limit > 0 && self.order.len() > limit { - if let Some(oldest) = self.order.pop_front() { - self.keys.remove(&oldest); - } - } - true - } -} - -fn map_edge_error(err: EdgeError) -> FastlyError { - FastlyError::msg(err.to_string()) -} - -fn warn_missing_kv_store_once(kv_store_name: &str, error: 
&impl std::fmt::Display) { - static WARNED_KV_STORES: OnceLock> = OnceLock::new(); - warn_missing_once(&WARNED_KV_STORES, "KV store", kv_store_name, error); -} - /// Dispatch a Fastly request with a secret store attached. /// /// For most applications, prefer [`crate::run_app`] which resolves all stores /// from the manifest automatically. Use `dispatch_with_secrets` only when you /// need direct control over the dispatch lifecycle without a manifest. +/// +/// # Errors +/// Returns an error if the named secret store is required but cannot be opened, or the underlying handler returns an error. +#[inline] pub fn dispatch_with_secrets( app: &App, req: FastlyRequest, @@ -269,82 +257,121 @@ pub fn dispatch_with_secrets( ) } -/// Dispatch a Fastly request with both KV and secret stores attached. -/// -/// For most applications, prefer [`crate::run_app`] which resolves all stores -/// from the manifest automatically. Use `dispatch_with_kv_and_secrets` only -/// when you need direct control over the dispatch lifecycle without a manifest. 
-pub fn dispatch_with_kv_and_secrets( +pub(crate) fn dispatch_with_store_names( app: &App, req: FastlyRequest, + config_store_name: Option<&str>, kv_store_name: &str, kv_required: bool, secrets_required: bool, ) -> Result { + let config_store_handle = match config_store_name { + Some(store_name) => match FastlyConfigStore::try_open(store_name) { + Ok(store) => Some(ConfigStoreHandle::new(Arc::new(store))), + Err(err) => { + warn_missing_store_once(store_name, &err.to_string()); + None + } + }, + None => None, + }; let kv = resolve_kv_handle(kv_store_name, kv_required)?; let secrets = resolve_secret_handle(secrets_required); dispatch_with_handles( app, req, Stores { + config_store: config_store_handle, kv, secrets, - ..Default::default() }, ) } -pub(crate) fn dispatch_with_handles( - app: &App, - req: FastlyRequest, - stores: Stores, -) -> Result { - let core_request = into_core_request(req).map_err(map_edge_error)?; - dispatch_core_request(app, core_request, stores) -} +/// # Errors +/// Returns [`EdgeError::Internal`] if the Fastly request cannot be reconstituted into a core request (e.g., method or URI conversion failure). 
+#[inline] +pub fn into_core_request(mut req: FastlyRequest) -> Result { + let method = req.get_method().clone(); + let uri = parse_uri(req.get_url_str())?; -fn dispatch_core_request( - app: &App, - mut core_request: Request, - stores: Stores, -) -> Result { - if let Some(handle) = stores.config_store { - core_request.extensions_mut().insert(handle); - } - if let Some(handle) = stores.kv { - core_request.extensions_mut().insert(handle); - } - if let Some(handle) = stores.secrets { - core_request.extensions_mut().insert(handle); + let mut builder = request_builder().method(method).uri(uri); + for (name, value) in req.get_headers() { + builder = builder.header(name.as_str(), value.as_bytes()); } - let response = executor::block_on(app.router().oneshot(core_request)); - from_core_response(response).map_err(map_edge_error) + + let mut body = req.take_body(); + let mut bytes = Vec::new(); + body.read_to_end(&mut bytes).map_err(EdgeError::internal)?; + + let mut request = builder + .body(Body::from(bytes)) + .map_err(EdgeError::internal)?; + + let context = FastlyRequestContext { + client_ip: req.get_client_ip_addr(), + }; + FastlyRequestContext::insert(&mut request, context); + request + .extensions_mut() + .insert(ProxyHandle::with_client(FastlyProxyClient)); + + Ok(request) } -pub(crate) fn resolve_kv_handle( +fn map_edge_error(err: &EdgeError) -> FastlyError { + FastlyError::msg(err.to_string()) +} + +fn resolve_kv_handle( kv_store_name: &str, kv_required: bool, ) -> Result, FastlyError> { match FastlyKvStore::open(kv_store_name) { - Ok(store) => Ok(Some(KvHandle::new(std::sync::Arc::new(store)))), - Err(e) => { + Ok(store) => Ok(Some(KvHandle::new(Arc::new(store)))), + Err(err) => { if kv_required { return Err(FastlyError::msg(format!( - "KV store '{}' is explicitly configured but could not be opened: {}", - kv_store_name, e + "KV store '{kv_store_name}' is explicitly configured but could not be opened: {err}" ))); } - warn_missing_kv_store_once(kv_store_name, &e); 
+ warn_missing_kv_store_once(kv_store_name, &err); Ok(None) } } } -pub(crate) fn resolve_secret_handle(secrets_required: bool) -> Option { +fn resolve_secret_handle(secrets_required: bool) -> Option { if !secrets_required { return None; } - Some(SecretHandle::new(std::sync::Arc::new( - crate::secret_store::FastlySecretStore, - ))) + Some(SecretHandle::new(Arc::new(FastlySecretStore))) +} + +fn warn_missing_kv_store_once(kv_store_name: &str, error: &impl Display) { + static WARNED_KV_STORES: OnceLock> = OnceLock::new(); + warn_missing_once(&WARNED_KV_STORES, "KV store", kv_store_name, error); +} + +fn warn_missing_once( + cache: &'static OnceLock>, + item_type: &str, + name: &str, + detail: &impl Display, +) { + let set = cache.get_or_init(|| Mutex::new(RecentStringSet::default())); + let mut guard = set.lock().unwrap_or_else(PoisonError::into_inner); + if guard.insert(name, WARNED_STORE_CACHE_LIMIT) { + log::warn!("{item_type} '{name}' not available: {detail}"); + } +} + +fn warn_missing_store_once(store_name: &str, detail: &str) { + static WARNED_STORES: OnceLock> = OnceLock::new(); + warn_missing_once( + &WARNED_STORES, + "configured Fastly config store", + store_name, + &format!("{detail}; skipping config-store injection"), + ); } diff --git a/crates/edgezero-adapter-fastly/src/response.rs b/crates/edgezero-adapter-fastly/src/response.rs index 617c501..075b235 100644 --- a/crates/edgezero-adapter-fastly/src/response.rs +++ b/crates/edgezero-adapter-fastly/src/response.rs @@ -2,9 +2,13 @@ use edgezero_core::body::Body; use edgezero_core::error::EdgeError; use edgezero_core::http::{Response, Uri}; use fastly::Response as FastlyResponse; -use futures_util::StreamExt; -use std::io::Write; +use futures::executor; +use futures_util::StreamExt as _; +use std::io::Write as _; +/// # Errors +/// Returns [`EdgeError::Internal`] if the response body cannot be streamed to the Fastly send-channel. 
+#[inline] pub fn from_core_response(response: Response) -> Result { let (parts, body) = response.into_parts(); let mut fastly_response = FastlyResponse::from_status(parts.status.as_u16()); @@ -13,24 +17,24 @@ pub fn from_core_response(response: Response) -> Result fastly_response.set_body(bytes.to_vec()), Body::Stream(mut stream) => { let mut fastly_body = fastly::Body::new(); - while let Some(chunk) = futures::executor::block_on(stream.next()) { - let chunk = chunk.map_err(EdgeError::internal)?; + while let Some(result) = executor::block_on(stream.next()) { + let chunk = result.map_err(EdgeError::internal)?; fastly_body.write_all(&chunk).map_err(EdgeError::internal)?; } fastly_response.set_body(fastly_body); } } - for (name, value) in parts.headers.iter() { + for (name, value) in &parts.headers { fastly_response.set_header(name.as_str(), value.as_bytes()); } Ok(fastly_response) } -pub fn parse_uri(uri: &str) -> Result { +pub(crate) fn parse_uri(uri: &str) -> Result { uri.parse::() - .map_err(|err| EdgeError::bad_request(format!("invalid request URI: {}", err))) + .map_err(|err| EdgeError::bad_request(format!("invalid request URI: {err}"))) } #[cfg(test)] diff --git a/crates/edgezero-adapter-fastly/src/secret_store.rs b/crates/edgezero-adapter-fastly/src/secret_store.rs index 6458aa0..e83b2b3 100644 --- a/crates/edgezero-adapter-fastly/src/secret_store.rs +++ b/crates/edgezero-adapter-fastly/src/secret_store.rs @@ -1,7 +1,7 @@ //! Fastly secret store adapter. //! //! Implements `edgezero_core::secret_store::SecretStore` via -//! `FastlySecretStore`, which opens a named Fastly SecretStore on +//! `FastlySecretStore`, which opens a named Fastly `SecretStore` on //! each lookup. 
#[cfg(feature = "fastly")] @@ -9,48 +9,53 @@ use async_trait::async_trait; #[cfg(feature = "fastly")] use bytes::Bytes; #[cfg(feature = "fastly")] -use edgezero_core::secret_store::SecretError; +use edgezero_core::secret_store::{SecretError, SecretStore}; +#[cfg(feature = "fastly")] +use fastly::secret_store::SecretStore as FastlyNativeSecretStore; -/// Internal helper that opens a single named Fastly SecretStore. +/// Internal helper that opens a single named Fastly `SecretStore`. #[cfg(feature = "fastly")] pub struct FastlyNamedStore { - store: fastly::secret_store::SecretStore, + store: FastlyNativeSecretStore, } #[cfg(feature = "fastly")] impl FastlyNamedStore { - /// Open a Fastly SecretStore by name. + pub(crate) fn get_bytes_sync(&self, key: &str) -> Result, SecretError> { + let lookup = self + .store + .try_get(key) + .map_err(|err| SecretError::Internal(anyhow::anyhow!("secret lookup failed: {err}")))?; + + match lookup { + Some(secret) => secret.try_plaintext().map(Some).map_err(|err| { + SecretError::Internal(anyhow::anyhow!("secret decryption failed: {err}")) + }), + None => Ok(None), + } + } + + /// Open a Fastly `SecretStore` by name. /// /// Returns `SecretError::Internal` if the store does not exist or cannot - /// be opened. Unlike `KVStore::open`, the Fastly SecretStore API returns + /// be opened. Unlike `KVStore::open`, the Fastly `SecretStore` API returns /// `Result` (not `Result, _>`), so there /// is no `ok_or` unwrap here. + /// + /// # Errors + /// Returns [`SecretError::Internal`] if the named secret store cannot be opened. 
+ #[inline] pub fn open(name: &str) -> Result { - let store = fastly::secret_store::SecretStore::open(name).map_err(|e| { + let store = FastlyNativeSecretStore::open(name).map_err(|err| { SecretError::Internal(anyhow::anyhow!( - "failed to open secret store '{}': {e}", - name + "failed to open secret store '{name}': {err}" )) })?; Ok(Self { store }) } - - pub(crate) fn get_bytes_sync(&self, key: &str) -> Result, SecretError> { - let secret = self - .store - .try_get(key) - .map_err(|e| SecretError::Internal(anyhow::anyhow!("secret lookup failed: {e}")))?; - - match secret { - Some(secret) => secret.try_plaintext().map(Some).map_err(|e| { - SecretError::Internal(anyhow::anyhow!("secret decryption failed: {e}")) - }), - None => Ok(None), - } - } } -/// Multi-store provider backed by Fastly's SecretStore API. +/// Multi-store provider backed by Fastly's `SecretStore` API. /// /// Opens the named store per call — `FastlyNamedStore::open` is cheap /// (no network; just a handle) so there is no caching. 
@@ -59,12 +64,9 @@ pub struct FastlySecretStore; #[cfg(feature = "fastly")] #[async_trait(?Send)] -impl edgezero_core::secret_store::SecretStore for FastlySecretStore { - async fn get_bytes( - &self, - store_name: &str, - key: &str, - ) -> Result, edgezero_core::secret_store::SecretError> { +impl SecretStore for FastlySecretStore { + #[inline] + async fn get_bytes(&self, store_name: &str, key: &str) -> Result, SecretError> { let store = FastlyNamedStore::open(store_name)?; store.get_bytes_sync(key) } diff --git a/crates/edgezero-adapter-fastly/src/templates/Cargo.toml.hbs b/crates/edgezero-adapter-fastly/src/templates/Cargo.toml.hbs index 238463d..b8cf4b8 100644 --- a/crates/edgezero-adapter-fastly/src/templates/Cargo.toml.hbs +++ b/crates/edgezero-adapter-fastly/src/templates/Cargo.toml.hbs @@ -4,6 +4,9 @@ version = "0.1.0" edition = "2021" publish = false +[lints] +workspace = true + [[bin]] name = "{{proj_fastly}}" path = "src/main.rs" diff --git a/crates/edgezero-adapter-fastly/tests/contract.rs b/crates/edgezero-adapter-fastly/tests/contract.rs index edb2498..3388b55 100644 --- a/crates/edgezero-adapter-fastly/tests/contract.rs +++ b/crates/edgezero-adapter-fastly/tests/contract.rs @@ -3,10 +3,9 @@ #![allow(deprecated)] use bytes::Bytes; -use edgezero_adapter_fastly::{ - dispatch, dispatch_with_config_handle, from_core_response, into_core_request, - FastlyRequestContext, -}; +use edgezero_adapter_fastly::context::FastlyRequestContext; +use edgezero_adapter_fastly::request::{dispatch, dispatch_with_config_handle, into_core_request}; +use edgezero_adapter_fastly::response::from_core_response; use edgezero_core::app::App; use edgezero_core::body::Body; use edgezero_core::config_store::{ConfigStore, ConfigStoreError, ConfigStoreHandle}; @@ -38,7 +37,7 @@ fn build_test_app() -> App { } async fn mirror_body(ctx: RequestContext) -> Result { - let bytes = ctx.request().body().as_bytes().to_vec(); + let bytes = 
ctx.request().body().as_bytes().expect("buffered").to_vec(); let response = response_builder() .status(StatusCode::OK) .body(Body::from(bytes)) @@ -112,7 +111,10 @@ fn into_core_request_preserves_method_uri_headers_body_and_context() { Some("1") ); - assert_eq!(core_request.body().as_bytes(), b"payload"); + assert_eq!( + core_request.body().as_bytes().expect("buffered"), + b"payload" + ); let context = FastlyRequestContext::get(&core_request).expect("context"); assert_eq!(context.client_ip, expected_ip); @@ -184,7 +186,7 @@ fn dispatch_with_config_handle_injects_handle() { #[cfg(all(feature = "fastly", target_arch = "wasm32"))] mod secret_store_compile_check { - use edgezero_adapter_fastly::FastlySecretStore; + use edgezero_adapter_fastly::secret_store::FastlySecretStore; use edgezero_core::secret_store::SecretStore; fn _assert_provider_impl() {} diff --git a/crates/edgezero-adapter-spin/Cargo.toml b/crates/edgezero-adapter-spin/Cargo.toml index 090daad..b8259b5 100644 --- a/crates/edgezero-adapter-spin/Cargo.toml +++ b/crates/edgezero-adapter-spin/Cargo.toml @@ -5,6 +5,9 @@ version = { workspace = true } authors = { workspace = true } license = { workspace = true } +[lints] +workspace = true + [features] default = [] spin = ["dep:spin-sdk"] diff --git a/crates/edgezero-adapter-spin/src/cli.rs b/crates/edgezero-adapter-spin/src/cli.rs index 1e2cbdd..c6e59b6 100644 --- a/crates/edgezero-adapter-spin/src/cli.rs +++ b/crates/edgezero-adapter-spin/src/cli.rs @@ -1,3 +1,4 @@ +use std::env; use std::fs; use std::path::{Path, PathBuf}; use std::process::Command; @@ -6,157 +7,14 @@ use ctor::ctor; use edgezero_adapter::cli_support::{ find_manifest_upwards, find_workspace_root, path_distance, read_package_name, }; +use edgezero_adapter::registry::{register_adapter, Adapter, AdapterAction}; use edgezero_adapter::scaffold::{ register_adapter_blueprint, AdapterBlueprint, AdapterFileSpec, CommandTemplates, DependencySpec, LoggingDefaults, ManifestSpec, ReadmeInfo, 
TemplateRegistration, }; -use edgezero_adapter::{register_adapter, Adapter, AdapterAction}; use walkdir::WalkDir; -const TARGET_TRIPLE: &str = "wasm32-wasip1"; - -pub fn build(extra_args: &[String]) -> Result { - let manifest = find_spin_manifest( - std::env::current_dir() - .map_err(|e| e.to_string())? - .as_path(), - )?; - let manifest_dir = manifest - .parent() - .ok_or_else(|| "spin manifest has no parent directory".to_string())?; - let cargo_manifest = manifest_dir.join("Cargo.toml"); - let crate_name = read_package_name(&cargo_manifest)?; - - let status = Command::new("cargo") - .args([ - "build", - "--release", - "--target", - TARGET_TRIPLE, - "--manifest-path", - cargo_manifest - .to_str() - .ok_or("invalid Cargo manifest path")?, - ]) - .args(extra_args) - .status() - .map_err(|e| format!("failed to run cargo build: {e}"))?; - if !status.success() { - return Err(format!("cargo build failed with status {status}")); - } - - let workspace_root = find_workspace_root(manifest_dir); - let artifact = locate_artifact(&workspace_root, manifest_dir, &crate_name)?; - let pkg_dir = workspace_root.join("pkg"); - fs::create_dir_all(&pkg_dir) - .map_err(|e| format!("failed to create {}: {e}", pkg_dir.display()))?; - let dest = pkg_dir.join(format!("{}.wasm", crate_name.replace('-', "_"))); - fs::copy(&artifact, &dest) - .map_err(|e| format!("failed to copy artifact to {}: {e}", dest.display()))?; - - Ok(dest) -} - -pub fn deploy(extra_args: &[String]) -> Result<(), String> { - let manifest = find_spin_manifest( - std::env::current_dir() - .map_err(|e| e.to_string())? 
- .as_path(), - )?; - let manifest_dir = manifest - .parent() - .ok_or_else(|| "spin manifest has no parent directory".to_string())?; - - let status = Command::new("spin") - .args(["deploy"]) - .args(extra_args) - .current_dir(manifest_dir) - .status() - .map_err(|e| format!("failed to run spin CLI: {e}"))?; - if !status.success() { - return Err(format!("spin deploy failed with status {status}")); - } - - Ok(()) -} - -pub fn serve(extra_args: &[String]) -> Result<(), String> { - let manifest = find_spin_manifest( - std::env::current_dir() - .map_err(|e| e.to_string())? - .as_path(), - )?; - let manifest_dir = manifest - .parent() - .ok_or_else(|| "spin manifest has no parent directory".to_string())?; - - let status = Command::new("spin") - .args(["up"]) - .args(extra_args) - .current_dir(manifest_dir) - .status() - .map_err(|e| format!("failed to run spin CLI: {e}"))?; - if !status.success() { - return Err(format!("spin up failed with status {status}")); - } - - Ok(()) -} - -struct SpinCliAdapter; - -static SPIN_TEMPLATE_REGISTRATIONS: &[TemplateRegistration] = &[ - TemplateRegistration { - name: "spin_Cargo_toml", - contents: include_str!("templates/Cargo.toml.hbs"), - }, - TemplateRegistration { - name: "spin_src_lib_rs", - contents: include_str!("templates/src/lib.rs.hbs"), - }, - TemplateRegistration { - name: "spin_spin_toml", - contents: include_str!("templates/spin.toml.hbs"), - }, -]; - -static SPIN_FILE_SPECS: &[AdapterFileSpec] = &[ - AdapterFileSpec { - template: "spin_Cargo_toml", - output: "Cargo.toml", - }, - AdapterFileSpec { - template: "spin_src_lib_rs", - output: "src/lib.rs", - }, - AdapterFileSpec { - template: "spin_spin_toml", - output: "spin.toml", - }, -]; - -static SPIN_DEPENDENCIES: &[DependencySpec] = &[ - DependencySpec { - key: "dep_edgezero_core_spin", - repo_crate: "crates/edgezero-core", - fallback: "edgezero-core = { git = \"https://git@github.com/stackpop/edgezero.git\", package = \"edgezero-core\", default-features = false }", - 
features: &[], - }, - DependencySpec { - key: "dep_edgezero_adapter_spin", - repo_crate: "crates/edgezero-adapter-spin", - fallback: - "edgezero-adapter-spin = { git = \"https://git@github.com/stackpop/edgezero.git\", package = \"edgezero-adapter-spin\", default-features = false }", - features: &[], - }, - DependencySpec { - key: "dep_edgezero_adapter_spin_wasm", - repo_crate: "crates/edgezero-adapter-spin", - fallback: - "edgezero-adapter-spin = { git = \"https://git@github.com/stackpop/edgezero.git\", package = \"edgezero-adapter-spin\", default-features = false, features = [\"spin\"] }", - features: &["spin"], - }, -]; +static SPIN_ADAPTER: SpinCliAdapter = SpinCliAdapter; static SPIN_BLUEPRINT: AdapterBlueprint = AdapterBlueprint { id: "spin", @@ -192,34 +50,145 @@ static SPIN_BLUEPRINT: AdapterBlueprint = AdapterBlueprint { run_module: "edgezero_adapter_spin", }; -static SPIN_ADAPTER: SpinCliAdapter = SpinCliAdapter; +static SPIN_DEPENDENCIES: &[DependencySpec] = &[ + DependencySpec { + key: "dep_edgezero_core_spin", + repo_crate: "crates/edgezero-core", + fallback: "edgezero-core = { git = \"https://git@github.com/stackpop/edgezero.git\", package = \"edgezero-core\", default-features = false }", + features: &[], + }, + DependencySpec { + key: "dep_edgezero_adapter_spin", + repo_crate: "crates/edgezero-adapter-spin", + fallback: + "edgezero-adapter-spin = { git = \"https://git@github.com/stackpop/edgezero.git\", package = \"edgezero-adapter-spin\", default-features = false }", + features: &[], + }, + DependencySpec { + key: "dep_edgezero_adapter_spin_wasm", + repo_crate: "crates/edgezero-adapter-spin", + fallback: + "edgezero-adapter-spin = { git = \"https://git@github.com/stackpop/edgezero.git\", package = \"edgezero-adapter-spin\", default-features = false, features = [\"spin\"] }", + features: &["spin"], + }, +]; -impl Adapter for SpinCliAdapter { - fn name(&self) -> &'static str { - "spin" - } +static SPIN_FILE_SPECS: &[AdapterFileSpec] = &[ + 
AdapterFileSpec { + template: "spin_Cargo_toml", + output: "Cargo.toml", + }, + AdapterFileSpec { + template: "spin_src_lib_rs", + output: "src/lib.rs", + }, + AdapterFileSpec { + template: "spin_spin_toml", + output: "spin.toml", + }, +]; + +static SPIN_TEMPLATE_REGISTRATIONS: &[TemplateRegistration] = &[ + TemplateRegistration { + name: "spin_Cargo_toml", + contents: include_str!("templates/Cargo.toml.hbs"), + }, + TemplateRegistration { + name: "spin_src_lib_rs", + contents: include_str!("templates/src/lib.rs.hbs"), + }, + TemplateRegistration { + name: "spin_spin_toml", + contents: include_str!("templates/spin.toml.hbs"), + }, +]; +const TARGET_TRIPLE: &str = "wasm32-wasip1"; + +struct SpinCliAdapter; + +impl Adapter for SpinCliAdapter { fn execute(&self, action: AdapterAction, args: &[String]) -> Result<(), String> { match action { AdapterAction::Build => { let artifact = build(args)?; - println!("[edgezero] Spin build complete -> {}", artifact.display()); + log::info!("[edgezero] Spin build complete -> {}", artifact.display()); Ok(()) } AdapterAction::Deploy => deploy(args), AdapterAction::Serve => serve(args), + other => Err(format!("spin adapter does not support {other:?}")), } } + + fn name(&self) -> &'static str { + "spin" + } } -pub fn register() { - register_adapter(&SPIN_ADAPTER); - register_adapter_blueprint(&SPIN_BLUEPRINT); +/// # Errors +/// Returns an error if the Spin CLI build command fails. 
+#[inline] +pub fn build(extra_args: &[String]) -> Result { + let manifest = + find_spin_manifest(env::current_dir().map_err(|err| err.to_string())?.as_path())?; + let manifest_dir = manifest + .parent() + .ok_or_else(|| "spin manifest has no parent directory".to_owned())?; + let cargo_manifest = manifest_dir.join("Cargo.toml"); + let crate_name = read_package_name(&cargo_manifest)?; + + let status = Command::new("cargo") + .args([ + "build", + "--release", + "--target", + TARGET_TRIPLE, + "--manifest-path", + cargo_manifest + .to_str() + .ok_or("invalid Cargo manifest path")?, + ]) + .args(extra_args) + .status() + .map_err(|err| format!("failed to run cargo build: {err}"))?; + if !status.success() { + return Err(format!("cargo build failed with status {status}")); + } + + let workspace_root = find_workspace_root(manifest_dir); + let artifact = locate_artifact(&workspace_root, manifest_dir, &crate_name)?; + let pkg_dir = workspace_root.join("pkg"); + fs::create_dir_all(&pkg_dir) + .map_err(|err| format!("failed to create {}: {err}", pkg_dir.display()))?; + let dest = pkg_dir.join(format!("{}.wasm", crate_name.replace('-', "_"))); + fs::copy(&artifact, &dest) + .map_err(|err| format!("failed to copy artifact to {}: {err}", dest.display()))?; + + Ok(dest) } -#[ctor] -fn register_ctor() { - register(); +/// # Errors +/// Returns an error if the Spin CLI deploy command fails. 
+#[inline] +pub fn deploy(extra_args: &[String]) -> Result<(), String> { + let manifest = + find_spin_manifest(env::current_dir().map_err(|err| err.to_string())?.as_path())?; + let manifest_dir = manifest + .parent() + .ok_or_else(|| "spin manifest has no parent directory".to_owned())?; + + let status = Command::new("spin") + .args(["deploy"]) + .args(extra_args) + .current_dir(manifest_dir) + .status() + .map_err(|err| format!("failed to run spin CLI: {err}"))?; + if !status.success() { + return Err(format!("spin deploy failed with status {status}")); + } + + Ok(()) } fn find_spin_manifest(start: &Path) -> Result { @@ -235,16 +204,15 @@ fn find_spin_manifest(start: &Path) -> Result { .filter_map(Result::ok) .map(|entry| entry.path().to_path_buf()) .filter(|path| { - path.file_name().map(|n| n == "spin.toml").unwrap_or(false) + path.file_name().is_some_and(|n| n == "spin.toml") && path .parent() - .map(|dir| dir.join("Cargo.toml").exists()) - .unwrap_or(false) + .is_some_and(|dir| dir.join("Cargo.toml").exists()) }) .collect(); if candidates.is_empty() { - return Err("could not locate spin.toml".to_string()); + return Err("could not locate spin.toml".to_owned()); } candidates.sort_by_key(|path| { @@ -262,7 +230,7 @@ fn locate_artifact( ) -> Result { let release_name = format!("{}.wasm", crate_name.replace('-', "_")); - if let Some(custom) = std::env::var_os("CARGO_TARGET_DIR") { + if let Some(custom) = env::var_os("CARGO_TARGET_DIR") { let candidate = PathBuf::from(custom) .join(TARGET_TRIPLE) .join("release") @@ -296,11 +264,65 @@ fn locate_artifact( )) } +#[inline] +pub fn register() { + register_adapter(&SPIN_ADAPTER); + register_adapter_blueprint(&SPIN_BLUEPRINT); +} + +#[ctor] +fn register_ctor() { + register(); +} + +/// # Errors +/// Returns an error if the Spin CLI up command fails. 
+#[inline] +pub fn serve(extra_args: &[String]) -> Result<(), String> { + let manifest = + find_spin_manifest(env::current_dir().map_err(|err| err.to_string())?.as_path())?; + let manifest_dir = manifest + .parent() + .ok_or_else(|| "spin manifest has no parent directory".to_owned())?; + + let status = Command::new("spin") + .args(["up"]) + .args(extra_args) + .current_dir(manifest_dir) + .status() + .map_err(|err| format!("failed to run spin CLI: {err}"))?; + if !status.success() { + return Err(format!("spin up failed with status {status}")); + } + + Ok(()) +} + #[cfg(test)] mod tests { use super::*; use tempfile::tempdir; + #[test] + fn finds_closest_manifest_when_multiple_exist() { + let dir = tempdir().unwrap(); + let root = dir.path(); + fs::write(root.join("Cargo.toml"), "[workspace]").unwrap(); + + let first = root.join("crates/first"); + fs::create_dir_all(&first).unwrap(); + fs::write(first.join("Cargo.toml"), "[package]\nname=\"first\"").unwrap(); + fs::write(first.join("spin.toml"), "spin_manifest_version = 2").unwrap(); + + let second = root.join("examples/second"); + fs::create_dir_all(&second).unwrap(); + fs::write(second.join("Cargo.toml"), "[package]\nname=\"second\"").unwrap(); + fs::write(second.join("spin.toml"), "spin_manifest_version = 2").unwrap(); + + let found = find_spin_manifest(&second).unwrap(); + assert_eq!(found, second.join("spin.toml")); + } + #[test] fn finds_manifest_in_current_directory() { let dir = tempdir().unwrap(); @@ -341,24 +363,4 @@ mod tests { let located = locate_artifact(workspace, &manifest_dir, "my-cool-crate").unwrap(); assert_eq!(located, artifact); } - - #[test] - fn finds_closest_manifest_when_multiple_exist() { - let dir = tempdir().unwrap(); - let root = dir.path(); - fs::write(root.join("Cargo.toml"), "[workspace]").unwrap(); - - let first = root.join("crates/first"); - fs::create_dir_all(&first).unwrap(); - fs::write(first.join("Cargo.toml"), "[package]\nname=\"first\"").unwrap(); - 
fs::write(first.join("spin.toml"), "spin_manifest_version = 2").unwrap(); - - let second = root.join("examples/second"); - fs::create_dir_all(&second).unwrap(); - fs::write(second.join("Cargo.toml"), "[package]\nname=\"second\"").unwrap(); - fs::write(second.join("spin.toml"), "spin_manifest_version = 2").unwrap(); - - let found = find_spin_manifest(&second).unwrap(); - assert_eq!(found, second.join("spin.toml")); - } } diff --git a/crates/edgezero-adapter-spin/src/context.rs b/crates/edgezero-adapter-spin/src/context.rs index 98d1dd1..4061d47 100644 --- a/crates/edgezero-adapter-spin/src/context.rs +++ b/crates/edgezero-adapter-spin/src/context.rs @@ -1,4 +1,6 @@ use std::net::IpAddr; +#[cfg(any(test, all(feature = "spin", target_arch = "wasm32")))] +use std::net::SocketAddr; use edgezero_core::http::Request; @@ -16,6 +18,20 @@ pub struct SpinRequestContext { pub full_url: Option, } +impl SpinRequestContext { + /// Retrieve a previously-inserted context from request extensions. + #[inline] + pub fn get(request: &Request) -> Option<&SpinRequestContext> { + request.extensions().get::() + } + + /// Store this context in the request's extensions. + #[inline] + pub fn insert(request: &mut Request, context: SpinRequestContext) { + request.extensions_mut().insert(context); + } +} + /// Parse an IP address from a `host:port` string. /// /// Falls back to parsing the raw value as a bare IP (no port) and also @@ -23,31 +39,29 @@ pub struct SpinRequestContext { #[cfg(any(test, all(feature = "spin", target_arch = "wasm32")))] pub(crate) fn parse_client_addr(raw: &str) -> Option { // Try `ip:port` (IPv4) or `[ip]:port` (IPv6 bracket notation). - if let Ok(sock) = raw.parse::() { + if let Ok(sock) = raw.parse::() { return Some(sock.ip()); } // Bare IP with no port. raw.parse::().ok() } -impl SpinRequestContext { - /// Store this context in the request's extensions. 
- pub fn insert(request: &mut Request, context: SpinRequestContext) { - request.extensions_mut().insert(context); - } - - /// Retrieve a previously-inserted context from request extensions. - pub fn get(request: &Request) -> Option<&SpinRequestContext> { - request.extensions().get::() - } -} - #[cfg(test)] mod tests { use super::*; use edgezero_core::body::Body; use edgezero_core::http::request_builder; - use std::str::FromStr; + use std::str::FromStr as _; + + #[test] + fn get_returns_none_when_missing() { + let request = request_builder() + .uri("https://example.com") + .body(Body::empty()) + .expect("request"); + + assert!(SpinRequestContext::get(&request).is_none()); + } #[test] fn inserts_and_retrieves_context() { @@ -58,7 +72,7 @@ mod tests { let context = SpinRequestContext { client_addr: Some(IpAddr::from_str("127.0.0.1").unwrap()), - full_url: Some("https://example.com/path".to_string()), + full_url: Some("https://example.com/path".to_owned()), }; SpinRequestContext::insert(&mut request, context); @@ -74,19 +88,8 @@ mod tests { } #[test] - fn get_returns_none_when_missing() { - let request = request_builder() - .uri("https://example.com") - .body(Body::empty()) - .expect("request"); - - assert!(SpinRequestContext::get(&request).is_none()); - } - - #[test] - fn parse_client_addr_ipv4_with_port() { - let ip = parse_client_addr("192.168.1.1:8080").unwrap(); - assert_eq!(ip, IpAddr::from_str("192.168.1.1").unwrap()); + fn parse_client_addr_invalid() { + assert!(parse_client_addr("not-an-ip").is_none()); } #[test] @@ -96,9 +99,9 @@ mod tests { } #[test] - fn parse_client_addr_ipv6_bracket() { - let ip = parse_client_addr("[::1]:3000").unwrap(); - assert_eq!(ip, IpAddr::from_str("::1").unwrap()); + fn parse_client_addr_ipv4_with_port() { + let ip = parse_client_addr("192.168.1.1:8080").unwrap(); + assert_eq!(ip, IpAddr::from_str("192.168.1.1").unwrap()); } #[test] @@ -108,7 +111,8 @@ mod tests { } #[test] - fn parse_client_addr_invalid() { - 
assert!(parse_client_addr("not-an-ip").is_none()); + fn parse_client_addr_ipv6_bracket() { + let ip = parse_client_addr("[::1]:3000").unwrap(); + assert_eq!(ip, IpAddr::from_str("::1").unwrap()); } } diff --git a/crates/edgezero-adapter-spin/src/decompress.rs b/crates/edgezero-adapter-spin/src/decompress.rs index 6899022..4b9bab2 100644 --- a/crates/edgezero-adapter-spin/src/decompress.rs +++ b/crates/edgezero-adapter-spin/src/decompress.rs @@ -1,8 +1,12 @@ // Used by proxy.rs (wasm32-gated) and tests; not reachable on native non-test builds. -#![allow(dead_code)] +#![allow( + dead_code, + reason = "wasm32-gated callers; native non-test build has no consumer" +)] use edgezero_core::error::EdgeError; -use std::io::Read; +use flate2::read::GzDecoder; +use std::io::Read as _; /// Maximum decompressed body size (64 MiB). Prevents zip-bomb attacks /// where a small compressed payload expands to exhaust WASI memory. @@ -11,7 +15,11 @@ use std::io::Read; /// module: proxy responses are untrusted external data that may legitimately /// decompress to a larger size, while response streams originate from the /// app's own handlers. -pub(crate) const MAX_DECOMPRESSED_SIZE: usize = 64 * 1024 * 1024; +const MAX_DECOMPRESSED_SIZE: usize = 64 * 1024 * 1024; +/// Same value as [`MAX_DECOMPRESSED_SIZE`] expressed as `u64` for the +/// `Read::take` API. Defined as a sibling constant so neither callsite +/// needs a numeric conversion. +const MAX_DECOMPRESSED_SIZE_U64: u64 = 64 * 1024 * 1024; /// Decompress a buffered body based on the `Content-Encoding` value. 
/// @@ -25,40 +33,38 @@ pub(crate) const MAX_DECOMPRESSED_SIZE: usize = 64 * 1024 * 1024; pub(crate) fn decompress_body(body: Vec, encoding: Option<&str>) -> Result, EdgeError> { match encoding { Some("gzip") => { - let mut decoder = flate2::read::GzDecoder::new(body.as_slice()); - let mut decoded = Vec::with_capacity(body.len().min(MAX_DECOMPRESSED_SIZE)); + let mut decoder = GzDecoder::new(body.as_slice()); + let mut output = Vec::with_capacity(body.len().min(MAX_DECOMPRESSED_SIZE)); decoder .by_ref() - .take(MAX_DECOMPRESSED_SIZE as u64 + 1) - .read_to_end(&mut decoded) - .map_err(|e| { - EdgeError::internal(anyhow::anyhow!("gzip decompression failed: {e}")) + .take(MAX_DECOMPRESSED_SIZE_U64.saturating_add(1)) + .read_to_end(&mut output) + .map_err(|err| { + EdgeError::internal(anyhow::anyhow!("gzip decompression failed: {err}")) })?; - if decoded.len() > MAX_DECOMPRESSED_SIZE { + if output.len() > MAX_DECOMPRESSED_SIZE { return Err(EdgeError::internal(anyhow::anyhow!( - "decompressed body exceeds maximum size of {} bytes", - MAX_DECOMPRESSED_SIZE + "decompressed body exceeds maximum size of {MAX_DECOMPRESSED_SIZE} bytes" ))); } - Ok(decoded) + Ok(output) } Some("br") => { let mut decoder = brotli::Decompressor::new(body.as_slice(), 8192); - let mut decoded = Vec::with_capacity(body.len().min(MAX_DECOMPRESSED_SIZE)); + let mut output = Vec::with_capacity(body.len().min(MAX_DECOMPRESSED_SIZE)); decoder .by_ref() - .take(MAX_DECOMPRESSED_SIZE as u64 + 1) - .read_to_end(&mut decoded) - .map_err(|e| { - EdgeError::internal(anyhow::anyhow!("brotli decompression failed: {e}")) + .take(MAX_DECOMPRESSED_SIZE_U64.saturating_add(1)) + .read_to_end(&mut output) + .map_err(|err| { + EdgeError::internal(anyhow::anyhow!("brotli decompression failed: {err}")) })?; - if decoded.len() > MAX_DECOMPRESSED_SIZE { + if output.len() > MAX_DECOMPRESSED_SIZE { return Err(EdgeError::internal(anyhow::anyhow!( - "decompressed body exceeds maximum size of {} bytes", - MAX_DECOMPRESSED_SIZE 
+ "decompressed body exceeds maximum size of {MAX_DECOMPRESSED_SIZE} bytes" ))); } - Ok(decoded) + Ok(output) } _ => Ok(body), } @@ -69,16 +75,16 @@ mod tests { use super::*; use flate2::write::GzEncoder; use flate2::Compression; - use std::io::Write; + use std::io::Write as _; #[test] fn decompress_body_handles_identity() { let plain = b"hello plain".to_vec(); - let result = decompress_body(plain.clone(), None).unwrap(); - assert_eq!(result, plain); + let none_encoding = decompress_body(plain.clone(), None).unwrap(); + assert_eq!(none_encoding, plain); - let result = decompress_body(plain.clone(), Some("identity")).unwrap(); - assert_eq!(result, plain); + let identity_encoding = decompress_body(plain.clone(), Some("identity")).unwrap(); + assert_eq!(identity_encoding, plain); } #[test] @@ -94,10 +100,9 @@ mod tests { #[test] fn decompress_body_handles_brotli() { let mut compressed = Vec::new(); - { - let mut compressor = brotli::CompressorWriter::new(&mut compressed, 4096, 5, 21); - compressor.write_all(b"hello brotli").unwrap(); - } + let mut compressor = brotli::CompressorWriter::new(&mut compressed, 4096, 5, 21); + compressor.write_all(b"hello brotli").unwrap(); + drop(compressor); let result = decompress_body(compressed, Some("br")).unwrap(); assert_eq!(result, b"hello brotli"); @@ -108,8 +113,8 @@ mod tests { // Create a gzip payload that decompresses to more than MAX_DECOMPRESSED_SIZE. // We compress a stream of zeros which compresses extremely well. 
let mut encoder = GzEncoder::new(Vec::new(), Compression::best()); - let zeros = vec![0u8; 1024 * 1024]; // 1 MiB chunk - for _ in 0..65 { + let zeros = vec![0_u8; 1024 * 1024]; // 1 MiB chunk + for _ in 0_i32..65_i32 { encoder.write_all(&zeros).unwrap(); } let compressed = encoder.finish().unwrap(); diff --git a/crates/edgezero-adapter-spin/src/lib.rs b/crates/edgezero-adapter-spin/src/lib.rs index 9722fb5..82d8eea 100644 --- a/crates/edgezero-adapter-spin/src/lib.rs +++ b/crates/edgezero-adapter-spin/src/lib.rs @@ -3,22 +3,14 @@ #[cfg(feature = "cli")] pub mod cli; -mod context; +pub mod context; mod decompress; #[cfg(all(feature = "spin", target_arch = "wasm32"))] -mod proxy; +pub mod proxy; #[cfg(all(feature = "spin", target_arch = "wasm32"))] -mod request; +pub mod request; #[cfg(all(feature = "spin", target_arch = "wasm32"))] -mod response; - -pub use context::SpinRequestContext; -#[cfg(all(feature = "spin", target_arch = "wasm32"))] -pub use proxy::SpinProxyClient; -#[cfg(all(feature = "spin", target_arch = "wasm32"))] -pub use request::{dispatch, into_core_request}; -#[cfg(all(feature = "spin", target_arch = "wasm32"))] -pub use response::from_core_response; +pub mod response; /// Initialize the logger for Spin. /// @@ -27,6 +19,9 @@ pub use response::from_core_response; /// `#[cfg(all(feature = "spin", target_arch = "wasm32"))]` / /// `#[cfg(not(...))]` branches following the Fastly/Cloudflare pattern. // TODO: wire in real Spin logger when available +/// # Errors +/// Returns [`log::SetLoggerError`] if a global logger is already installed. +#[inline] pub fn init_logger() -> Result<(), log::SetLoggerError> { Ok(()) } @@ -83,5 +78,5 @@ pub async fn run_app( // would panic on every subsequent request. 
let _ = init_logger(); let app = A::build_app(); - dispatch(&app, req).await + request::dispatch(&app, req).await } diff --git a/crates/edgezero-adapter-spin/src/request.rs b/crates/edgezero-adapter-spin/src/request.rs index 736bb2c..ede4474 100644 --- a/crates/edgezero-adapter-spin/src/request.rs +++ b/crates/edgezero-adapter-spin/src/request.rs @@ -86,7 +86,7 @@ fn find_header_string(entries: &[(String, Vec)], name: &str) -> Option anyhow::Result { let core_request = into_core_request(req).await?; - let response = app.router().oneshot(core_request).await; + let response = app.router().oneshot(core_request).await?; Ok(from_core_response(response).await?) } diff --git a/crates/edgezero-adapter-spin/src/templates/Cargo.toml.hbs b/crates/edgezero-adapter-spin/src/templates/Cargo.toml.hbs index d6f3a2f..0cff912 100644 --- a/crates/edgezero-adapter-spin/src/templates/Cargo.toml.hbs +++ b/crates/edgezero-adapter-spin/src/templates/Cargo.toml.hbs @@ -4,6 +4,9 @@ version = "0.1.0" edition = "2021" publish = false +[lints] +workspace = true + [lib] crate-type = ["cdylib"] path = "src/lib.rs" diff --git a/crates/edgezero-adapter-spin/tests/contract.rs b/crates/edgezero-adapter-spin/tests/contract.rs index 2df70de..2311db1 100644 --- a/crates/edgezero-adapter-spin/tests/contract.rs +++ b/crates/edgezero-adapter-spin/tests/contract.rs @@ -1,5 +1,13 @@ +// Integration test target (`tests/contract.rs`) — clippy doesn't apply +// `allow-*-in-tests` to integration tests by default, so opt back in here. 
+#![allow( + clippy::expect_used, + clippy::tests_outside_test_module, + reason = "integration test target — top-level test fns are correct here" +)] + use bytes::Bytes; -use edgezero_adapter_spin::SpinRequestContext; +use edgezero_adapter_spin::context::SpinRequestContext; use edgezero_core::app::App; use edgezero_core::body::Body; use edgezero_core::context::RequestContext; @@ -20,7 +28,7 @@ fn build_test_app() -> App { } async fn mirror_body(ctx: RequestContext) -> Result { - let bytes = ctx.request().body().as_bytes().to_vec(); + let bytes = ctx.request().body().as_bytes().expect("buffered").to_vec(); let response = response_builder() .status(StatusCode::OK) .body(Body::from(bytes)) @@ -80,10 +88,13 @@ fn router_dispatches_get_and_returns_response() { .body(Body::empty()) .expect("request"); - let response = block_on(app.router().oneshot(request)); + let response = block_on(app.router().oneshot(request)).expect("response"); assert_eq!(response.status(), StatusCode::OK); - assert_eq!(response.body().as_bytes(), b"http://example.com/uri"); + assert_eq!( + response.body().as_bytes().expect("buffered"), + b"http://example.com/uri" + ); } #[test] @@ -95,10 +106,13 @@ fn router_dispatches_post_with_body() { .body(Body::from(b"echo-payload".to_vec())) .expect("request"); - let response = block_on(app.router().oneshot(request)); + let response = block_on(app.router().oneshot(request)).expect("response"); assert_eq!(response.status(), StatusCode::OK); - assert_eq!(response.body().as_bytes(), b"echo-payload"); + assert_eq!( + response.body().as_bytes().expect("buffered"), + b"echo-payload" + ); } #[test] @@ -110,14 +124,14 @@ fn router_dispatches_streaming_route() { .body(Body::empty()) .expect("request"); - let response = block_on(app.router().oneshot(request)); + let response = block_on(app.router().oneshot(request)).expect("response"); assert_eq!(response.status(), StatusCode::OK); let (_, body) = response.into_parts(); let mut stream = 
body.into_stream().expect("should be a stream"); let collected = block_on(async { - use futures::StreamExt; + use futures::StreamExt as _; let mut out = Vec::new(); while let Some(chunk) = stream.next().await { out.extend_from_slice(&chunk.expect("chunk")); @@ -138,7 +152,7 @@ fn router_dispatches_streaming_route() { #[cfg(all(feature = "spin", target_arch = "wasm32"))] mod wasm { use super::*; - use edgezero_adapter_spin::from_core_response; + use edgezero_adapter_spin::response::from_core_response; #[test] fn from_core_response_translates_status_and_headers() { @@ -154,7 +168,7 @@ mod wasm { assert_eq!(*spin_response.status(), 201); let header = spin_response .headers() - .find(|(name, _)| name == "x-edgezero-res"); + .find(|(name, _)| *name == "x-edgezero-res"); assert!(header.is_some()); }); } diff --git a/crates/edgezero-adapter/Cargo.toml b/crates/edgezero-adapter/Cargo.toml index d16b796..07463ff 100644 --- a/crates/edgezero-adapter/Cargo.toml +++ b/crates/edgezero-adapter/Cargo.toml @@ -5,12 +5,14 @@ edition = "2021" license = { workspace = true } description = "Adapter registry and traits for EdgeZero adapters" +[lints] +workspace = true + [features] default = [] cli = ["dep:toml"] [dependencies] -once_cell = { workspace = true } toml = { workspace = true, optional = true } [dev-dependencies] diff --git a/crates/edgezero-adapter/src/cli_support.rs b/crates/edgezero-adapter/src/cli_support.rs index a73d6e8..aacbb9e 100644 --- a/crates/edgezero-adapter/src/cli_support.rs +++ b/crates/edgezero-adapter/src/cli_support.rs @@ -1,9 +1,14 @@ -#![allow(dead_code)] +#![allow( + dead_code, + reason = "helpers consumed conditionally via the `cli` feature in adapter crates" +)] use std::fs; use std::path::{Path, PathBuf}; /// Walks up the directory tree looking for `manifest_name` alongside a `Cargo.toml`. 
+#[inline] +#[must_use] pub fn find_manifest_upwards(start: &Path, manifest_name: &str) -> Option { let mut current = Some(start); while let Some(dir) = current { @@ -20,6 +25,8 @@ pub fn find_manifest_upwards(start: &Path, manifest_name: &str) -> Option PathBuf { let mut current: Option<&Path> = Some(dir); let mut candidate: Option = None; @@ -29,7 +36,7 @@ pub fn find_workspace_root(dir: &Path) -> PathBuf { if cargo.exists() { candidate = Some(path.to_path_buf()); if fs::read_to_string(&cargo) - .map(|s| s.contains("[workspace]")) + .map(|contents| contents.contains("[workspace]")) .unwrap_or(false) { break; @@ -42,23 +49,29 @@ pub fn find_workspace_root(dir: &Path) -> PathBuf { } /// Calculates the path distance between two directories based on shared leading components. -pub fn path_distance(a: &Path, b: &Path) -> usize { - let a_components: Vec<_> = a.components().collect(); - let b_components: Vec<_> = b.components().collect(); - - let mut common = 0; - for (ac, bc) in a_components.iter().zip(&b_components) { - if ac == bc { - common += 1; - } else { - break; - } - } - - (a_components.len() - common) + (b_components.len() - common) +#[inline] +#[must_use] +pub fn path_distance(left: &Path, right: &Path) -> usize { + let left_components: Vec<_> = left.components().collect(); + let right_components: Vec<_> = right.components().collect(); + + let common = left_components + .iter() + .zip(&right_components) + .take_while(|&(lhs, rhs)| lhs == rhs) + .count(); + + left_components + .len() + .saturating_sub(common) + .saturating_add(right_components.len().saturating_sub(common)) } /// Reads the crate name from a `Cargo.toml`, supporting both the inline and `[package]` forms. +/// +/// # Errors +/// Returns an error if the manifest cannot be read or its `[package].name` field is missing. 
+#[inline] pub fn read_package_name(manifest: &Path) -> Result { let contents = fs::read_to_string(manifest) .map_err(|err| format!("failed to read {}: {err}", manifest.display()))?; @@ -70,11 +83,11 @@ pub fn read_package_name(manifest: &Path) -> Result { .and_then(|pkg| pkg.get("name")) .and_then(|value| value.as_str()) { - return Ok(name.to_string()); + return Ok(name.to_owned()); } if let Some(name) = table.get("name").and_then(|value| value.as_str()) { - return Ok(name.to_string()); + return Ok(name.to_owned()); } Err(format!( @@ -145,9 +158,9 @@ mod tests { #[test] fn path_distance_counts_divergence() { - let a = Path::new("/a/b/c"); - let b = Path::new("/a/b/d/e"); - assert_eq!(path_distance(a, b), 3); + let left = Path::new("/a/b/c"); + let right = Path::new("/a/b/d/e"); + assert_eq!(path_distance(left, right), 3); } #[test] diff --git a/crates/edgezero-adapter/src/lib.rs b/crates/edgezero-adapter/src/lib.rs index 5b59436..607548d 100644 --- a/crates/edgezero-adapter/src/lib.rs +++ b/crates/edgezero-adapter/src/lib.rs @@ -1,6 +1,4 @@ -mod registry; - -pub use registry::{get_adapter, register_adapter, registered_adapters, Adapter, AdapterAction}; +pub mod registry; pub mod scaffold; diff --git a/crates/edgezero-adapter/src/registry.rs b/crates/edgezero-adapter/src/registry.rs index 2f9616c..e4b939d 100644 --- a/crates/edgezero-adapter/src/registry.rs +++ b/crates/edgezero-adapter/src/registry.rs @@ -1,48 +1,48 @@ -use once_cell::sync::Lazy; use std::collections::HashMap; -use std::sync::RwLock; +use std::sync::{LazyLock, PoisonError, RwLock}; -/// Actions the EdgeZero CLI can request from an adapter implementation. +static REGISTRY: LazyLock>> = + LazyLock::new(|| RwLock::new(HashMap::new())); + +/// Actions the `EdgeZero` CLI can request from an adapter implementation. 
#[derive(Debug, Clone, Copy, PartialEq, Eq)] +#[non_exhaustive] pub enum AdapterAction { Build, Deploy, Serve, } -/// Interface implemented by adapter crates to integrate with the EdgeZero CLI. +/// Interface implemented by adapter crates to integrate with the `EdgeZero` CLI. pub trait Adapter: Sync + Send { - /// Name used to reference the adapter (case-insensitive). - fn name(&self) -> &'static str; - /// Execute the requested action with optional adapter-specific args. + /// + /// # Errors + /// Returns an error string if the requested adapter action fails. fn execute(&self, action: AdapterAction, args: &[String]) -> Result<(), String>; -} -static REGISTRY: Lazy>> = - Lazy::new(|| RwLock::new(HashMap::new())); + /// Name used to reference the adapter (case-insensitive). + fn name(&self) -> &'static str; +} /// Registers an adapter so it can be discovered by the CLI. +#[inline] pub fn register_adapter(adapter: &'static dyn Adapter) { - let mut registry = REGISTRY - .write() - .expect("edgezero adapter registry lock poisoned"); + let mut registry = REGISTRY.write().unwrap_or_else(PoisonError::into_inner); registry.insert(adapter.name().to_ascii_lowercase(), adapter); } /// Looks up an adapter by name. +#[inline] pub fn get_adapter(name: &str) -> Option<&'static dyn Adapter> { - let registry = REGISTRY - .read() - .expect("edgezero adapter registry lock poisoned"); + let registry = REGISTRY.read().unwrap_or_else(PoisonError::into_inner); registry.get(&name.to_ascii_lowercase()).copied() } /// Returns the names of all registered adapters. 
+#[inline] pub fn registered_adapters() -> Vec { - let registry = REGISTRY - .read() - .expect("edgezero adapter registry lock poisoned"); + let registry = REGISTRY.read().unwrap_or_else(PoisonError::into_inner); let mut names: Vec = registry.keys().cloned().collect(); names.sort(); names @@ -51,41 +51,39 @@ pub fn registered_adapters() -> Vec { #[cfg(test)] mod tests { use super::*; - use once_cell::sync::Lazy; use std::sync::atomic::{AtomicUsize, Ordering}; - use std::sync::Mutex; + use std::sync::{LazyLock, Mutex}; + static FIRST: TestAdapter = TestAdapter { + hit_value: 1, + name: "dummy", + }; static HIT: AtomicUsize = AtomicUsize::new(0); - static TEST_LOCK: Lazy> = Lazy::new(|| Mutex::new(())); + static OTHER: TestAdapter = TestAdapter { + hit_value: 3, + name: "other", + }; + static SECOND: TestAdapter = TestAdapter { + hit_value: 2, + name: "dummy", + }; + static TEST_LOCK: LazyLock> = LazyLock::new(|| Mutex::new(())); struct TestAdapter { - name: &'static str, hit_value: usize, + name: &'static str, } impl Adapter for TestAdapter { - fn name(&self) -> &'static str { - self.name - } - fn execute(&self, _action: AdapterAction, _args: &[String]) -> Result<(), String> { HIT.store(self.hit_value, Ordering::SeqCst); Ok(()) } - } - static FIRST: TestAdapter = TestAdapter { - name: "dummy", - hit_value: 1, - }; - static SECOND: TestAdapter = TestAdapter { - name: "dummy", - hit_value: 2, - }; - static OTHER: TestAdapter = TestAdapter { - name: "other", - hit_value: 3, - }; + fn name(&self) -> &'static str { + self.name + } + } fn reset() { let mut registry = super::REGISTRY.write().expect("registry lock"); @@ -125,6 +123,6 @@ mod tests { register_adapter(&OTHER); register_adapter(&FIRST); let adapters = registered_adapters(); - assert_eq!(adapters, vec!["dummy".to_string(), "other".to_string()]); + assert_eq!(adapters, vec!["dummy".to_owned(), "other".to_owned()]); } } diff --git a/crates/edgezero-adapter/src/scaffold.rs b/crates/edgezero-adapter/src/scaffold.rs 
index 3cfbae5..9060184 100644 --- a/crates/edgezero-adapter/src/scaffold.rs +++ b/crates/edgezero-adapter/src/scaffold.rs @@ -1,53 +1,66 @@ -use once_cell::sync::Lazy; use std::collections::HashMap; -use std::sync::RwLock; +use std::sync::{LazyLock, PoisonError, RwLock}; -/// Static handlebars template registration provided by an adapter. -#[derive(Clone, Copy)] -pub struct TemplateRegistration { - pub name: &'static str, - pub contents: &'static str, +static BLUEPRINT_REGISTRY: LazyLock>> = + LazyLock::new(|| RwLock::new(HashMap::new())); + +/// Complete blueprint describing how the CLI should scaffold the adapter. +pub struct AdapterBlueprint { + pub commands: CommandTemplates, + pub crate_suffix: &'static str, + pub dependencies: &'static [DependencySpec], + pub dependency_crate: &'static str, + pub dependency_repo_path: &'static str, + pub display_name: &'static str, + pub extra_dirs: &'static [&'static str], + pub files: &'static [AdapterFileSpec], + pub id: &'static str, + pub logging: LoggingDefaults, + pub manifest: ManifestSpec, + pub readme: ReadmeInfo, + pub run_module: &'static str, + pub template_registrations: &'static [TemplateRegistration], } /// Specifies which template renders to a given adapter-relative output file. #[derive(Clone, Copy)] pub struct AdapterFileSpec { - pub template: &'static str, pub output: &'static str, -} - -/// Describes a dependency entry inserted into an adapter crate manifest. -#[derive(Clone, Copy)] -pub struct DependencySpec { - pub key: &'static str, - pub repo_crate: &'static str, - pub fallback: &'static str, - pub features: &'static [&'static str], -} - -/// Provides manifest and build configuration defaults for an adapter. -#[derive(Clone, Copy)] -pub struct ManifestSpec { - pub manifest_filename: &'static str, - pub build_target: &'static str, - pub build_profile: &'static str, - pub build_features: &'static [&'static str], + pub template: &'static str, } /// Defines CLI command templates for adapter actions. 
#[derive(Clone, Copy)] pub struct CommandTemplates { pub build: &'static str, - pub serve: &'static str, pub deploy: &'static str, + pub serve: &'static str, +} + +/// Describes a dependency entry inserted into an adapter crate manifest. +#[derive(Clone, Copy)] +pub struct DependencySpec { + pub fallback: &'static str, + pub features: &'static [&'static str], + pub key: &'static str, + pub repo_crate: &'static str, } /// Specifies default logging configuration for a scaffolded adapter crate. #[derive(Clone, Copy)] pub struct LoggingDefaults { + pub echo_stdout: Option, pub endpoint: Option<&'static str>, pub level: &'static str, - pub echo_stdout: Option, +} + +/// Provides manifest and build configuration defaults for an adapter. +#[derive(Clone, Copy)] +pub struct ManifestSpec { + pub build_features: &'static [&'static str], + pub build_profile: &'static str, + pub build_target: &'static str, + pub manifest_filename: &'static str, } /// Supplies README snippets inserted for an adapter when scaffolding. @@ -58,94 +71,70 @@ pub struct ReadmeInfo { pub dev_steps: &'static [&'static str], } -/// Complete blueprint describing how the CLI should scaffold the adapter. -pub struct AdapterBlueprint { - pub id: &'static str, - pub display_name: &'static str, - pub crate_suffix: &'static str, - pub dependency_crate: &'static str, - pub dependency_repo_path: &'static str, - pub template_registrations: &'static [TemplateRegistration], - pub files: &'static [AdapterFileSpec], - pub extra_dirs: &'static [&'static str], - pub dependencies: &'static [DependencySpec], - pub manifest: ManifestSpec, - pub commands: CommandTemplates, - pub logging: LoggingDefaults, - pub readme: ReadmeInfo, - pub run_module: &'static str, +/// Static handlebars template registration provided by an adapter. 
+#[derive(Clone, Copy)] +pub struct TemplateRegistration { + pub contents: &'static str, + pub name: &'static str, } -static BLUEPRINT_REGISTRY: Lazy>> = - Lazy::new(|| RwLock::new(HashMap::new())); - /// Registers the blueprint for an adapter. Latest registration wins. +#[inline] pub fn register_adapter_blueprint(blueprint: &'static AdapterBlueprint) { let mut registry = BLUEPRINT_REGISTRY .write() - .expect("edgezero blueprint registry lock poisoned"); + .unwrap_or_else(PoisonError::into_inner); registry.insert(blueprint.id.to_ascii_lowercase(), blueprint); } /// Returns the known adapter blueprints sorted by adapter id. +#[inline] pub fn registered_blueprints() -> Vec<&'static AdapterBlueprint> { let registry = BLUEPRINT_REGISTRY .read() - .expect("edgezero blueprint registry lock poisoned"); + .unwrap_or_else(PoisonError::into_inner); let mut values: Vec<&'static AdapterBlueprint> = registry.values().copied().collect(); - values.sort_by(|a, b| a.id.cmp(b.id)); + values.sort_by(|left, right| left.id.cmp(right.id)); values } #[cfg(test)] mod tests { use super::*; - use once_cell::sync::Lazy; - use std::sync::Mutex; - - static FIRST_TEMPLATE: TemplateRegistration = TemplateRegistration { - name: "first", - contents: "a", - }; - - static SECOND_TEMPLATE: TemplateRegistration = TemplateRegistration { - name: "second", - contents: "b", - }; + use std::sync::{LazyLock, Mutex}; static BLUEPRINT_ALPHA: AdapterBlueprint = AdapterBlueprint { - id: "alpha", - display_name: "Alpha", + commands: CommandTemplates { + build: "build", + deploy: "deploy", + serve: "serve", + }, crate_suffix: "adapter-alpha", + dependencies: &[DependencySpec { + fallback: "alpha = \"0.1\"", + features: &[], + key: "dep_alpha", + repo_crate: "crates/alpha", + }], dependency_crate: "edgezero-adapter-alpha", dependency_repo_path: "crates/edgezero-adapter-alpha", - template_registrations: &[FIRST_TEMPLATE], + display_name: "Alpha", + extra_dirs: &["src"], files: &[AdapterFileSpec { - template: 
"first", output: "Cargo.toml", + template: "first", }], - extra_dirs: &["src"], - dependencies: &[DependencySpec { - key: "dep_alpha", - repo_crate: "crates/alpha", - fallback: "alpha = \"0.1\"", - features: &[], - }], - manifest: ManifestSpec { - manifest_filename: "alpha.toml", - build_target: "wasm32", - build_profile: "release", - build_features: &[], - }, - commands: CommandTemplates { - build: "build", - serve: "serve", - deploy: "deploy", - }, + id: "alpha", logging: LoggingDefaults { + echo_stdout: Some(true), endpoint: Some("stdout"), level: "info", - echo_stdout: Some(true), + }, + manifest: ManifestSpec { + build_features: &[], + build_profile: "release", + build_target: "wasm32", + manifest_filename: "alpha.toml", }, readme: ReadmeInfo { description: "desc", @@ -153,36 +142,36 @@ mod tests { dev_steps: &["step"], }, run_module: "module", + template_registrations: &[FIRST_TEMPLATE], }; static BLUEPRINT_BETA: AdapterBlueprint = AdapterBlueprint { - id: "beta", - display_name: "Beta", + commands: CommandTemplates { + build: "build", + deploy: "deploy", + serve: "serve", + }, crate_suffix: "adapter-beta", + dependencies: &[], dependency_crate: "edgezero-adapter-beta", dependency_repo_path: "crates/edgezero-adapter-beta", - template_registrations: &[SECOND_TEMPLATE], + display_name: "Beta", + extra_dirs: &[], files: &[AdapterFileSpec { - template: "second", output: "src/main.rs", + template: "second", }], - extra_dirs: &[], - dependencies: &[], - manifest: ManifestSpec { - manifest_filename: "beta.toml", - build_target: "wasm32", - build_profile: "release", - build_features: &[], - }, - commands: CommandTemplates { - build: "build", - serve: "serve", - deploy: "deploy", - }, + id: "beta", logging: LoggingDefaults { + echo_stdout: None, endpoint: None, level: "info", - echo_stdout: None, + }, + manifest: ManifestSpec { + build_features: &[], + build_profile: "release", + build_target: "wasm32", + manifest_filename: "beta.toml", }, readme: ReadmeInfo { 
description: "desc", @@ -190,9 +179,31 @@ mod tests { dev_steps: &[], }, run_module: "module", + template_registrations: &[SECOND_TEMPLATE], + }; + + static FIRST_TEMPLATE: TemplateRegistration = TemplateRegistration { + contents: "a", + name: "first", + }; + + static SECOND_TEMPLATE: TemplateRegistration = TemplateRegistration { + contents: "b", + name: "second", }; - static TEST_LOCK: Lazy> = Lazy::new(|| Mutex::new(())); + static TEST_LOCK: LazyLock> = LazyLock::new(|| Mutex::new(())); + + #[test] + fn latest_blueprint_wins() { + let _guard = TEST_LOCK.lock().expect("lock"); + super::BLUEPRINT_REGISTRY.write().expect("lock").clear(); + register_adapter_blueprint(&BLUEPRINT_ALPHA); + register_adapter_blueprint(&BLUEPRINT_ALPHA); + let blueprints = registered_blueprints(); + assert_eq!(blueprints.len(), 1); + assert_eq!(blueprints[0].id, "alpha"); + } #[test] fn registered_blueprints_sorted() { @@ -206,15 +217,4 @@ mod tests { .collect(); assert_eq!(ids, vec!["alpha", "beta"]); } - - #[test] - fn latest_blueprint_wins() { - let _guard = TEST_LOCK.lock().expect("lock"); - super::BLUEPRINT_REGISTRY.write().expect("lock").clear(); - register_adapter_blueprint(&BLUEPRINT_ALPHA); - register_adapter_blueprint(&BLUEPRINT_ALPHA); - let blueprints = registered_blueprints(); - assert_eq!(blueprints.len(), 1); - assert_eq!(blueprints[0].id, "alpha"); - } } diff --git a/crates/edgezero-cli/Cargo.toml b/crates/edgezero-cli/Cargo.toml index 5aa07e7..801e316 100644 --- a/crates/edgezero-cli/Cargo.toml +++ b/crates/edgezero-cli/Cargo.toml @@ -5,6 +5,9 @@ edition = "2021" license = { workspace = true } description = "EdgeZero CLI: build and deploy to multiple edge adapters" +[lints] +workspace = true + [dependencies] edgezero-core = { workspace = true } edgezero-adapter = { path = "../edgezero-adapter" } @@ -18,7 +21,9 @@ futures = { workspace = true } handlebars = { workspace = true } log = { workspace = true } serde = { workspace = true } +simple_logger = { workspace = true } 
serde_json = { workspace = true} +thiserror = { workspace = true } toml = { workspace = true } [build-dependencies] diff --git a/crates/edgezero-cli/build.rs b/crates/edgezero-cli/build.rs index 170a942..31f9cce 100644 --- a/crates/edgezero-cli/build.rs +++ b/crates/edgezero-cli/build.rs @@ -1,15 +1,17 @@ use std::env; +use std::error::Error; +use std::fmt::Write as _; use std::fs; use std::path::PathBuf; use toml::Value; -fn main() { +fn main() -> Result<(), Box> { println!("cargo:rerun-if-changed=build.rs"); - let manifest_dir = PathBuf::from(env::var("CARGO_MANIFEST_DIR").expect("manifest dir")); + let manifest_dir = PathBuf::from(env::var("CARGO_MANIFEST_DIR")?); let manifest_path = manifest_dir.join("Cargo.toml"); - let manifest_str = fs::read_to_string(&manifest_path).expect("read Cargo.toml"); - let manifest: Value = toml::from_str(&manifest_str).expect("parse Cargo.toml"); + let manifest_str = fs::read_to_string(&manifest_path)?; + let manifest: Value = toml::from_str(&manifest_str)?; let dependencies = manifest .get("dependencies") @@ -23,12 +25,13 @@ fn main() { if !name.starts_with("edgezero-adapter-") { return None; } - let optional = match spec { - Value::Table(ref table) => table + let optional = if let Value::Table(table) = &spec { + table .get("optional") .and_then(Value::as_bool) - .unwrap_or(false), - _ => false, + .unwrap_or(false) + } else { + false }; if !optional { return None; @@ -38,7 +41,9 @@ fn main() { name.replace('-', "_").to_ascii_uppercase() ); println!("cargo:rerun-if-env-changed={feature_env}"); - let enabled = env::var(&feature_env).map(|v| v == "1").unwrap_or(false); + let enabled = env::var(&feature_env) + .map(|val| val == "1") + .unwrap_or(false); enabled.then_some(name) }) .collect(); @@ -53,14 +58,15 @@ fn main() { } else { for adapter in adapters { let crate_ident = adapter.replace('-', "_"); - generated.push_str(&format!( - "#[allow(unused_imports)]\npub(crate) use {ident} as _{ident};\n", - ident = crate_ident - )); + 
writeln!( + generated, + "#[expect(unused_imports, reason = \"adapter linked via feature gate\")]\n\ + pub(crate) use {crate_ident} as _{crate_ident};", + )?; } } - let out_path = - PathBuf::from(env::var("OUT_DIR").expect("OUT_DIR env")).join("linked_adapters.rs"); - fs::write(out_path, generated).expect("write linked_adapters.rs"); + let out_path = PathBuf::from(env::var("OUT_DIR")?).join("linked_adapters.rs"); + fs::write(out_path, generated)?; + Ok(()) } diff --git a/crates/edgezero-cli/src/adapter.rs b/crates/edgezero-cli/src/adapter.rs index 31d38e9..cf5c216 100644 --- a/crates/edgezero-cli/src/adapter.rs +++ b/crates/edgezero-cli/src/adapter.rs @@ -1,6 +1,8 @@ -use edgezero_adapter::{self as adapter_registry, AdapterAction}; +use edgezero_adapter::registry::{self as adapter_registry, AdapterAction}; use edgezero_core::manifest::{Manifest, ManifestLoader, ResolvedEnvironment}; +use std::env; +use std::fmt; use std::path::Path; use std::process::Command; @@ -13,6 +15,17 @@ pub enum Action { Serve, } +impl fmt::Display for Action { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let label = match self { + Action::Build => "build", + Action::Deploy => "deploy", + Action::Serve => "serve", + }; + f.write_str(label) + } +} + impl From for AdapterAction { fn from(value: Action) -> Self { match value { @@ -23,16 +36,45 @@ impl From for AdapterAction { } } +fn apply_environment( + adapter_name: &str, + environment: &ResolvedEnvironment, + command: &mut Command, +) -> Result<(), String> { + for binding in &environment.variables { + if let Some(value) = &binding.value { + command.env(&binding.env, value); + } + } + + let mut missing = Vec::new(); + for binding in &environment.secrets { + if env::var_os(&binding.env).is_none() { + missing.push(format!("{} (env `{}`)", binding.name, binding.env)); + } + } + + if !missing.is_empty() { + return Err(format!( + "adapter `{}` requires the following secrets to be set: {}", + adapter_name, + missing.join(", ") + 
)); + } + + Ok(()) +} + pub fn execute( adapter_name: &str, action: Action, - manifest: Option<&ManifestLoader>, + manifest_loader: Option<&ManifestLoader>, adapter_args: &[String], ) -> Result<(), String> { - if let Some(manifest) = manifest { - if let Some(command) = manifest_command(manifest.manifest(), adapter_name, action) { - let root = manifest.manifest().root().unwrap_or_else(|| Path::new(".")); - let env = manifest.manifest().environment_for(adapter_name); + if let Some(loader) = manifest_loader { + if let Some(command) = manifest_command(loader.manifest(), adapter_name, action) { + let root = loader.manifest().root().unwrap_or_else(|| Path::new(".")); + let env = loader.manifest().environment_for(adapter_name); return run_shell(command, root, adapter_name, action, Some(env), adapter_args); } } @@ -40,15 +82,13 @@ pub fn execute( let adapter = adapter_registry::get_adapter(adapter_name).ok_or_else(|| { let available = adapter_registry::registered_adapters(); if available.is_empty() { - if manifest.is_none() { + if manifest_loader.is_none() { format!( - "adapter `{}` is not registered in this build. Provide an `edgezero.toml` (or set `EDGEZERO_MANIFEST`) so the CLI can load adapters, or rebuild `edgezero-cli` with the `{adapter_name}` adapter feature enabled.", - adapter_name + "adapter `{adapter_name}` is not registered in this build. Provide an `edgezero.toml` (or set `EDGEZERO_MANIFEST`) so the CLI can load adapters, or rebuild `edgezero-cli` with the `{adapter_name}` adapter feature enabled." 
) } else { format!( - "adapter `{}` is not registered (no adapters available)", - adapter_name + "adapter `{adapter_name}` is not registered (no adapters available)" ) } } else { @@ -63,6 +103,19 @@ pub fn execute( adapter.execute(AdapterAction::from(action), adapter_args) } +fn manifest_command<'manifest>( + manifest: &'manifest Manifest, + adapter_name: &str, + action: Action, +) -> Option<&'manifest str> { + let cfg = manifest.adapters.get(adapter_name)?; + match action { + Action::Build => cfg.commands.build.as_deref(), + Action::Deploy => cfg.commands.deploy.as_deref(), + Action::Serve => cfg.commands.serve.as_deref(), + } +} + fn run_shell( command: &str, cwd: &Path, @@ -72,11 +125,11 @@ fn run_shell( adapter_args: &[String], ) -> Result<(), String> { let full_command = if adapter_args.is_empty() { - command.to_string() + command.to_owned() } else { format!("{} {}", command, shell_join(adapter_args)) }; - println!( + log::info!( "[edgezero] executing `{}` for adapter `{}` in {}", full_command, adapter_name, @@ -90,121 +143,63 @@ fn run_shell( apply_environment(adapter_name, &env, &mut cmd)?; } - let status = cmd.status().map_err(|err| { - format!( - "failed to run {} command `{}`: {}", - action, full_command, err - ) - })?; + let status = cmd + .status() + .map_err(|err| format!("failed to run {action} command `{full_command}`: {err}"))?; if status.success() { Ok(()) } else { Err(format!( - "{} command `{}` exited with status {}", - action, full_command, status + "{action} command `{full_command}` exited with status {status}" )) } } -fn shell_join(args: &[String]) -> String { - args.iter() - .map(|arg| shell_escape(arg.as_str())) - .collect::>() - .join(" ") -} - fn shell_escape(arg: &str) -> String { if arg.is_empty() { - "''".to_string() + "''".to_owned() } else if arg .chars() - .all(|c| c.is_ascii_alphanumeric() || "._-/:=@".contains(c)) + .all(|ch| ch.is_ascii_alphanumeric() || "._-/:=@".contains(ch)) { - arg.to_string() + arg.to_owned() } else { 
format!("'{}'", arg.replace('\'', "'\"'\"'")) } } -fn apply_environment( - adapter_name: &str, - environment: &ResolvedEnvironment, - command: &mut Command, -) -> Result<(), String> { - for binding in &environment.variables { - if let Some(value) = &binding.value { - command.env(&binding.env, value); - } - } - - let mut missing = Vec::new(); - for binding in &environment.secrets { - if std::env::var_os(&binding.env).is_none() { - missing.push(format!("{} (env `{}`)", binding.name, binding.env)); - } - } - - if !missing.is_empty() { - return Err(format!( - "adapter `{}` requires the following secrets to be set: {}", - adapter_name, - missing.join(", ") - )); - } - - Ok(()) -} - -impl std::fmt::Display for Action { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let label = match self { - Action::Build => "build", - Action::Deploy => "deploy", - Action::Serve => "serve", - }; - f.write_str(label) - } -} - -fn manifest_command<'a>( - manifest: &'a Manifest, - adapter_name: &str, - action: Action, -) -> Option<&'a str> { - manifest - .adapters - .get(adapter_name) - .and_then(|cfg| match action { - Action::Build => cfg.commands.build.as_deref(), - Action::Deploy => cfg.commands.deploy.as_deref(), - Action::Serve => cfg.commands.serve.as_deref(), - }) +fn shell_join(args: &[String]) -> String { + args.iter() + .map(|arg| shell_escape(arg.as_str())) + .collect::>() + .join(" ") } #[cfg(test)] mod tests { use super::{apply_environment, ResolvedEnvironment}; use edgezero_core::manifest::ResolvedEnvironmentBinding; + use std::env; use std::process::Command; #[test] fn apply_environment_sets_defaults_and_checks_secrets() { - std::env::remove_var("EDGEZERO_TEST_SECRET"); + env::remove_var("EDGEZERO_TEST_SECRET"); let env = ResolvedEnvironment { - variables: vec![ResolvedEnvironmentBinding { - name: "Base".into(), - description: None, - env: "EDGEZERO_TEST_BASE".into(), - value: Some("https://demo".into()), - }], secrets: 
vec![ResolvedEnvironmentBinding { - name: "Secret".into(), description: None, env: "EDGEZERO_TEST_SECRET".into(), + name: "Secret".into(), value: None, }], + variables: vec![ResolvedEnvironmentBinding { + description: None, + env: "EDGEZERO_TEST_BASE".into(), + name: "Base".into(), + value: Some("https://demo".into()), + }], }; let adapter_name = "test-adapter"; @@ -212,16 +207,16 @@ mod tests { let result = apply_environment(adapter_name, &env, &mut Command::new("echo")); assert!(result.is_err()); - std::env::set_var("EDGEZERO_TEST_SECRET", "set"); + env::set_var("EDGEZERO_TEST_SECRET", "set"); let mut cmd = Command::new("echo"); apply_environment(adapter_name, &env, &mut cmd).expect("environment applied"); let has_var = cmd.get_envs().any(|(key, value)| { key.to_str() == Some("EDGEZERO_TEST_BASE") - && value.and_then(|v| v.to_str()) == Some("https://demo") + && value.and_then(|val| val.to_str()) == Some("https://demo") }); assert!(has_var); - std::env::remove_var("EDGEZERO_TEST_SECRET"); + env::remove_var("EDGEZERO_TEST_SECRET"); } #[test] @@ -235,9 +230,9 @@ mod tests { #[test] fn shell_join_combines_arguments_with_escaping() { let args = vec![ - "plain".to_string(), - "with space".to_string(), - "needs'quote".to_string(), + "plain".to_owned(), + "with space".to_owned(), + "needs'quote".to_owned(), ]; let joined = super::shell_join(&args); assert_eq!(joined, "plain 'with space' 'needs'\"'\"'quote'"); diff --git a/crates/edgezero-cli/src/args.rs b/crates/edgezero-cli/src/args.rs index f9a5589..bc7f1a5 100644 --- a/crates/edgezero-cli/src/args.rs +++ b/crates/edgezero-cli/src/args.rs @@ -9,8 +9,6 @@ pub struct Args { #[derive(Subcommand, Debug)] pub enum Command { - /// Create a new EdgeZero app skeleton (multi-crate workspace) - New(NewArgs), /// Build the project for a target edge Build { #[arg(long = "adapter", required = true)] @@ -25,43 +23,36 @@ pub enum Command { #[arg(trailing_var_arg = true, allow_hyphen_values = true)] adapter_args: Vec, }, + /// Run a 
local simulation (if available) + Dev, + /// Create a new `EdgeZero` app skeleton (multi-crate workspace) + New(NewArgs), /// Run a local simulation (adapter-specific) Serve { #[arg(long = "adapter", required = true)] adapter: String, }, - /// Run a local simulation (if available) - Dev, } #[derive(clap::Args, Debug)] pub struct NewArgs { - /// App name (e.g., my-edge-app) - pub name: String, /// Directory to create the app in (default: current dir) #[arg(long)] pub dir: Option, /// Force using a local path dependency to edgezero-core (if available) #[arg(long)] pub local_core: bool, + /// App name (e.g., my-edge-app) + pub name: String, } #[cfg(test)] mod tests { use super::*; - use clap::Parser; #[test] - fn parses_new_command_with_defaults() { - let args = Args::try_parse_from(["edgezero", "new", "demo-app"]).expect("parse new"); - match args.cmd { - Command::New(new_args) => { - assert_eq!(new_args.name, "demo-app"); - assert!(new_args.dir.is_none()); - assert!(!new_args.local_core); - } - other => panic!("unexpected command: {other:?}"), - } + fn missing_required_adapter_returns_error() { + Args::try_parse_from(["edgezero", "build"]).expect_err("missing --adapter"); } #[test] @@ -76,20 +67,25 @@ mod tests { "value", ]) .expect("parse build"); - match args.cmd { - Command::Build { - adapter, - adapter_args, - } => { - assert_eq!(adapter, "fastly"); - assert_eq!(adapter_args, vec!["--flag", "value"]); - } - other => panic!("unexpected command: {other:?}"), - } + let Command::Build { + adapter, + adapter_args, + } = args.cmd + else { + panic!("expected Command::Build"); + }; + assert_eq!(adapter, "fastly"); + assert_eq!(adapter_args, vec!["--flag", "value"]); } #[test] - fn missing_required_adapter_returns_error() { - assert!(Args::try_parse_from(["edgezero", "build"]).is_err()); + fn parses_new_command_with_defaults() { + let args = Args::try_parse_from(["edgezero", "new", "demo-app"]).expect("parse new"); + let Command::New(new_args) = args.cmd else { + 
panic!("expected Command::New"); + }; + assert_eq!(new_args.name, "demo-app"); + assert!(new_args.dir.is_none()); + assert!(!new_args.local_core); } } diff --git a/crates/edgezero-cli/src/dev_server.rs b/crates/edgezero-cli/src/dev_server.rs index 7cb6e05..3bd4f0c 100644 --- a/crates/edgezero-cli/src/dev_server.rs +++ b/crates/edgezero-cli/src/dev_server.rs @@ -1,9 +1,11 @@ #![cfg(feature = "edgezero-adapter-axum")] +use std::env; +use std::io::ErrorKind; use std::net::SocketAddr; use std::path::PathBuf; -use edgezero_adapter_axum::{AxumDevServer, AxumDevServerConfig}; +use edgezero_adapter_axum::dev_server::{AxumDevServer, AxumDevServerConfig}; use edgezero_core::manifest::ManifestLoader; use edgezero_core::router::RouterService; @@ -16,17 +18,17 @@ use edgezero_core::{action, extractor::Path, response::Text}; #[cfg(feature = "dev-example")] use app_demo_core::App; #[cfg(feature = "dev-example")] -use edgezero_core::app::Hooks; +use edgezero_core::app::Hooks as _; pub fn run_dev() { match try_run_manifest_axum() { Ok(true) => return, Ok(false) => {} - Err(err) => eprintln!("[edgezero] dev manifest error: {err}"), + Err(err) => log::error!("[edgezero] dev manifest error: {err}"), } let addr = SocketAddr::from(([127, 0, 0, 1], 8787)); - println!( + log::info!( "[edgezero] dev: starting local server on http://{}:{}", addr.ip(), addr.port() @@ -40,7 +42,7 @@ pub fn run_dev() { let server = AxumDevServer::with_config(router, config); if let Err(err) = server.run() { - eprintln!("[edgezero] dev server error: {err}"); + log::error!("[edgezero] dev server error: {err}"); } } @@ -84,9 +86,8 @@ async fn dev_echo(Path(params): Path) -> Text { } fn try_run_manifest_axum() -> Result { - let manifest = match load_manifest_optional()? { - Some(manifest) => manifest, - None => return Ok(false), + let Some(manifest) = load_manifest_optional()? 
else { + return Ok(false); }; if manifest.manifest().adapters.contains_key("axum") { @@ -99,13 +100,12 @@ fn try_run_manifest_axum() -> Result { } fn load_manifest_optional() -> Result, String> { - let path = std::env::var("EDGEZERO_MANIFEST") - .map(PathBuf::from) - .unwrap_or_else(|_| PathBuf::from("edgezero.toml")); + let path = env::var("EDGEZERO_MANIFEST") + .map_or_else(|_| PathBuf::from("edgezero.toml"), PathBuf::from); match ManifestLoader::from_path(&path) { Ok(manifest) => Ok(Some(manifest)), - Err(err) if err.kind() == std::io::ErrorKind::NotFound => Ok(None), + Err(err) if err.kind() == ErrorKind::NotFound => Ok(None), Err(err) => Err(format!("failed to load {}: {err}", path.display())), } } diff --git a/crates/edgezero-cli/src/generator.rs b/crates/edgezero-cli/src/generator.rs index 1bab98b..4c97230 100644 --- a/crates/edgezero-cli/src/generator.rs +++ b/crates/edgezero-cli/src/generator.rs @@ -1,81 +1,128 @@ use crate::args::NewArgs; use crate::scaffold::{ register_templates, resolve_dep_line, sanitize_crate_name, write_tmpl, ResolvedDependency, + ScaffoldError, }; use edgezero_adapter::scaffold; use edgezero_adapter::scaffold::AdapterBlueprint; use handlebars::Handlebars; use serde_json::{Map, Value}; use std::collections::BTreeMap; -use std::fmt::Write as _; +use std::env; +use std::fmt::{self, Write as _}; +use std::fs; +use std::io; use std::path::{Path, PathBuf}; use std::process::Command; +use thiserror::Error; + +/// Errors produced by `edgezero new`. +#[derive(Debug, Error)] +pub enum GeneratorError { + /// An adapter context was constructed with no terminal path component. + /// Should be unreachable given the layout we build, but propagated rather + /// than panicking on the request path. + #[error("adapter context directory has no file name: {}", .0.display())] + AdapterDirMissingFileName(PathBuf), + /// `write!`/`writeln!` to an in-memory `String` buffer failed. 
In + /// practice the only way this can fire is a malformed `Display` impl in + /// one of the rendered values; surfaced as a typed error rather than a + /// silent unwrap. + #[error("failed to format generator output: {0}")] + Format(#[from] fmt::Error), + /// A filesystem read/write/metadata operation failed while preparing the + /// project skeleton. + #[error("io error at {path}: {source}")] + Io { + path: PathBuf, + #[source] + source: io::Error, + }, + /// The target output directory already exists; refusing to overwrite. + #[error("directory '{}' already exists", .0.display())] + OutputDirExists(PathBuf), + /// A template under the workspace scaffold could not be rendered or + /// written. Wraps [`ScaffoldError`] for context. + #[error(transparent)] + Scaffold(#[from] ScaffoldError), +} -struct AdapterContext<'a> { - blueprint: &'a AdapterBlueprint, - dir: PathBuf, +impl GeneratorError { + fn io(path: impl Into, source: io::Error) -> Self { + GeneratorError::Io { + path: path.into(), + source, + } + } +} + +struct AdapterContext<'blueprint> { + blueprint: &'blueprint AdapterBlueprint, data_entries: Vec<(String, String)>, + dir: PathBuf, } struct ProjectLayout { + core_dir: PathBuf, + core_mod: String, + core_name: String, + crates_dir: PathBuf, name: String, out_dir: PathBuf, - crates_dir: PathBuf, - core_name: String, - core_dir: PathBuf, project_mod: String, - core_mod: String, } impl ProjectLayout { - fn new(args: &NewArgs) -> std::io::Result { + fn new(args: &NewArgs) -> Result { let name = sanitize_crate_name(&args.name); - let base_dir = args - .dir - .as_deref() - .map(PathBuf::from) - .unwrap_or_else(|| std::env::current_dir().unwrap()); + let base_dir = match args.dir.as_deref() { + Some(dir) => PathBuf::from(dir), + None => env::current_dir().map_err(|err| GeneratorError::io(".", err))?, + }; let out_dir = base_dir.join(&name); if out_dir.exists() { - return Err(std::io::Error::new( - std::io::ErrorKind::AlreadyExists, - format!("directory '{}' 
already exists", out_dir.display()), - )); + return Err(GeneratorError::OutputDirExists(out_dir)); } - println!("[edgezero] creating project at {}", out_dir.display()); + log::info!("[edgezero] creating project at {}", out_dir.display()); let crates_dir = out_dir.join("crates"); - let core_name = format!("{}-core", name); + let core_name = format!("{name}-core"); let core_dir = crates_dir.join(&core_name); - std::fs::create_dir_all(core_dir.join("src"))?; + let core_src = core_dir.join("src"); + fs::create_dir_all(&core_src).map_err(|err| GeneratorError::io(&core_src, err))?; + let project_mod = name.replace('-', "_"); + let core_mod = core_name.replace('-', "_"); Ok(ProjectLayout { - project_mod: name.replace('-', "_"), - core_mod: core_name.replace('-', "_"), - core_name, core_dir, + core_mod, + core_name, crates_dir, - out_dir, name, + out_dir, + project_mod, }) } } struct AdapterArtifacts { - contexts: Vec>, adapter_ids: Vec, - workspace_members: Vec, + contexts: Vec>, manifest_sections: String, readme_adapter_crates: String, readme_adapter_dev: String, + workspace_members: Vec, } -pub fn generate_new(args: NewArgs) -> std::io::Result<()> { - let layout = ProjectLayout::new(&args)?; +/// # Errors +/// Returns [`GeneratorError`] if any filesystem operation, template render, +/// or layout invariant fails. 
+pub fn generate_new(args: &NewArgs) -> Result<(), GeneratorError> { + let layout = ProjectLayout::new(args)?; let mut workspace_dependencies = seed_workspace_dependencies(); - let cwd = std::env::current_dir().unwrap(); + let cwd = env::current_dir().map_err(|err| GeneratorError::io(".", err))?; let core_crate_line = resolve_core_dependency(&layout, &cwd, &mut workspace_dependencies); let adapter_artifacts = collect_adapter_data(&layout, &cwd, &mut workspace_dependencies)?; @@ -98,7 +145,7 @@ pub fn generate_new(args: NewArgs) -> std::io::Result<()> { render_templates(&layout, &adapter_artifacts.contexts, &data_value)?; initialize_git_repo(&layout.out_dir); - println!( + log::info!( "[edgezero] created new multi-crate app at {}", layout.out_dir.display() ); @@ -108,38 +155,38 @@ pub fn generate_new(args: NewArgs) -> std::io::Result<()> { fn seed_workspace_dependencies() -> BTreeMap { let mut deps = BTreeMap::new(); - deps.insert("bytes".to_string(), "bytes = \"1\"".to_string()); - deps.insert("anyhow".to_string(), "anyhow = \"1\"".to_string()); + deps.insert("bytes".to_owned(), "bytes = \"1\"".to_owned()); + deps.insert("anyhow".to_owned(), "anyhow = \"1\"".to_owned()); deps.insert( - "futures".to_string(), + "futures".to_owned(), "futures = { version = \"0.3\", default-features = false, features = [\"std\", \"executor\"] }" - .to_string(), + .to_owned(), ); - deps.insert("axum".to_string(), "axum = \"0.8\"".to_string()); + deps.insert("axum".to_owned(), "axum = \"0.8\"".to_owned()); deps.insert( - "serde".to_string(), - "serde = { version = \"1\", features = [\"derive\"] }".to_string(), + "serde".to_owned(), + "serde = { version = \"1\", features = [\"derive\"] }".to_owned(), ); - deps.insert("log".to_string(), "log = \"0.4\"".to_string()); + deps.insert("log".to_owned(), "log = \"0.4\"".to_owned()); deps.insert( - "simple_logger".to_string(), - "simple_logger = \"4\"".to_string(), + "simple_logger".to_owned(), + "simple_logger = \"4\"".to_owned(), ); 
deps.insert( - "worker".to_string(), + "worker".to_owned(), "worker = { version = \"0.7\", default-features = false, features = [\"http\"] }" - .to_string(), + .to_owned(), ); - deps.insert("fastly".to_string(), "fastly = \"0.11\"".to_string()); - deps.insert("once_cell".to_string(), "once_cell = \"1\"".to_string()); + deps.insert("fastly".to_owned(), "fastly = \"0.11\"".to_owned()); + deps.insert("once_cell".to_owned(), "once_cell = \"1\"".to_owned()); deps.insert( - "tokio".to_string(), - "tokio = { version = \"1\", features = [\"macros\", \"rt-multi-thread\"] }".to_string(), + "tokio".to_owned(), + "tokio = { version = \"1\", features = [\"macros\", \"rt-multi-thread\"] }".to_owned(), ); - deps.insert("tracing".to_string(), "tracing = \"0.1\"".to_string()); + deps.insert("tracing".to_owned(), "tracing = \"0.1\"".to_owned()); deps.insert( - "spin-sdk".to_string(), - "spin-sdk = { version = \"5.2\", default-features = false }".to_string(), + "spin-sdk".to_owned(), + "spin-sdk = { version = \"5.2\", default-features = false }".to_owned(), ); deps } @@ -169,7 +216,7 @@ fn collect_adapter_data( layout: &ProjectLayout, cwd: &Path, workspace_dependencies: &mut BTreeMap, -) -> std::io::Result { +) -> Result { let mut contexts = Vec::new(); let mut adapter_ids = Vec::new(); let mut workspace_members = Vec::new(); @@ -177,159 +224,208 @@ fn collect_adapter_data( let mut readme_adapter_crates = String::new(); let mut readme_adapter_dev = String::new(); - let blueprints = scaffold::registered_blueprints(); - - for blueprint in blueprints.iter().copied() { + for blueprint in scaffold::registered_blueprints().iter().copied() { let crate_name = format!("{}-{}", layout.name, blueprint.crate_suffix); let adapter_dir = layout.crates_dir.join(&crate_name); - std::fs::create_dir_all(&adapter_dir)?; + fs::create_dir_all(&adapter_dir).map_err(|err| GeneratorError::io(&adapter_dir, err))?; for dir_name in blueprint.extra_dirs { - std::fs::create_dir_all(adapter_dir.join(dir_name))?; + 
let extra = adapter_dir.join(dir_name); + fs::create_dir_all(&extra).map_err(|err| GeneratorError::io(&extra, err))?; } - let mut data_entries: Vec<(String, String)> = Vec::new(); - data_entries.push((format!("proj_{}", blueprint.id), crate_name.clone())); - data_entries.push(( - format!("proj_{}_underscored", blueprint.id), - crate_name.replace('-', "_"), - )); - - for dep in blueprint.dependencies { - let ResolvedDependency { - name, - workspace_line, - crate_line, - } = resolve_dep_line( - &layout.out_dir, - cwd, - dep.repo_crate, - dep.fallback, - dep.features, - ); - workspace_dependencies.entry(name).or_insert(workspace_line); - data_entries.push((dep.key.to_string(), crate_line)); - } + let crate_dir_rel = format!("crates/{crate_name}"); + let data_entries = blueprint_data_entries( + layout, + cwd, + blueprint, + &crate_name, + &crate_dir_rel, + workspace_dependencies, + ); - let crate_dir_rel = format!("crates/{}", crate_name); - - // Compute the relative path from the adapter crate to the workspace - // target directory so templates can reference build artifacts. 
- let depth = crate_dir_rel.matches('/').count() + 1; - data_entries.push(( - format!("target_dir_{}", blueprint.id), - format!("{}target", "../".repeat(depth)), - )); - - let build_cmd = blueprint - .commands - .build - .replace("{crate}", &crate_name) - .replace("{crate_dir}", &crate_dir_rel); - let serve_cmd = blueprint - .commands - .serve - .replace("{crate}", &crate_name) - .replace("{crate_dir}", &crate_dir_rel); - let deploy_cmd = blueprint - .commands - .deploy - .replace("{crate}", &crate_name) - .replace("{crate_dir}", &crate_dir_rel); - - let mut manifest_section = String::new(); - writeln!( - manifest_section, - "[adapters.{}.adapter]\ncrate = \"crates/{}\"\nmanifest = \"crates/{}/{}\"\n", - blueprint.id, crate_name, crate_name, blueprint.manifest.manifest_filename - ) - .unwrap(); - writeln!( - manifest_section, - "[adapters.{}.build]\ntarget = \"{}\"\nprofile = \"{}\"", - blueprint.id, blueprint.manifest.build_target, blueprint.manifest.build_profile - ) - .unwrap(); - if !blueprint.manifest.build_features.is_empty() { - let joined = blueprint - .manifest - .build_features - .iter() - .map(|f| format!("\"{}\"", f)) - .collect::>() - .join(", "); - writeln!(manifest_section, "features = [{}]", joined).unwrap(); - } - manifest_section.push('\n'); - writeln!( - manifest_section, - "[adapters.{}.commands]\nbuild = \"{}\"\ndeploy = \"{}\"\nserve = \"{}\"\n", - blueprint.id, build_cmd, deploy_cmd, serve_cmd - ) - .unwrap(); - - manifest_section.push('\n'); - writeln!(manifest_section, "[adapters.{}.logging]", blueprint.id).unwrap(); - if blueprint.id == "fastly" { - writeln!( - manifest_section, - "endpoint = \"{}_log\"", - layout.project_mod - ) - .unwrap(); - } else if let Some(endpoint) = blueprint.logging.endpoint { - writeln!(manifest_section, "endpoint = \"{}\"", endpoint).unwrap(); - } - writeln!(manifest_section, "level = \"{}\"", blueprint.logging.level).unwrap(); - if let Some(echo_stdout) = blueprint.logging.echo_stdout { - writeln!( - 
manifest_section, - "echo_stdout = {}", - if echo_stdout { "true" } else { "false" } - ) - .unwrap(); - } - manifest_section.push('\n'); - - let description = blueprint - .readme - .description - .replace("{display}", blueprint.display_name); - readme_adapter_crates.push_str(&format!("- `crates/{}`: {}\n", crate_name, description)); - - let heading = blueprint - .readme - .dev_heading - .replace("{display}", blueprint.display_name); - readme_adapter_dev.push_str(&format!("- {}:\n", heading)); - for step in blueprint.readme.dev_steps { - let formatted = step - .replace("{crate}", &crate_name) - .replace("{crate_dir}", &crate_dir_rel); - readme_adapter_dev.push_str(&format!(" - {}\n", formatted)); - } - readme_adapter_dev.push('\n'); + manifest_sections.push_str(&render_manifest_section( + layout, + blueprint, + &crate_name, + &crate_dir_rel, + )?); + append_readme_entries( + blueprint, + &crate_name, + &crate_dir_rel, + &mut readme_adapter_crates, + &mut readme_adapter_dev, + )?; - manifest_sections.push_str(&manifest_section); - workspace_members.push(format!(" \"crates/{}\",", crate_name)); - adapter_ids.push(blueprint.id.to_string()); + workspace_members.push(format!(" \"crates/{crate_name}\",")); + adapter_ids.push(blueprint.id.to_owned()); contexts.push(AdapterContext { blueprint, - dir: adapter_dir, data_entries, + dir: adapter_dir, }); } Ok(AdapterArtifacts { - contexts, adapter_ids, - workspace_members, + contexts, manifest_sections, readme_adapter_crates, readme_adapter_dev, + workspace_members, }) } +/// Build the `(key, value)` template-data entries for a single adapter blueprint, +/// resolving its dependencies and recording them in `workspace_dependencies`. 
+fn blueprint_data_entries( + layout: &ProjectLayout, + cwd: &Path, + blueprint: &'static AdapterBlueprint, + crate_name: &str, + crate_dir_rel: &str, + workspace_dependencies: &mut BTreeMap<String, String>, +) -> Vec<(String, String)> { + let mut data_entries: Vec<(String, String)> = Vec::new(); + data_entries.push((format!("proj_{}", blueprint.id), crate_name.to_owned())); + data_entries.push(( + format!("proj_{}_underscored", blueprint.id), + crate_name.replace('-', "_"), + )); + + for dep in blueprint.dependencies { + let ResolvedDependency { + name, + workspace_line, + crate_line, + } = resolve_dep_line( + &layout.out_dir, + cwd, + dep.repo_crate, + dep.fallback, + dep.features, + ); + workspace_dependencies.entry(name).or_insert(workspace_line); + data_entries.push((dep.key.to_owned(), crate_line)); + } + + // Compute the relative path from the adapter crate to the workspace + // target directory so templates can reference build artifacts. + let depth = crate_dir_rel.matches('/').count().saturating_add(1); + data_entries.push(( + format!("target_dir_{}", blueprint.id), + format!("{}target", "../".repeat(depth)), + )); + + data_entries +} + +/// Render the `[adapters.<id>.*]` TOML stanza for a single blueprint.
+fn render_manifest_section( + layout: &ProjectLayout, + blueprint: &'static AdapterBlueprint, + crate_name: &str, + crate_dir_rel: &str, +) -> Result<String, fmt::Error> { + let build_cmd = blueprint + .commands + .build + .replace("{crate}", crate_name) + .replace("{crate_dir}", crate_dir_rel); + let serve_cmd = blueprint + .commands + .serve + .replace("{crate}", crate_name) + .replace("{crate_dir}", crate_dir_rel); + let deploy_cmd = blueprint + .commands + .deploy + .replace("{crate}", crate_name) + .replace("{crate_dir}", crate_dir_rel); + + let mut out = String::new(); + writeln!( + out, + "[adapters.{}.adapter]\ncrate = \"crates/{}\"\nmanifest = \"crates/{}/{}\"\n", + blueprint.id, crate_name, crate_name, blueprint.manifest.manifest_filename, + )?; + writeln!( + out, + "[adapters.{}.build]\ntarget = \"{}\"\nprofile = \"{}\"", + blueprint.id, blueprint.manifest.build_target, blueprint.manifest.build_profile, + )?; + if !blueprint.manifest.build_features.is_empty() { + let joined = blueprint + .manifest + .build_features + .iter() + .map(|feat| format!("\"{feat}\"")) + .collect::<Vec<_>>() + .join(", "); + writeln!(out, "features = [{joined}]")?; + } + out.push('\n'); + writeln!( + out, + "[adapters.{}.commands]\nbuild = \"{}\"\ndeploy = \"{}\"\nserve = \"{}\"\n", + blueprint.id, build_cmd, deploy_cmd, serve_cmd, + )?; + + out.push('\n'); + writeln!(out, "[adapters.{}.logging]", blueprint.id)?; + let endpoint_value = if blueprint.id == "fastly" { + Some(format!("{}_log", layout.project_mod)) + } else { + blueprint.logging.endpoint.map(str::to_owned) + }; + if let Some(endpoint) = endpoint_value { + writeln!(out, "endpoint = \"{endpoint}\"")?; + } + writeln!(out, "level = \"{}\"", blueprint.logging.level)?; + if let Some(echo_stdout) = blueprint.logging.echo_stdout { + writeln!( + out, + "echo_stdout = {}", + if echo_stdout { "true" } else { "false" }, + )?; + } + out.push('\n'); + Ok(out) +} + +/// Append the per-adapter README entries for crates list and dev-step list.
+fn append_readme_entries( + blueprint: &'static AdapterBlueprint, + crate_name: &str, + crate_dir_rel: &str, + readme_adapter_crates: &mut String, + readme_adapter_dev: &mut String, +) -> Result<(), fmt::Error> { + let description = blueprint + .readme + .description + .replace("{display}", blueprint.display_name); + writeln!( + readme_adapter_crates, + "- `crates/{crate_name}`: {description}" + )?; + + let heading = blueprint + .readme + .dev_heading + .replace("{display}", blueprint.display_name); + writeln!(readme_adapter_dev, "- {heading}:")?; + for step in blueprint.readme.dev_steps { + let formatted = step + .replace("{crate}", crate_name) + .replace("{crate_dir}", crate_dir_rel); + writeln!(readme_adapter_dev, " - {formatted}")?; + } + readme_adapter_dev.push('\n'); + Ok(()) +} + fn build_base_data( layout: &ProjectLayout, core_crate_line: &str, @@ -346,13 +442,13 @@ fn build_base_data( data.insert("proj_mod".into(), Value::String(layout.project_mod.clone())); data.insert( "dep_edgezero_core".into(), - Value::String(core_crate_line.to_string()), + Value::String(core_crate_line.to_owned()), ); let adapter_list_str = artifacts .adapter_ids .iter() - .map(|id| format!("\"{}\"", id)) + .map(|id| format!("\"{id}\"")) .collect::>() .join(", "); data.insert("adapter_list".into(), Value::String(adapter_list_str)); @@ -390,11 +486,11 @@ fn render_templates( layout: &ProjectLayout, adapter_contexts: &[AdapterContext], data_value: &Value, -) -> std::io::Result<()> { +) -> Result<(), GeneratorError> { let mut hbs = Handlebars::new(); register_templates(&mut hbs); - println!("[edgezero] writing workspace files"); + log::info!("[edgezero] writing workspace files"); write_tmpl( &hbs, "root_Cargo_toml", @@ -419,8 +515,14 @@ fn render_templates( data_value, &layout.out_dir.join(".gitignore"), )?; + write_tmpl( + &hbs, + "root_clippy_toml", + data_value, + &layout.out_dir.join("clippy.toml"), + )?; - println!("[edgezero] writing core crate {}", layout.core_name); + 
log::info!("[edgezero] writing core crate {}", layout.core_name); write_tmpl( &hbs, "core_Cargo_toml", @@ -441,9 +543,13 @@ fn render_templates( )?; for context in adapter_contexts { - println!( + let crate_dir_name = context + .dir + .file_name() + .ok_or_else(|| GeneratorError::AdapterDirMissingFileName(context.dir.clone()))?; + log::info!( "[edgezero] writing adapter crate {}", - context.dir.file_name().unwrap().to_string_lossy() + crate_dir_name.to_string_lossy(), ); for file in context.blueprint.files { write_tmpl( @@ -459,7 +565,7 @@ fn render_templates( } fn initialize_git_repo(out_dir: &Path) { - println!("[edgezero] initializing git repository"); + log::info!("[edgezero] initializing git repository"); match Command::new("git") .arg("init") .arg("--quiet") @@ -467,19 +573,16 @@ fn initialize_git_repo(out_dir: &Path) { .status() { Ok(status) if status.success() => { - println!( + log::info!( "[edgezero] initialized empty Git repository in {}/.git/", out_dir.display() ); } Ok(status) => { - eprintln!("[edgezero] warning: git init exited with status {status}"); + log::warn!("[edgezero] warning: git init exited with status {status}"); } Err(err) => { - eprintln!( - "[edgezero] warning: failed to initialize git repository: {}", - err - ); + log::warn!("[edgezero] warning: failed to initialize git repository: {err}"); } } } @@ -490,30 +593,33 @@ mod tests { use std::path::Path; use tempfile::TempDir; + // `super::*` re-exports `env` and `fs` from outer `use` lines, so they're + // already in scope here. 
+ struct PathOverride { original: Option<String>, } impl PathOverride { fn prepend(path: &Path) -> Self { - let original = std::env::var("PATH").ok(); + let original = env::var("PATH").ok(); let sep = if cfg!(windows) { ";" } else { ":" }; let prefix = path.to_string_lossy(); let new_path = match &original { Some(existing) if !existing.is_empty() => format!("{prefix}{sep}{existing}"), _ => prefix.into_owned(), }; - std::env::set_var("PATH", &new_path); + env::set_var("PATH", &new_path); Self { original } } } impl Drop for PathOverride { fn drop(&mut self) { - if let Some(ref original) = self.original { - std::env::set_var("PATH", original); + if let Some(original) = &self.original { + env::set_var("PATH", original); } else { - std::env::remove_var("PATH"); + env::remove_var("PATH"); } } } @@ -522,7 +628,7 @@ mod tests { fn generate_new_scaffolds_workspace_layout() { let temp = TempDir::new().expect("temp dir"); let bin_dir = temp.path().join("bin"); - std::fs::create_dir_all(&bin_dir).expect("bin dir"); + fs::create_dir_all(&bin_dir).expect("bin dir"); let git_path = if cfg!(windows) { bin_dir.join("git.cmd") } else {
mod tests { local_core: false, }; - generate_new(args).expect("scaffold succeeds"); + generate_new(&args).expect("scaffold succeeds"); let project_dir = temp.path().join("demo-app"); assert!(project_dir.is_dir(), "project directory created"); @@ -564,7 +668,7 @@ mod tests { assert!(project_dir.join("crates/demo-app-core/src/lib.rs").exists()); let cargo_toml = - std::fs::read_to_string(project_dir.join("Cargo.toml")).expect("read Cargo.toml"); + fs::read_to_string(project_dir.join("Cargo.toml")).expect("read Cargo.toml"); assert!(cargo_toml.contains("crates/demo-app-core")); assert!(cargo_toml.contains("crates/demo-app-adapter-cloudflare")); assert!(cargo_toml.contains("crates/demo-app-adapter-fastly")); @@ -574,7 +678,7 @@ mod tests { ); let manifest = - std::fs::read_to_string(project_dir.join("edgezero.toml")).expect("read edgezero.toml"); + fs::read_to_string(project_dir.join("edgezero.toml")).expect("read edgezero.toml"); assert!(manifest.contains("[adapters.cloudflare.adapter]")); assert!(manifest.contains("[adapters.fastly.adapter]")); assert!( @@ -589,7 +693,29 @@ mod tests { ); let gitignore = - std::fs::read_to_string(project_dir.join(".gitignore")).expect("read .gitignore"); + fs::read_to_string(project_dir.join(".gitignore")).expect("read .gitignore"); assert!(gitignore.contains("target/")); + + let clippy = fs::read_to_string(project_dir.join("clippy.toml")).expect("read clippy.toml"); + assert!(clippy.contains("allow-expect-in-tests = true")); + + assert!(cargo_toml.contains("[workspace.lints.clippy]")); + assert!(cargo_toml.contains("blanket_clippy_restriction_lints = \"allow\"")); + + for crate_dir in [ + "crates/demo-app-core", + "crates/demo-app-adapter-axum", + "crates/demo-app-adapter-cloudflare", + "crates/demo-app-adapter-fastly", + "crates/demo-app-adapter-spin", + ] { + let path = project_dir.join(crate_dir).join("Cargo.toml"); + let body = + fs::read_to_string(&path).unwrap_or_else(|_| panic!("read {}", path.display())); + assert!( + 
body.contains("[lints]\nworkspace = true"), + "{crate_dir} must inherit workspace lints", + ); + } } } diff --git a/crates/edgezero-cli/src/main.rs b/crates/edgezero-cli/src/main.rs index e5c7ae4..953ac1c 100644 --- a/crates/edgezero-cli/src/main.rs +++ b/crates/edgezero-cli/src/main.rs @@ -1,4 +1,4 @@ -//! EdgeZero CLI. +//! `EdgeZero` CLI. #[cfg(feature = "cli")] mod adapter; @@ -14,21 +14,39 @@ mod scaffold; #[cfg(feature = "cli")] use edgezero_core::manifest::ManifestLoader; #[cfg(feature = "cli")] +use std::env; +#[cfg(feature = "cli")] use std::io::ErrorKind; #[cfg(feature = "cli")] use std::path::PathBuf; +#[cfg(feature = "cli")] +use std::process; + +/// Initialize a CLI logger that prints messages without timestamps or level +/// prefixes — the CLI's output IS the user-facing UX, not a debug log. +#[cfg(feature = "cli")] +fn init_cli_logger() { + use log::LevelFilter; + use simple_logger::SimpleLogger; + let _logger_init = SimpleLogger::new() + .with_level(LevelFilter::Info) + .without_timestamps() + .with_module_level("edgezero_cli", LevelFilter::Info) + .init(); +} #[cfg(feature = "cli")] fn main() { use args::{Args, Command}; - use clap::Parser; + use clap::Parser as _; + init_cli_logger(); let args = Args::parse(); match args.cmd { Command::New(new_args) => { - if let Err(e) = generator::generate_new(new_args) { - eprintln!("[edgezero] new error: {e}"); - std::process::exit(1); + if let Err(err) = generator::generate_new(&new_args) { + log::error!("[edgezero] new error: {err}"); + process::exit(1); } } Command::Build { @@ -36,8 +54,8 @@ fn main() { adapter_args, } => { if let Err(err) = handle_build(&adapter, &adapter_args) { - eprintln!("[edgezero] build error: {err}"); - std::process::exit(1); + log::error!("[edgezero] build error: {err}"); + process::exit(1); } } Command::Deploy { @@ -45,14 +63,14 @@ fn main() { adapter_args, } => { if let Err(err) = handle_deploy(&adapter, &adapter_args) { - eprintln!("[edgezero] deploy error: {err}"); - 
std::process::exit(1); + log::error!("[edgezero] deploy error: {err}"); + process::exit(1); } } Command::Serve { adapter } => { if let Err(err) = handle_serve(&adapter) { - eprintln!("[edgezero] serve error: {err}"); - std::process::exit(1); + log::error!("[edgezero] serve error: {err}"); + process::exit(1); } } Command::Dev => { @@ -63,10 +81,10 @@ fn main() { #[cfg(not(feature = "edgezero-adapter-axum"))] { - eprintln!( + log::error!( "edgezero-cli built without `edgezero-adapter-axum`; rebuild with that feature to use `edgezero dev`." ); - std::process::exit(1); + process::exit(1); } } } @@ -74,17 +92,23 @@ fn main() { #[cfg(not(feature = "cli"))] fn main() { - eprintln!("edgezero-cli built without `cli` feature. Rebuild with `--features cli`."); + use log::LevelFilter; + use simple_logger::SimpleLogger; + let _logger_init = SimpleLogger::new() + .with_level(LevelFilter::Error) + .without_timestamps() + .init(); + log::error!("edgezero-cli built without `cli` feature. Rebuild with `--features cli`."); } #[cfg(feature = "cli")] fn store_bindings_message(adapter_name: &str, manifest: &ManifestLoader) -> Option { - let m = manifest.manifest(); - if !m.secret_store_enabled(adapter_name) { + let manifest_data = manifest.manifest(); + if !manifest_data.secret_store_enabled(adapter_name) { return None; } - let binding_name = m.secret_store_name(adapter_name); + let binding_name = manifest_data.secret_store_binding(adapter_name); let message = match adapter_name { "axum" => format!( "[edgezero] secrets enabled for axum -- ensure the required environment variables are set for local runs (configured store name: '{binding_name}')" @@ -103,7 +127,7 @@ fn store_bindings_message(adapter_name: &str, manifest: &ManifestLoader) -> Opti #[cfg(feature = "cli")] fn log_store_bindings(adapter_name: &str, manifest: &ManifestLoader) { if let Some(message) = store_bindings_message(adapter_name, manifest) { - println!("{message}"); + log::info!("{message}"); } } @@ -111,8 +135,8 @@ fn 
log_store_bindings(adapter_name: &str, manifest: &ManifestLoader) { fn handle_build(adapter_name: &str, adapter_args: &[String]) -> Result<(), String> { let manifest = load_manifest_optional()?; ensure_adapter_defined(adapter_name, manifest.as_ref())?; - if let Some(ref m) = manifest { - log_store_bindings(adapter_name, m); + if let Some(loader) = &manifest { + log_store_bindings(adapter_name, loader); } adapter::execute( adapter_name, @@ -138,28 +162,22 @@ fn handle_deploy(adapter_name: &str, adapter_args: &[String]) -> Result<(), Stri fn handle_serve(adapter_name: &str) -> Result<(), String> { let manifest = load_manifest_optional()?; ensure_adapter_defined(adapter_name, manifest.as_ref())?; - adapter::execute( - adapter_name, - adapter::Action::Serve, - manifest.as_ref(), - &[] as &[String], - ) + adapter::execute(adapter_name, adapter::Action::Serve, manifest.as_ref(), &[]) } #[cfg(feature = "cli")] fn ensure_adapter_defined( adapter_name: &str, - manifest: Option<&ManifestLoader>, + manifest_loader: Option<&ManifestLoader>, ) -> Result<(), String> { - if let Some(manifest) = manifest { - if manifest.manifest().adapters.contains_key(adapter_name) { + if let Some(loader) = manifest_loader { + if loader.manifest().adapters.contains_key(adapter_name) { return Ok(()); } - let available: Vec = manifest.manifest().adapters.keys().cloned().collect(); + let available: Vec = loader.manifest().adapters.keys().cloned().collect(); if available.is_empty() { Err(format!( - "adapter `{}` is not configured in edgezero.toml (no adapters defined)", - adapter_name + "adapter `{adapter_name}` is not configured in edgezero.toml (no adapters defined)" )) } else { Err(format!( @@ -175,9 +193,8 @@ fn ensure_adapter_defined( #[cfg(feature = "cli")] fn load_manifest_optional() -> Result, String> { - let path = std::env::var("EDGEZERO_MANIFEST") - .map(PathBuf::from) - .unwrap_or_else(|_| PathBuf::from("edgezero.toml")); + let path = env::var("EDGEZERO_MANIFEST") + .map_or_else(|_| 
PathBuf::from("edgezero.toml"), PathBuf::from); match ManifestLoader::from_path(&path) { Ok(loader) => Ok(Some(loader)), @@ -186,7 +203,8 @@ fn load_manifest_optional() -> Result, String> { } } -#[cfg(all(test, feature = "cli"))] +#[cfg(test)] +#[cfg(feature = "cli")] mod tests { use super::*; use edgezero_core::manifest::ManifestLoader; @@ -213,32 +231,32 @@ deploy = "echo deploy" serve = "echo serve" "#; - fn manifest_guard() -> &'static Mutex<()> { - static GUARD: OnceLock> = OnceLock::new(); - GUARD.get_or_init(|| Mutex::new(())) - } - struct EnvOverride { key: &'static str, original: Option, } + impl Drop for EnvOverride { + fn drop(&mut self) { + if let Some(original) = &self.original { + env::set_var(self.key, original); + } else { + env::remove_var(self.key); + } + } + } + impl EnvOverride { fn set(key: &'static str, value: &str) -> Self { - let original = std::env::var(key).ok(); - std::env::set_var(key, value); + let original = env::var(key).ok(); + env::set_var(key, value); Self { key, original } } } - impl Drop for EnvOverride { - fn drop(&mut self) { - if let Some(ref original) = self.original { - std::env::set_var(self.key, original); - } else { - std::env::remove_var(self.key); - } - } + fn manifest_guard() -> &'static Mutex<()> { + static GUARD: OnceLock> = OnceLock::new(); + GUARD.get_or_init(|| Mutex::new(())) } #[test] @@ -269,7 +287,7 @@ serve = "echo serve" #[test] fn ensure_adapter_defined_accepts_known_adapter() { let loader = ManifestLoader::load_from_str(BASIC_MANIFEST); - assert!(ensure_adapter_defined("fastly", Some(&loader)).is_ok()); + ensure_adapter_defined("fastly", Some(&loader)).expect("known adapter"); } #[test] @@ -282,7 +300,7 @@ serve = "echo serve" #[test] fn ensure_adapter_defined_allows_when_manifest_missing() { - assert!(ensure_adapter_defined("fastly", None).is_ok()); + ensure_adapter_defined("fastly", None).expect("manifest missing -> permissive"); } #[cfg(not(windows))] @@ -324,7 +342,7 @@ serve = "echo serve" } #[test] - 
fn secret_store_name_is_readable_from_manifest() { + fn secret_store_binding_is_readable_from_manifest() { let manifest_with_secrets = r#" [app] name = "demo-app" @@ -339,7 +357,10 @@ deploy = "echo deploy" serve = "echo serve" "#; let loader = ManifestLoader::load_from_str(manifest_with_secrets); - assert_eq!(loader.manifest().secret_store_name("fastly"), "MY_SECRETS"); + assert_eq!( + loader.manifest().secret_store_binding("fastly"), + "MY_SECRETS" + ); assert!(loader.manifest().stores.secrets.is_some()); } @@ -365,10 +386,10 @@ name = "MY_SECRETS" #[test] fn store_bindings_message_respects_secret_store_enabled() { let loader = ManifestLoader::load_from_str( - r#" + " [stores.secrets] enabled = false -"#, +", ); assert!(store_bindings_message("fastly", &loader).is_none()); } diff --git a/crates/edgezero-cli/src/scaffold.rs b/crates/edgezero-cli/src/scaffold.rs index 24cf2ef..714ac5e 100644 --- a/crates/edgezero-cli/src/scaffold.rs +++ b/crates/edgezero-cli/src/scaffold.rs @@ -1,44 +1,100 @@ use edgezero_adapter::scaffold; use handlebars::Handlebars; +use std::fs; +use std::io; +use std::path::{Path, PathBuf}; +use thiserror::Error; +pub struct ResolvedDependency { + pub crate_line: String, + pub name: String, + pub workspace_line: String, +} + +/// Errors produced while scaffolding files for a generated project. +#[derive(Debug, Error)] +pub enum ScaffoldError { + /// Failed to read or write a path on disk while emitting a template. + #[error("scaffold io error at {path}: {source}")] + Io { + path: PathBuf, + #[source] + source: io::Error, + }, + /// The Handlebars renderer rejected the template or its data. 
+ #[error("template '{name}' failed to render: {message}")] + Render { message: String, name: String }, +} + +impl ScaffoldError { + pub(crate) fn io(path: impl Into, source: io::Error) -> Self { + ScaffoldError::Io { + path: path.into(), + source, + } + } +} + +fn crate_name_from_repo_path(path: &str) -> &str { + Path::new(path) + .file_name() + .and_then(|name| name.to_str()) + .unwrap_or(path) +} + +/// Registers all compile-time-embedded templates. +/// +/// Each `register_template_string` call uses `.expect(..)` because the inputs +/// are static strings via `include_str!` — failure can only happen if the +/// template source itself has invalid Handlebars syntax, which is a +/// build-time programmer error caught the moment the binary is run. +#[expect( + clippy::expect_used, + reason = "compile-time-embedded templates: parse failure is a build bug" +)] pub fn register_templates(hbs: &mut Handlebars) { // Root hbs.register_template_string( "root_Cargo_toml", include_str!("templates/root/Cargo.toml.hbs"), ) - .unwrap(); + .expect("compiled-in template is valid"); hbs.register_template_string( "root_edgezero_toml", include_str!("templates/root/edgezero.toml.hbs"), ) - .unwrap(); + .expect("compiled-in template is valid"); hbs.register_template_string( "root_README_md", include_str!("templates/root/README.md.hbs"), ) - .unwrap(); + .expect("compiled-in template is valid"); hbs.register_template_string( "root_gitignore", include_str!("templates/root/gitignore.hbs"), ) - .unwrap(); + .expect("compiled-in template is valid"); + hbs.register_template_string( + "root_clippy_toml", + include_str!("templates/root/clippy.toml.hbs"), + ) + .expect("compiled-in template is valid"); // Core hbs.register_template_string( "core_Cargo_toml", include_str!("templates/core/Cargo.toml.hbs"), ) - .unwrap(); + .expect("compiled-in template is valid"); hbs.register_template_string( "core_src_lib_rs", include_str!("templates/core/src/lib.rs.hbs"), ) - .unwrap(); + .expect("compiled-in 
template is valid"); hbs.register_template_string( "core_src_handlers_rs", include_str!("templates/core/src/handlers.rs.hbs"), ) - .unwrap(); + .expect("compiled-in template is valid"); // Adapter-specific templates for adapter in scaffold::registered_blueprints() { for template in adapter.template_registrations { @@ -48,65 +104,42 @@ pub fn register_templates(hbs: &mut Handlebars) { } } -pub fn write_tmpl( - hbs: &handlebars::Handlebars, - name: &str, - data: &serde_json::Value, - out_path: &std::path::Path, -) -> std::io::Result<()> { - if let Some(parent) = out_path.parent() { - std::fs::create_dir_all(parent)?; +pub fn relative_to(from: &Path, to: &Path) -> Option { + let from_abs = fs::canonicalize(from).ok()?; + let to_abs = fs::canonicalize(to).ok()?; + let suffix = from_abs.strip_prefix(&to_abs).ok()?; + let depth = suffix.components().count(); + if depth == 0 { + return Some(".".into()); } - let rendered = hbs - .render(name, data) - .map_err(|e| std::io::Error::other(e.to_string()))?; - std::fs::write(out_path, rendered) -} - -pub fn sanitize_crate_name(input: &str) -> String { - let mut out = String::new(); - for (i, ch) in input.chars().enumerate() { - let valid = ch.is_ascii_lowercase() || ch.is_ascii_digit() || ch == '-' || ch == '_'; - if valid { - if i == 0 && ch.is_ascii_digit() { - out.push('_'); - } - out.push(ch); - } else { - out.push('-'); + let mut ups = String::new(); + for _ in 0..depth { + if !ups.is_empty() { + ups.push('/'); } + ups.push_str(".."); } - if out.is_empty() { - "edgezero-app".to_string() - } else { - out - } -} - -pub struct ResolvedDependency { - pub name: String, - pub workspace_line: String, - pub crate_line: String, + Some(ups) } pub fn resolve_dep_line( - workspace_dir: &std::path::Path, - repo_root: &std::path::Path, + workspace_dir: &Path, + repo_root: &Path, repo_rel_crate: &str, fallback: &str, features: &[&str], ) -> ResolvedDependency { - let crate_name = crate_name_from_repo_path(repo_rel_crate).to_string(); + 
let crate_name = crate_name_from_repo_path(repo_rel_crate).to_owned(); let candidate = repo_root.join(repo_rel_crate); let workspace_line = if candidate.exists() { if let Some(rel) = relative_to(workspace_dir, repo_root) { - let dep_path = std::path::Path::new(&rel).join(repo_rel_crate); + let dep_path = Path::new(&rel).join(repo_rel_crate); format!("{} = {{ path = \"{}\" }}", crate_name, dep_path.display()) } else { - fallback.to_string() + fallback.to_owned() } } else { - fallback.to_string() + fallback.to_owned() }; let feature_fragment = if features.is_empty() { @@ -114,47 +147,60 @@ pub fn resolve_dep_line( } else { let joined = features .iter() - .map(|f| format!("\"{}\"", f)) + .map(|feat| format!("\"{feat}\"")) .collect::>() .join(", "); - format!(", features = [{}]", joined) + format!(", features = [{joined}]") }; - let crate_line = format!( - "{} = {{ workspace = true{} }}", - crate_name, feature_fragment - ); + let crate_line = format!("{crate_name} = {{ workspace = true{feature_fragment} }}"); ResolvedDependency { + crate_line, name: crate_name, workspace_line, - crate_line, } } -fn crate_name_from_repo_path(p: &str) -> &str { - std::path::Path::new(p) - .file_name() - .and_then(|s| s.to_str()) - .unwrap_or(p) +pub fn sanitize_crate_name(input: &str) -> String { + let mut out = String::new(); + for (i, ch) in input.chars().enumerate() { + let valid = ch.is_ascii_lowercase() || ch.is_ascii_digit() || ch == '-' || ch == '_'; + if valid { + if i == 0 && ch.is_ascii_digit() { + out.push('_'); + } + out.push(ch); + } else { + out.push('-'); + } + } + if out.is_empty() { + "edgezero-app".to_owned() + } else { + out + } } -pub fn relative_to(from: &std::path::Path, to: &std::path::Path) -> Option { - let from_abs = std::fs::canonicalize(from).ok()?; - let to_abs = std::fs::canonicalize(to).ok()?; - let suffix = from_abs.strip_prefix(&to_abs).ok()?; - let depth = suffix.components().count(); - if depth == 0 { - return Some(".".into()); - } - let mut ups = 
String::new(); - for i in 0..depth { - let _ = i; - if !ups.is_empty() { - ups.push('/'); - } - ups.push_str(".."); +/// # Errors +/// Returns [`ScaffoldError::Io`] if the parent directory cannot be created +/// or the rendered template cannot be written; [`ScaffoldError::Render`] if +/// Handlebars rejects the template or its data. +pub fn write_tmpl( + hbs: &handlebars::Handlebars, + name: &str, + data: &serde_json::Value, + out_path: &Path, +) -> Result<(), ScaffoldError> { + if let Some(parent) = out_path.parent() { + fs::create_dir_all(parent).map_err(|err| ScaffoldError::io(parent, err))?; } - Some(ups) + let rendered = hbs + .render(name, data) + .map_err(|err| ScaffoldError::Render { + message: err.to_string(), + name: name.to_owned(), + })?; + fs::write(out_path, rendered).map_err(|err| ScaffoldError::io(out_path, err)) } #[cfg(test)] @@ -172,6 +218,7 @@ mod tests { "root_edgezero_toml", "root_README_md", "root_gitignore", + "root_clippy_toml", "core_Cargo_toml", "core_src_lib_rs", "core_src_handlers_rs", diff --git a/crates/edgezero-cli/src/templates/core/Cargo.toml.hbs b/crates/edgezero-cli/src/templates/core/Cargo.toml.hbs index 4dc4f0a..17395d8 100644 --- a/crates/edgezero-cli/src/templates/core/Cargo.toml.hbs +++ b/crates/edgezero-cli/src/templates/core/Cargo.toml.hbs @@ -4,6 +4,9 @@ version = "0.1.0" edition = "2021" publish = false +[lints] +workspace = true + [dependencies] bytes = { workspace = true } {{{dep_edgezero_core}}} diff --git a/crates/edgezero-cli/src/templates/root/Cargo.toml.hbs b/crates/edgezero-cli/src/templates/root/Cargo.toml.hbs index 1b637bd..b8ebff1 100644 --- a/crates/edgezero-cli/src/templates/root/Cargo.toml.hbs +++ b/crates/edgezero-cli/src/templates/root/Cargo.toml.hbs @@ -12,3 +12,33 @@ resolver = "2" debug = 1 codegen-units = 1 lto = "fat" + +[workspace.lints.clippy] +# Strict gate matching the EdgeZero workspace. 
The allow-list below tracks +# the entries the EdgeZero demo legitimately needs — extend it lazily when +# a real failure surfaces in your generated code. +pedantic = { level = "warn", priority = -1 } +restriction = { level = "deny", priority = -1 } + +# Meta — required when enabling `restriction` as a group. +blanket_clippy_restriction_lints = "allow" + +# Documentation — private items don't need full docs in app code. +missing_docs_in_private_items = "allow" + +# Style / formatting — match idiomatic Rust conventions. +implicit_return = "allow" +question_mark_used = "allow" +single_call_fn = "allow" +separated_literal_suffix = "allow" + +# API design — `exhaustive_structs` fires on the unit struct generated by +# `edgezero_core::app!`. +exhaustive_structs = "allow" + +# Imports / paths — generated binaries are std applications, not no_std libraries. +std_instead_of_alloc = "allow" +std_instead_of_core = "allow" + +[workspace.lints.rust] +unsafe_code = "deny" diff --git a/crates/edgezero-cli/src/templates/root/clippy.toml.hbs b/crates/edgezero-cli/src/templates/root/clippy.toml.hbs new file mode 100644 index 0000000..36e6164 --- /dev/null +++ b/crates/edgezero-cli/src/templates/root/clippy.toml.hbs @@ -0,0 +1,10 @@ +# Clippy configuration. See https://doc.rust-lang.org/clippy/lint_configuration.html +# +# Test code uses `.unwrap()`, `.expect()`, `panic!`, `assert!`, indexing, and +# other "if-this-fails-the-test-fails" idioms by convention. Mirror the +# EdgeZero workspace policy and exempt tests from the corresponding +# restriction lints. 
+allow-expect-in-tests = true +allow-indexing-slicing-in-tests = true +allow-panic-in-tests = true +allow-unwrap-in-tests = true diff --git a/crates/edgezero-core/Cargo.toml b/crates/edgezero-core/Cargo.toml index 1bbf05b..8e53131 100644 --- a/crates/edgezero-core/Cargo.toml +++ b/crates/edgezero-core/Cargo.toml @@ -5,6 +5,9 @@ version = { workspace = true } authors = { workspace = true } license = { workspace = true } +[lints] +workspace = true + [dependencies] edgezero-macros = { path = "../edgezero-macros" } anyhow = { workspace = true } diff --git a/crates/edgezero-core/src/app.rs b/crates/edgezero-core/src/app.rs index 0be7d72..150a115 100644 --- a/crates/edgezero-core/src/app.rs +++ b/crates/edgezero-core/src/app.rs @@ -1,95 +1,59 @@ use crate::router::RouterService; -const DEFAULT_APP_NAME: &str = "EdgeZero App"; - /// Canonical adapter name for the Axum adapter. pub const AXUM_ADAPTER: &str = "axum"; /// Canonical adapter name for the Cloudflare adapter. pub const CLOUDFLARE_ADAPTER: &str = "cloudflare"; +const DEFAULT_APP_NAME: &str = "EdgeZero App"; /// Canonical adapter name for the Fastly adapter. pub const FASTLY_ADAPTER: &str = "fastly"; /// Canonical adapter name for the Spin adapter. pub const SPIN_ADAPTER: &str = "spin"; -/// Adapter-specific config-store override metadata generated from `[stores.config.adapters.*]`. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub struct ConfigStoreAdapterMetadata { - adapter: &'static str, - name: &'static str, -} - -impl ConfigStoreAdapterMetadata { - pub const fn new(adapter: &'static str, name: &'static str) -> Self { - Self { adapter, name } - } - - pub fn adapter(&self) -> &'static str { - self.adapter - } - - pub fn name(&self) -> &'static str { - self.name - } -} - -/// Provider-neutral config-store metadata generated from `[stores.config]`. 
-#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub struct ConfigStoreMetadata { - default_name: &'static str, - adapters: &'static [ConfigStoreAdapterMetadata], +/// Lightweight container around a `RouterService` that can be extended via hook implementations. +pub struct App { + name: String, + router: RouterService, } -impl ConfigStoreMetadata { - pub const fn new( - default_name: &'static str, - adapters: &'static [ConfigStoreAdapterMetadata], - ) -> Self { - Self { - default_name, - adapters, - } - } - - pub fn default_name(&self) -> &'static str { - self.default_name +impl App { + /// Default name used when none is provided. + #[must_use] + #[inline] + pub fn default_name() -> &'static str { + DEFAULT_APP_NAME } - pub fn adapters(&self) -> &'static [ConfigStoreAdapterMetadata] { - self.adapters + /// Consume the app and return the contained router service. + #[must_use] + #[inline] + pub fn into_router(self) -> RouterService { + self.router } - pub fn name_for_adapter(&self, adapter: &str) -> &'static str { - self.adapters - .iter() - .find(|entry| entry.adapter.eq_ignore_ascii_case(adapter)) - .map(|entry| entry.name) - .unwrap_or(self.default_name) + /// Name assigned to the application. + #[must_use] + #[inline] + pub fn name(&self) -> &str { + &self.name } -} -/// Lightweight container around a `RouterService` that can be extended via hook implementations. -pub struct App { - router: RouterService, - name: String, -} - -impl App { /// Create a new application wrapper from the supplied router service. + #[must_use] + #[inline] pub fn new(router: RouterService) -> Self { Self::with_name(router, DEFAULT_APP_NAME) } /// Access the underlying router service. + #[must_use] + #[inline] pub fn router(&self) -> &RouterService { &self.router } - /// Name assigned to the application. - pub fn name(&self) -> &str { - &self.name - } - /// Update the application name. 
+ #[inline] pub fn set_name(&mut self, name: S) where S: Into, @@ -97,12 +61,8 @@ impl App { self.name = name.into(); } - /// Consume the app and return the contained router service. - pub fn into_router(self) -> RouterService { - self.router - } - /// Construct a new application with the provided router and name. + #[inline] pub fn with_name(router: RouterService, name: S) -> Self where S: Into, @@ -112,35 +72,82 @@ impl App { name: name.into(), } } +} - /// Default name used when none is provided. - pub fn default_name() -> &'static str { - DEFAULT_APP_NAME +/// Adapter-specific config-store override metadata generated from `[stores.config.adapters.*]`. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct ConfigStoreAdapterMetadata { + adapter: &'static str, + name: &'static str, +} + +impl ConfigStoreAdapterMetadata { + #[must_use] + #[inline] + pub fn adapter(&self) -> &'static str { + self.adapter + } + + #[must_use] + #[inline] + pub fn name(&self) -> &'static str { + self.name + } + + #[must_use] + #[inline] + pub const fn new(adapter: &'static str, name: &'static str) -> Self { + Self { adapter, name } } } -/// Trait implemented by application hook adapters. -pub trait Hooks { - /// Allow implementations to mutate the freshly constructed application before use. - /// The default implementation performs no changes. - fn configure(_app: &mut App) {} +/// Provider-neutral config-store metadata generated from `[stores.config]`. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct ConfigStoreMetadata { + adapters: &'static [ConfigStoreAdapterMetadata], + default_name: &'static str, +} - /// Build the router service for the application. - fn routes() -> RouterService; +impl ConfigStoreMetadata { + #[must_use] + #[inline] + pub fn adapters(&self) -> &'static [ConfigStoreAdapterMetadata] { + self.adapters + } - /// Display name for the application. Defaults to `"EdgeZero App"`. 
- fn name() -> &'static str { - App::default_name() + #[must_use] + #[inline] + pub fn default_name(&self) -> &'static str { + self.default_name } - /// Structured config-store metadata for the application, if declared. - /// - /// Macro-generated apps derive this from `[stores.config]` in `edgezero.toml`. - fn config_store() -> Option<&'static ConfigStoreMetadata> { - None + #[must_use] + #[inline] + pub fn name_for_adapter(&self, adapter: &str) -> &'static str { + self.adapters + .iter() + .find(|entry| entry.adapter.eq_ignore_ascii_case(adapter)) + .map_or(self.default_name, |entry| entry.name) + } + + #[must_use] + #[inline] + pub const fn new( + default_name: &'static str, + adapters: &'static [ConfigStoreAdapterMetadata], + ) -> Self { + Self { + adapters, + default_name, + } } +} +/// Trait implemented by application hook adapters. +pub trait Hooks { /// Construct an `App` by wiring the routes and invoking the configuration hook. + #[must_use] + #[inline] fn build_app() -> App where Self: Sized, @@ -149,6 +156,30 @@ pub trait Hooks { Self::configure(&mut app); app } + + /// Structured config-store metadata for the application, if declared. + /// + /// Macro-generated apps derive this from `[stores.config]` in `edgezero.toml`. + #[must_use] + #[inline] + fn config_store() -> Option<&'static ConfigStoreMetadata> { + None + } + + /// Allow implementations to mutate the freshly constructed application before use. + /// The default implementation performs no changes. + #[inline] + fn configure(_app: &mut App) {} + + /// Display name for the application. Defaults to `"EdgeZero App"`. + #[must_use] + #[inline] + fn name() -> &'static str { + App::default_name() + } + + /// Build the router service for the application. 
+ fn routes() -> RouterService; } #[cfg(test)] @@ -159,35 +190,39 @@ mod tests { use crate::error::EdgeError; use crate::http::{request_builder, Method, StatusCode}; use futures::executor::block_on; - use tower_service::Service; - - fn empty_router() -> RouterService { - RouterService::builder().build() - } + use tower_service::Service as _; - #[test] - fn default_app_uses_constant_name() { - let app = App::new(empty_router()); - assert_eq!(app.name(), App::default_name()); - } + struct DefaultHooks; struct TestHooks; - impl Hooks for TestHooks { - fn routes() -> RouterService { - async fn handler(_ctx: RequestContext) -> Result { - Ok("ok".to_string()) - } - - RouterService::builder().get("/test", handler).build() + impl Hooks for DefaultHooks { + fn build_app() -> App { + let mut app = App::with_name(Self::routes(), Self::name()); + Self::configure(&mut app); + app } - fn configure(app: &mut App) { - app.set_name("configured"); + fn config_store() -> Option<&'static ConfigStoreMetadata> { + None } + fn configure(_app: &mut App) {} + fn name() -> &'static str { - "hooks-name" + App::default_name() + } + + fn routes() -> RouterService { + RouterService::builder().build() + } + } + + impl Hooks for TestHooks { + fn build_app() -> App { + let mut app = App::with_name(Self::routes(), Self::name()); + Self::configure(&mut app); + app } fn config_store() -> Option<&'static ConfigStoreMetadata> { @@ -200,6 +235,26 @@ mod tests { ); Some(&CONFIG_STORE) } + + fn configure(app: &mut App) { + app.set_name("configured"); + } + + fn name() -> &'static str { + "hooks-name" + } + + fn routes() -> RouterService { + async fn handler(_ctx: RequestContext) -> Result { + Ok("ok".to_owned()) + } + + RouterService::builder().get("/test", handler).build() + } + } + + fn empty_router() -> RouterService { + RouterService::builder().build() } #[test] @@ -221,15 +276,13 @@ mod tests { let response = block_on(app.router().clone().call(request)).expect("response"); 
assert_eq!(response.status(), StatusCode::OK); - assert_eq!(response.body().as_bytes(), b"ok"); + assert_eq!(response.body().as_bytes().expect("buffered"), b"ok"); } - struct DefaultHooks; - - impl Hooks for DefaultHooks { - fn routes() -> RouterService { - RouterService::builder().build() - } + #[test] + fn default_app_uses_constant_name() { + let app = App::new(empty_router()); + assert_eq!(app.name(), App::default_name()); } #[test] diff --git a/crates/edgezero-core/src/body.rs b/crates/edgezero-core/src/body.rs index f933bae..33934a2 100644 --- a/crates/edgezero-core/src/body.rs +++ b/crates/edgezero-core/src/body.rs @@ -6,6 +6,8 @@ use futures_util::stream::{LocalBoxStream, Stream, StreamExt}; use serde::de::DeserializeOwned; use serde::Serialize; +use crate::error::EdgeError; + /// Lightweight HTTP body that can either contain a single `Bytes` buffer or a streaming source of /// chunks. The streaming variant is implemented with `LocalBoxStream` so it remains compatible with /// `wasm32` targets that lack thread support. @@ -15,10 +17,24 @@ pub enum Body { } impl Body { + /// Returns the in-memory bytes for a buffered body, or `None` if this is + /// a streaming body. To consume a streaming body into bytes, use + /// [`Body::into_bytes_bounded`]. + #[inline] + pub fn as_bytes(&self) -> Option<&[u8]> { + match self { + Body::Once(bytes) => Some(bytes.as_ref()), + Body::Stream(_) => None, + } + } + + #[must_use] + #[inline] pub fn empty() -> Self { Self::from_bytes(Bytes::new()) } + #[inline] pub fn from_bytes(bytes: B) -> Self where B: Into, @@ -26,6 +42,7 @@ impl Body { Self::Once(bytes.into()) } + #[inline] pub fn from_stream(stream: S) -> Self where S: Stream> + 'static, @@ -38,27 +55,47 @@ impl Body { ) } - pub fn stream(stream: S) -> Self - where - S: Stream + 'static, - { - Self::Stream(stream.map(Ok::).boxed_local()) - } - - pub fn as_bytes(&self) -> &[u8] { + /// Consume a buffered body and return its bytes, or `None` if this is a + /// streaming body. 
To collect a streaming body, use + /// [`Body::into_bytes_bounded`]. + #[inline] + pub fn into_bytes(self) -> Option { match self { - Body::Once(bytes) => bytes.as_ref(), - Body::Stream(_) => panic!("streaming body does not expose in-memory bytes"), + Body::Once(bytes) => Some(bytes), + Body::Stream(_) => None, } } - pub fn into_bytes(self) -> Bytes { + /// Drain the body into a single `Bytes` buffer, enforcing `max_size`. + /// + /// Works for both buffered and streaming variants. + /// + /// # Errors + /// Returns [`EdgeError::bad_request`] if the body exceeds `max_size` bytes; or [`EdgeError::internal`] if the upstream stream errors. + #[inline] + pub async fn into_bytes_bounded(self, max_size: usize) -> Result { match self { - Body::Once(bytes) => bytes, - Body::Stream(_) => panic!("streaming body cannot be converted into bytes"), + Body::Once(bytes) => { + if bytes.len() > max_size { + return Err(EdgeError::bad_request("request body too large")); + } + Ok(bytes) + } + Body::Stream(mut stream) => { + let mut buf = Vec::new(); + while let Some(result) = StreamExt::next(&mut stream).await { + let chunk = result.map_err(EdgeError::internal)?; + buf.extend_from_slice(&chunk); + if buf.len() > max_size { + return Err(EdgeError::bad_request("request body too large")); + } + } + Ok(Bytes::from(buf)) + } } } + #[inline] pub fn into_stream(self) -> Option>> { match self { Body::Once(_) => None, @@ -66,56 +103,40 @@ impl Body { } } + #[inline] pub fn is_stream(&self) -> bool { matches!(self, Body::Stream(_)) } - /// Drain the body into a single `Bytes` buffer, enforcing `max_size`. - /// - /// Works for both buffered and streaming variants. Returns an error if - /// the body exceeds `max_size` bytes. 
- pub async fn into_bytes_bounded( - self, - max_size: usize, - ) -> Result { - if self.is_stream() { - let mut stream = self.into_stream().expect("checked is_stream"); - let mut buf = Vec::new(); - while let Some(chunk) = StreamExt::next(&mut stream).await { - let chunk = chunk.map_err(crate::error::EdgeError::internal)?; - buf.extend_from_slice(&chunk); - if buf.len() > max_size { - return Err(crate::error::EdgeError::bad_request( - "request body too large", - )); - } - } - Ok(Bytes::from(buf)) - } else { - let bytes = self.into_bytes(); - if bytes.len() > max_size { - return Err(crate::error::EdgeError::bad_request( - "request body too large", - )); - } - Ok(bytes) - } + /// # Errors + /// Returns the underlying [`serde_json::Error`] if `value` cannot be serialized. + #[inline] + pub fn json(value: &T) -> Result + where + T: Serialize, + { + serde_json::to_vec(value).map(Self::from_bytes) } - pub fn text(text: S) -> Self + #[inline] + pub fn stream(stream: S) -> Self where - S: Into, + S: Stream + 'static, { - Self::from_bytes(text.into().into_bytes()) + Self::Stream(stream.map(Ok::).boxed_local()) } - pub fn json(value: &T) -> Result + #[inline] + pub fn text(text: S) -> Self where - T: Serialize, + S: Into, { - serde_json::to_vec(value).map(Self::from_bytes) + Self::from_bytes(text.into().into_bytes()) } + /// # Errors + /// Returns [`serde_json::Error`] if the body is streaming or its bytes are not valid JSON for `T`. 
+ #[inline] pub fn to_json(&self) -> Result where T: DeserializeOwned, @@ -130,12 +151,14 @@ impl Body { } impl Default for Body { + #[inline] fn default() -> Self { Self::empty() } } impl fmt::Debug for Body { + #[inline] fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { Body::Once(bytes) => f @@ -148,24 +171,28 @@ impl fmt::Debug for Body { } impl From> for Body { + #[inline] fn from(value: Vec) -> Self { Body::from_bytes(value) } } impl From<&[u8]> for Body { + #[inline] fn from(value: &[u8]) -> Self { Body::from_bytes(Bytes::copy_from_slice(value)) } } impl From<&str> for Body { + #[inline] fn from(value: &str) -> Self { Body::text(value) } } impl From for Body { + #[inline] fn from(value: String) -> Self { Body::text(value) } @@ -175,12 +202,18 @@ impl From for Body { mod tests { use super::*; use futures::executor::block_on; - use futures_util::StreamExt; + use futures_util::stream; use std::io; + #[test] + fn as_bytes_returns_none_for_stream() { + let body = Body::stream(stream::iter(vec![Bytes::from_static(b"data")])); + assert!(body.as_bytes().is_none()); + } + #[test] fn collect_stream_body() { - let body = Body::stream(futures_util::stream::iter(vec![ + let body = Body::stream(stream::iter(vec![ Bytes::from_static(b"a"), Bytes::from_static(b"b"), ])); @@ -188,8 +221,8 @@ mod tests { let mut stream = body.into_stream().expect("stream"); let collected = block_on(async { let mut data = Vec::new(); - while let Some(chunk) = stream.next().await { - let chunk = chunk.expect("chunk"); + while let Some(result) = stream.next().await { + let chunk = result.expect("chunk"); data.extend_from_slice(&chunk); } data @@ -197,17 +230,34 @@ mod tests { assert_eq!(collected, b"ab"); } + #[test] + fn debug_formats_both_body_variants() { + let buffered = Body::from("payload"); + let buffered_debug = format!("{buffered:?}"); + assert!(buffered_debug.contains("Body::Once")); + + let stream = Body::stream(stream::iter(vec![Bytes::from_static(b"chunk")])); 
+ let stream_debug = format!("{stream:?}"); + assert!(stream_debug.contains("Body::Stream")); + } + + #[test] + fn default_body_is_empty() { + let body = Body::default(); + assert!(body.as_bytes().expect("buffered").is_empty()); + } + #[test] fn from_stream_maps_errors() { - let stream = futures_util::stream::iter(vec![ + let source = stream::iter(vec![ Ok(Bytes::from_static(b"ok")), Err(io::Error::other("boom")), ]); - let body = Body::from_stream(stream); - let mut stream = body.into_stream().expect("stream"); + let body = Body::from_stream(source); + let mut chunks = body.into_stream().expect("stream"); let (first, second) = block_on(async { - let first = stream.next().await.expect("first").expect("ok"); - let second = stream.next().await.expect("second"); + let first = chunks.next().await.expect("first").expect("ok"); + let second = chunks.next().await.expect("second"); (first, second) }); assert_eq!(first, Bytes::from_static(b"ok")); @@ -215,68 +265,10 @@ mod tests { assert!(err.to_string().contains("boom")); } - #[test] - fn to_json_fails_for_streaming_body() { - let body = Body::stream(futures_util::stream::iter(vec![ - Bytes::from_static(b"{"), - Bytes::from_static(b"}"), - ])); - assert!(body.to_json::().is_err()); - } - - #[test] - fn into_bytes_panics_for_stream() { - let body = Body::stream(futures_util::stream::iter(vec![Bytes::from_static( - b"data", - )])); - let result = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| body.into_bytes())); - assert!(result.is_err()); - } - - #[test] - fn as_bytes_panics_for_stream() { - let body = Body::stream(futures_util::stream::iter(vec![Bytes::from_static( - b"data", - )])); - let result = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| body.as_bytes())); - assert!(result.is_err()); - } - - #[test] - fn into_stream_returns_none_for_buffered_body() { - let body = Body::from("payload"); - assert!(body.into_stream().is_none()); - } - - #[test] - fn is_stream_returns_false_for_buffered_body() { - 
let body = Body::from("payload"); - assert!(!body.is_stream()); - } - - #[test] - fn default_body_is_empty() { - let body = Body::default(); - assert!(body.as_bytes().is_empty()); - } - - #[test] - fn debug_formats_both_body_variants() { - let buffered = Body::from("payload"); - let buffered_debug = format!("{:?}", buffered); - assert!(buffered_debug.contains("Body::Once")); - - let stream = Body::stream(futures_util::stream::iter(vec![Bytes::from_static( - b"chunk", - )])); - let stream_debug = format!("{:?}", stream); - assert!(stream_debug.contains("Body::Stream")); - } - #[test] fn from_vec_u8_builds_buffered_body() { - let body = Body::from(vec![1u8, 2u8, 3u8]); - assert_eq!(body.as_bytes(), &[1u8, 2u8, 3u8]); + let body = Body::from(vec![1_u8, 2_u8, 3_u8]); + assert_eq!(body.as_bytes().expect("buffered"), &[1_u8, 2_u8, 3_u8]); } #[test] @@ -289,13 +281,12 @@ mod tests { #[test] fn into_bytes_bounded_buffered_too_large() { let body = Body::from("hello"); - let result = block_on(body.into_bytes_bounded(3)); - assert!(result.is_err()); + block_on(body.into_bytes_bounded(3)).expect_err("body exceeds max_size"); } #[test] fn into_bytes_bounded_stream_ok() { - let body = Body::stream(futures_util::stream::iter(vec![ + let body = Body::stream(stream::iter(vec![ Bytes::from_static(b"ab"), Bytes::from_static(b"cd"), ])); @@ -305,11 +296,38 @@ mod tests { #[test] fn into_bytes_bounded_stream_too_large() { - let body = Body::stream(futures_util::stream::iter(vec![ + let body = Body::stream(stream::iter(vec![ Bytes::from_static(b"ab"), Bytes::from_static(b"cd"), ])); - let result = block_on(body.into_bytes_bounded(3)); - assert!(result.is_err()); + block_on(body.into_bytes_bounded(3)).expect_err("stream exceeds max_size"); + } + + #[test] + fn into_bytes_returns_none_for_stream() { + let body = Body::stream(stream::iter(vec![Bytes::from_static(b"data")])); + assert!(body.into_bytes().is_none()); + } + + #[test] + fn into_stream_returns_none_for_buffered_body() { + let 
body = Body::from("payload"); + assert!(body.into_stream().is_none()); + } + + #[test] + fn is_stream_returns_false_for_buffered_body() { + let body = Body::from("payload"); + assert!(!body.is_stream()); + } + + #[test] + fn to_json_fails_for_streaming_body() { + let body = Body::stream(stream::iter(vec![ + Bytes::from_static(b"{"), + Bytes::from_static(b"}"), + ])); + body.to_json::() + .expect_err("streaming body cannot deserialize as JSON"); } } diff --git a/crates/edgezero-core/src/compression.rs b/crates/edgezero-core/src/compression.rs index e2f882b..ee4bf1a 100644 --- a/crates/edgezero-core/src/compression.rs +++ b/crates/edgezero-core/src/compression.rs @@ -3,14 +3,15 @@ use std::io; use async_compression::futures::bufread::{BrotliDecoder, GzipDecoder}; use async_stream::try_stream; use bytes::Bytes; -use futures::io::{AsyncReadExt, BufReader}; +use futures::io::{AsyncReadExt as _, BufReader}; use futures::stream::Stream; use futures::TryStream; -use futures_util::TryStreamExt; +use futures_util::TryStreamExt as _; const BUFFER_SIZE: usize = 8 * 1024; /// Decode a stream of gzip-compressed chunks into plain bytes. +#[inline] pub fn decode_gzip_stream(stream: S) -> impl Stream> where S: TryStream, Error = io::Error> + Unpin, @@ -18,20 +19,25 @@ where try_stream! { let reader = BufReader::new(stream.into_async_read()); let mut decoder = GzipDecoder::new(reader); - let mut buffer = vec![0u8; BUFFER_SIZE]; + let mut buffer = vec![0_u8; BUFFER_SIZE]; loop { let read = decoder.read(&mut buffer).await?; if read == 0 { break; } - - yield Bytes::copy_from_slice(&buffer[..read]); + let chunk = buffer.get(..read).ok_or_else(|| { + io::Error::other(format!( + "decoder reported {read}-byte read into a {BUFFER_SIZE}-byte buffer" + )) + })?; + yield Bytes::copy_from_slice(chunk); } } } /// Decode a stream of brotli-compressed chunks into plain bytes. 
+#[inline] pub fn decode_brotli_stream(stream: S) -> impl Stream> where S: TryStream, Error = io::Error> + Unpin, @@ -39,15 +45,19 @@ where try_stream! { let reader = BufReader::new(stream.into_async_read()); let mut decoder = BrotliDecoder::new(reader); - let mut buffer = vec![0u8; BUFFER_SIZE]; + let mut buffer = vec![0_u8; BUFFER_SIZE]; loop { let read = decoder.read(&mut buffer).await?; if read == 0 { break; } - - yield Bytes::copy_from_slice(&buffer[..read]); + let chunk = buffer.get(..read).ok_or_else(|| { + io::Error::other(format!( + "decoder reported {read}-byte read into a {BUFFER_SIZE}-byte buffer" + )) + })?; + yield Bytes::copy_from_slice(chunk); } } } @@ -58,8 +68,8 @@ mod tests { use brotli::CompressorWriter; use flate2::{write::GzEncoder, Compression}; use futures::executor::block_on; - use futures_util::{stream, TryStreamExt}; - use std::io::Write; + use futures_util::stream; + use std::io::Write as _; #[test] fn decode_gzip_stream_yields_plain_bytes() { @@ -82,10 +92,9 @@ mod tests { #[test] fn decode_brotli_stream_yields_plain_bytes() { let mut brotli_bytes = Vec::new(); - { - let mut compressor = CompressorWriter::new(&mut brotli_bytes, 4096, 5, 21); - compressor.write_all(b"hello brotli").unwrap(); - } + let mut compressor = CompressorWriter::new(&mut brotli_bytes, 4096, 5, 21); + compressor.write_all(b"hello brotli").unwrap(); + drop(compressor); let stream = stream::iter(vec![Ok::, io::Error>(brotli_bytes)]); let decoded = block_on(async { diff --git a/crates/edgezero-core/src/config_store.rs b/crates/edgezero-core/src/config_store.rs index 5112449..6225bee 100644 --- a/crates/edgezero-core/src/config_store.rs +++ b/crates/edgezero-core/src/config_store.rs @@ -9,91 +9,6 @@ use std::sync::Arc; use anyhow::Error as AnyError; use thiserror::Error; -// --------------------------------------------------------------------------- -// Trait -// --------------------------------------------------------------------------- - -/// Errors returned by 
config-store backends. -/// -/// Missing keys are represented as `Ok(None)` from [`ConfigStore::get`]. -#[derive(Debug, Error)] -pub enum ConfigStoreError { - /// The caller asked for a key that is malformed for the active backend. - #[error("{message}")] - InvalidKey { message: String }, - /// The configured backend cannot currently serve requests. - #[error("config store unavailable: {message}")] - Unavailable { message: String }, - /// An unexpected backend or provider failure occurred. - #[error("config store error: {source}")] - Internal { source: AnyError }, -} - -impl ConfigStoreError { - /// Create an error for malformed or backend-invalid keys. - pub fn invalid_key(message: impl Into) -> Self { - Self::InvalidKey { - message: message.into(), - } - } - - /// Create an error for temporarily unavailable backends. - pub fn unavailable(message: impl Into) -> Self { - Self::Unavailable { - message: message.into(), - } - } - - /// Wrap an unexpected backend or provider failure. - pub fn internal(error: E) -> Self - where - E: Into, - { - Self::Internal { - source: error.into(), - } - } -} - -/// Object-safe interface for read-only configuration store backends. -/// -/// Implementations exist per adapter: -/// - `AxumConfigStore` (axum adapter) — env vars + in-memory defaults for dev -/// - `FastlyConfigStore` (fastly adapter) — Fastly Config Store -/// - `CloudflareConfigStore` (cloudflare adapter) — Cloudflare env bindings -pub trait ConfigStore: Send + Sync { - /// Retrieve a config value by key. Returns `None` if the key does not exist. - fn get(&self, key: &str) -> Result, ConfigStoreError>; -} - -// --------------------------------------------------------------------------- -// Handle -// --------------------------------------------------------------------------- - -/// A cloneable handle to a config store. 
-#[derive(Clone)] -pub struct ConfigStoreHandle { - store: Arc, -} - -impl fmt::Debug for ConfigStoreHandle { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("ConfigStoreHandle").finish_non_exhaustive() - } -} - -impl ConfigStoreHandle { - /// Create a new handle wrapping a config store implementation. - pub fn new(store: Arc) -> Self { - Self { store } - } - - /// Get a config value by key. - pub fn get(&self, key: &str) -> Result, ConfigStoreError> { - self.store.get(key) - } -} - // --------------------------------------------------------------------------- // Contract test macro // --------------------------------------------------------------------------- @@ -114,8 +29,8 @@ impl ConfigStoreHandle { /// edgezero_core::config_store_contract_tests!(axum_config_store_contract, { /// AxumConfigStore::new( /// [ -/// ("contract.key.a".to_string(), "value_a".to_string()), -/// ("contract.key.b".to_string(), "value_b".to_string()), +/// ("contract.key.a".to_owned(), "value_a".to_owned()), +/// ("contract.key.b".to_owned(), "value_b".to_owned()), /// ], /// [], /// ) @@ -133,7 +48,7 @@ macro_rules! config_store_contract_tests { let store = $factory; assert_eq!( store.get("contract.key.a").expect("config value"), - Some("value_a".to_string()) + Some("value_a".to_owned()) ); } @@ -148,11 +63,11 @@ macro_rules! config_store_contract_tests { let store = $factory; assert_eq!( store.get("contract.key.a").expect("first config value"), - Some("value_a".to_string()) + Some("value_a".to_owned()) ); assert_eq!( store.get("contract.key.b").expect("second config value"), - Some("value_b".to_string()) + Some("value_b".to_owned()) ); } @@ -172,7 +87,7 @@ macro_rules! config_store_contract_tests { Ok(None) => {} Ok(Some(_)) => panic!("empty key should not return a value"), Err($crate::config_store::ConfigStoreError::InvalidKey { .. 
}) => {} - Err(e) => panic!("unexpected error for empty key: {}", e), + Err(err) => panic!("unexpected error for empty key: {}", err), } } @@ -184,7 +99,7 @@ macro_rules! config_store_contract_tests { let handle = ConfigStoreHandle::new(Arc::new($factory)); assert_eq!( handle.get("contract.key.a").expect("handle value"), - Some("value_a".to_string()) + Some("value_a".to_owned()) ); assert_eq!(handle.get("contract.key.missing").expect("handle miss"), None); } @@ -212,63 +127,175 @@ macro_rules! config_store_contract_tests { }; } +// --------------------------------------------------------------------------- +// Trait +// --------------------------------------------------------------------------- + +/// Errors returned by config-store backends. +/// +/// Missing keys are represented as `Ok(None)` from [`ConfigStore::get`]. +#[derive(Debug, Error)] +#[non_exhaustive] +pub enum ConfigStoreError { + /// An unexpected backend or provider failure occurred. + #[error("config store error: {source}")] + Internal { source: AnyError }, + /// The caller asked for a key that is malformed for the active backend. + #[error("{message}")] + InvalidKey { message: String }, + /// The configured backend cannot currently serve requests. + #[error("config store unavailable: {message}")] + Unavailable { message: String }, +} + +impl ConfigStoreError { + /// Wrap an unexpected backend or provider failure. + #[inline] + pub fn internal(error: E) -> Self + where + E: Into, + { + Self::Internal { + source: error.into(), + } + } + + /// Create an error for malformed or backend-invalid keys. + #[inline] + pub fn invalid_key>(message: S) -> Self { + Self::InvalidKey { + message: message.into(), + } + } + + /// Create an error for temporarily unavailable backends. + #[inline] + pub fn unavailable>(message: S) -> Self { + Self::Unavailable { + message: message.into(), + } + } +} + +/// Object-safe interface for read-only configuration store backends. 
+/// +/// Implementations exist per adapter: +/// - `AxumConfigStore` (axum adapter) — env vars + in-memory defaults for dev +/// - `FastlyConfigStore` (fastly adapter) — Fastly Config Store +/// - `CloudflareConfigStore` (cloudflare adapter) — Cloudflare env bindings +pub trait ConfigStore: Send + Sync { + /// Retrieve a config value by key. Returns `None` if the key does not exist. + /// + /// # Errors + /// Returns [`ConfigStoreError`] if `key` is invalid or the backend is unavailable. + fn get(&self, key: &str) -> Result, ConfigStoreError>; +} + +// --------------------------------------------------------------------------- +// Handle +// --------------------------------------------------------------------------- + +/// A cloneable handle to a config store. +#[derive(Clone)] +pub struct ConfigStoreHandle { + store: Arc, +} + +impl fmt::Debug for ConfigStoreHandle { + #[inline] + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("ConfigStoreHandle").finish_non_exhaustive() + } +} + +impl ConfigStoreHandle { + /// Get a config value by key. + /// + /// # Errors + /// Returns [`ConfigStoreError`] if `key` is invalid or the backend is unavailable. + #[inline] + pub fn get(&self, key: &str) -> Result, ConfigStoreError> { + self.store.get(key) + } + + /// Create a new handle wrapping a config store implementation. + #[inline] + pub fn new(store: Arc) -> Self { + Self { store } + } +} + // --------------------------------------------------------------------------- // Tests // --------------------------------------------------------------------------- #[cfg(test)] mod tests { + // Run the shared contract tests against TestConfigStore. 
+ crate::config_store_contract_tests!( + test_config_store_contract, + TestConfigStore::new(&[("contract.key.a", "value_a"), ("contract.key.b", "value_b"),]) + ); + use super::*; use std::collections::HashMap; + struct FailingConfigStore; + struct TestConfigStore { data: HashMap, } + impl ConfigStore for FailingConfigStore { + fn get(&self, _key: &str) -> Result, ConfigStoreError> { + Err(ConfigStoreError::unavailable("backend offline")) + } + } + + impl ConfigStore for TestConfigStore { + fn get(&self, key: &str) -> Result, ConfigStoreError> { + Ok(self.data.get(key).cloned()) + } + } + impl TestConfigStore { fn new(entries: &[(&str, &str)]) -> Self { Self { data: entries .iter() - .map(|(k, v)| (k.to_string(), v.to_string())) + .map(|(key, value)| ((*key).to_owned(), (*value).to_owned())) .collect(), } } } - impl ConfigStore for TestConfigStore { - fn get(&self, key: &str) -> Result, ConfigStoreError> { - Ok(self.data.get(key).cloned()) - } - } - fn handle(entries: &[(&str, &str)]) -> ConfigStoreHandle { ConfigStoreHandle::new(Arc::new(TestConfigStore::new(entries))) } #[test] - fn config_store_get_returns_value_for_existing_key() { - let h = handle(&[("feature.checkout", "true")]); + fn config_store_get_returns_none_for_missing_key() { + let store_handle = handle(&[]); assert_eq!( - h.get("feature.checkout").expect("config value"), - Some("true".to_string()) + store_handle.get("nonexistent").expect("missing config"), + None ); } #[test] - fn config_store_get_returns_none_for_missing_key() { - let h = handle(&[]); - assert_eq!(h.get("nonexistent").expect("missing config"), None); + fn config_store_get_returns_value_for_existing_key() { + let store_handle = handle(&[("feature.checkout", "true")]); + assert_eq!( + store_handle.get("feature.checkout").expect("config value"), + Some("true".to_owned()) + ); } #[test] - fn config_store_handle_wraps_and_delegates() { - let h = handle(&[("timeout_ms", "1500")]); - assert_eq!( - h.get("timeout_ms").expect("config value"), 
- Some("1500".to_string()) - ); - assert_eq!(h.get("missing").expect("missing config"), None); + fn config_store_handle_debug_output() { + let store_handle = handle(&[]); + let debug = format!("{store_handle:?}"); + assert!(debug.contains("ConfigStoreHandle")); } #[test] @@ -284,28 +311,13 @@ mod tests { #[test] fn config_store_handle_new_accepts_arc() { let store = Arc::new(TestConfigStore::new(&[("a", "1")])); - let h = ConfigStoreHandle::new(store); + let store_handle = ConfigStoreHandle::new(store); assert_eq!( - h.get("a").expect("arc-backed config"), - Some("1".to_string()) + store_handle.get("a").expect("arc-backed config"), + Some("1".to_owned()) ); } - #[test] - fn config_store_handle_debug_output() { - let h = handle(&[]); - let debug = format!("{:?}", h); - assert!(debug.contains("ConfigStoreHandle")); - } - - struct FailingConfigStore; - - impl ConfigStore for FailingConfigStore { - fn get(&self, _key: &str) -> Result, ConfigStoreError> { - Err(ConfigStoreError::unavailable("backend offline")) - } - } - #[test] fn config_store_handle_propagates_backend_errors() { let handle = ConfigStoreHandle::new(Arc::new(FailingConfigStore)); @@ -315,9 +327,13 @@ mod tests { assert!(matches!(err, ConfigStoreError::Unavailable { .. })); } - // Run the shared contract tests against TestConfigStore. 
- crate::config_store_contract_tests!( - test_config_store_contract, - TestConfigStore::new(&[("contract.key.a", "value_a"), ("contract.key.b", "value_b"),]) - ); + #[test] + fn config_store_handle_wraps_and_delegates() { + let store_handle = handle(&[("timeout_ms", "1500")]); + assert_eq!( + store_handle.get("timeout_ms").expect("config value"), + Some("1500".to_owned()) + ); + assert_eq!(store_handle.get("missing").expect("missing config"), None); + } } diff --git a/crates/edgezero-core/src/context.rs b/crates/edgezero-core/src/context.rs index 92dd176..9e44456 100644 --- a/crates/edgezero-core/src/context.rs +++ b/crates/edgezero-core/src/context.rs @@ -10,52 +10,48 @@ use serde::de::DeserializeOwned; /// Request context exposed to handlers and middleware. pub struct RequestContext { - request: Request, path_params: PathParams, + request: Request, } impl RequestContext { - pub fn new(request: Request, params: PathParams) -> Self { - Self { - request, - path_params: params, - } - } - - pub fn request(&self) -> &Request { - &self.request - } - - pub fn request_mut(&mut self) -> &mut Request { - &mut self.request + #[inline] + pub fn body(&self) -> &Body { + self.request.body() } - pub fn into_request(self) -> Request { + #[inline] + pub fn config_store(&self) -> Option { self.request + .extensions() + .get::() + .cloned() } - pub fn path_params(&self) -> &PathParams { - &self.path_params - } - - pub fn path(&self) -> Result + /// # Errors + /// Returns [`EdgeError::bad_request`] if the body cannot be deserialized as form-urlencoded data into `T`, or the body is streaming. 
+ #[inline] + pub fn form(&self) -> Result where T: DeserializeOwned, { - self.path_params - .deserialize() - .map_err(|err| EdgeError::bad_request(format!("invalid path parameters: {}", err))) + match self.request.body() { + Body::Once(bytes) => serde_urlencoded::from_bytes(bytes.as_ref()) + .map_err(|err| EdgeError::bad_request(format!("invalid form payload: {err}"))), + Body::Stream(_) => Err(EdgeError::bad_request( + "streaming bodies are not supported for form extraction", + )), + } } - pub fn query(&self) -> Result - where - T: DeserializeOwned, - { - let query = self.request.uri().query().unwrap_or(""); - serde_urlencoded::from_str(query) - .map_err(|err| EdgeError::bad_request(format!("invalid query string: {}", err))) + #[inline] + pub fn into_request(self) -> Request { + self.request } + /// # Errors + /// Returns [`EdgeError::bad_request`] if the body is not valid JSON for `T`. + #[inline] pub fn json(&self) -> Result where T: DeserializeOwned, @@ -63,43 +59,69 @@ impl RequestContext { self.request .body() .to_json() - .map_err(|err| EdgeError::bad_request(format!("invalid JSON payload: {}", err))) + .map_err(|err| EdgeError::bad_request(format!("invalid JSON payload: {err}"))) } - pub fn body(&self) -> &Body { - self.request.body() + /// Returns the KV store handle if one was configured for this request. + #[inline] + pub fn kv_handle(&self) -> Option { + self.request.extensions().get::().cloned() } - pub fn form(&self) -> Result + #[inline] + pub fn new(request: Request, params: PathParams) -> Self { + Self { + path_params: params, + request, + } + } + + /// # Errors + /// Returns [`EdgeError::bad_request`] if the path parameters cannot be deserialized into `T`. 
+ #[inline] + pub fn path(&self) -> Result where T: DeserializeOwned, { - match self.request.body() { - Body::Once(bytes) => serde_urlencoded::from_bytes(bytes.as_ref()) - .map_err(|err| EdgeError::bad_request(format!("invalid form payload: {}", err))), - Body::Stream(_) => Err(EdgeError::bad_request( - "streaming bodies are not supported for form extraction", - )), - } + self.path_params + .deserialize() + .map_err(|err| EdgeError::bad_request(format!("invalid path parameters: {err}"))) + } + + #[inline] + pub fn path_params(&self) -> &PathParams { + &self.path_params } + #[inline] pub fn proxy_handle(&self) -> Option { self.request.extensions().get::().cloned() } - pub fn config_store(&self) -> Option { - self.request - .extensions() - .get::() - .cloned() + /// # Errors + /// Returns [`EdgeError::bad_request`] if the query string cannot be deserialized into `T`. + #[inline] + pub fn query(&self) -> Result + where + T: DeserializeOwned, + { + let query = self.request.uri().query().unwrap_or(""); + serde_urlencoded::from_str(query) + .map_err(|err| EdgeError::bad_request(format!("invalid query string: {err}"))) } - /// Returns the KV store handle if one was configured for this request. - pub fn kv_handle(&self) -> Option { - self.request.extensions().get::().cloned() + #[inline] + pub fn request(&self) -> &Request { + &self.request + } + + #[inline] + pub fn request_mut(&mut self) -> &mut Request { + &mut self.request } /// Returns the secret store handle if one was configured for this request. 
+ #[inline] pub fn secret_handle(&self) -> Option { self.request.extensions().get::().cloned() } @@ -113,10 +135,25 @@ mod tests { use crate::proxy::{ProxyClient, ProxyHandle, ProxyRequest, ProxyResponse}; use async_trait::async_trait; use bytes::Bytes; + use futures::executor::block_on; use futures::stream; use serde::{Deserialize, Serialize}; use std::collections::HashMap; + struct DummyClient; + + #[derive(Debug, PartialEq, Deserialize, Serialize)] + struct PathData { + id: String, + } + + #[async_trait(?Send)] + impl ProxyClient for DummyClient { + async fn send(&self, _request: ProxyRequest) -> Result { + Ok(ProxyResponse::new(StatusCode::OK, Body::empty())) + } + } + fn ctx(path: &str, body: Body, params: PathParams) -> RequestContext { let request = request_builder() .method(Method::GET) @@ -129,28 +166,117 @@ mod tests { fn params(map: &[(&str, &str)]) -> PathParams { let inner = map .iter() - .map(|(k, v)| (k.to_string(), v.to_string())) + .map(|(key, value)| ((*key).to_owned(), (*value).to_owned())) .collect::>(); PathParams::new(inner) } - #[derive(Debug, PartialEq, Deserialize, Serialize)] - struct PathData { - id: String, + #[test] + fn config_store_is_retrieved_when_present() { + use crate::config_store::{ConfigStore, ConfigStoreError, ConfigStoreHandle}; + use std::sync::Arc; + + struct FixedStore; + impl ConfigStore for FixedStore { + fn get(&self, _key: &str) -> Result, ConfigStoreError> { + Ok(Some("value".to_owned())) + } + } + + let mut request = request_builder() + .method(Method::GET) + .uri("/config") + .body(Body::empty()) + .expect("request"); + request + .extensions_mut() + .insert(ConfigStoreHandle::new(Arc::new(FixedStore))); + + let ctx = RequestContext::new(request, PathParams::default()); + assert!(ctx.config_store().is_some()); + assert_eq!( + ctx.config_store() + .unwrap() + .get("any") + .expect("config value"), + Some("value".to_owned()) + ); } #[test] - fn path_deserialises_successfully() { - let ctx = ctx("/items/42", 
Body::empty(), params(&[("id", "42")])); - let parsed: PathData = ctx.path().expect("path parameters"); - assert_eq!(parsed, PathData { id: "42".into() }); - let serialized = serde_json::to_string(&parsed).expect("serialize"); - assert!(serialized.contains("42")); + fn config_store_returns_none_when_absent() { + let ctx = ctx("/test", Body::empty(), PathParams::default()); + assert!(ctx.config_store().is_none()); + } + + #[test] + fn form_deserialises_successfully() { + #[derive(Deserialize, PartialEq, Debug)] + struct FormData { + name: String, + } + let body = Body::from("name=demo"); + let ctx = ctx("/submit", body, PathParams::default()); + let parsed: FormData = ctx.form().expect("form data"); + assert_eq!( + parsed, + FormData { + name: "demo".into() + } + ); + let debug = format!("{parsed:?}"); + assert!(debug.contains("demo")); + } + + #[test] + fn form_streaming_body_not_supported() { + let stream = stream::iter(vec![Ok::(Bytes::from("name=demo"))]); + let body = Body::from_stream(stream); + let ctx = ctx("/submit", body, PathParams::default()); + let err = ctx.form::().expect_err("expected error"); + assert_eq!(err.status(), StatusCode::BAD_REQUEST); + assert!(err + .message() + .contains("streaming bodies are not supported for form extraction")); + } + + #[test] + fn form_value_deserialises_successfully() { + let body = Body::from("name=demo"); + let ctx = ctx("/submit", body, PathParams::default()); + let parsed: serde_json::Value = ctx.form().expect("form data"); + assert_eq!( + parsed.get("name").and_then(|value| value.as_str()), + Some("demo") + ); + } + + #[test] + fn invalid_form_returns_bad_request() { + #[expect(dead_code, reason = "field exercised only via Deserialize")] + #[derive(Deserialize)] + struct FormData { + age: u8, + } + let body = Body::from("age=not-a-number"); + let ctx = ctx("/submit", body, PathParams::default()); + let err = ctx.form::().err().expect("expected error"); + assert_eq!(err.status(), StatusCode::BAD_REQUEST); + 
assert!(err.message().contains("invalid form payload")); + } + + #[test] + fn invalid_json_returns_bad_request() { + let body = Body::from(&b"not json"[..]); + let ctx = ctx("/echo", body, PathParams::default()); + let err = ctx.json::().expect_err("expected error"); + assert_eq!(err.status(), StatusCode::BAD_REQUEST); + assert!(err.message().contains("invalid JSON payload")); } #[test] fn invalid_path_returns_bad_request() { - #[allow(dead_code)] + #[expect(dead_code, reason = "field exercised only via Deserialize")] #[derive(Debug, Deserialize)] struct NumericPath { id: u32, @@ -163,31 +289,9 @@ mod tests { assert!(err.message().contains("invalid path parameters")); } - #[test] - fn query_deserialises_successfully() { - #[derive(Debug, Deserialize, PartialEq)] - struct Query { - page: u8, - } - let ctx = ctx("/items?page=5", Body::empty(), PathParams::default()); - let parsed: Query = ctx.query().expect("query"); - assert_eq!(parsed, Query { page: 5 }); - } - - #[test] - fn query_defaults_to_empty_when_missing() { - #[derive(Debug, Deserialize, PartialEq)] - struct Query { - page: Option, - } - let ctx = ctx("/items", Body::empty(), PathParams::default()); - let parsed: Query = ctx.query().expect("query"); - assert_eq!(parsed.page, None); - } - #[test] fn invalid_query_returns_bad_request() { - #[allow(dead_code)] + #[expect(dead_code, reason = "field exercised only via Deserialize")] #[derive(Debug, Deserialize)] struct Query { page: u8, @@ -221,77 +325,44 @@ mod tests { } #[test] - fn invalid_json_returns_bad_request() { - let body = Body::from(&b"not json"[..]); - let ctx = ctx("/echo", body, PathParams::default()); - let err = ctx.json::().expect_err("expected error"); - assert_eq!(err.status(), StatusCode::BAD_REQUEST); - assert!(err.message().contains("invalid JSON payload")); - } + fn kv_handle_is_retrieved_when_present() { + use crate::key_value_store::{KvHandle, NoopKvStore}; + use std::sync::Arc; - #[test] - fn form_deserialises_successfully() { - 
#[derive(Deserialize, PartialEq, Debug)] - struct FormData { - name: String, - } - let body = Body::from("name=demo"); - let ctx = ctx("/submit", body, PathParams::default()); - let parsed: FormData = ctx.form().expect("form data"); - assert_eq!( - parsed, - FormData { - name: "demo".into() - } - ); - let debug = format!("{:?}", parsed); - assert!(debug.contains("demo")); - } + let mut request = request_builder() + .method(Method::GET) + .uri("/kv") + .body(Body::empty()) + .expect("request"); + request + .extensions_mut() + .insert(KvHandle::new(Arc::new(NoopKvStore))); - #[test] - fn invalid_form_returns_bad_request() { - #[allow(dead_code)] - #[derive(Deserialize)] - struct FormData { - age: u8, - } - let body = Body::from("age=not-a-number"); - let ctx = ctx("/submit", body, PathParams::default()); - let err = ctx.form::().err().expect("expected error"); - assert_eq!(err.status(), StatusCode::BAD_REQUEST); - assert!(err.message().contains("invalid form payload")); + let ctx = RequestContext::new(request, PathParams::default()); + assert!(ctx.kv_handle().is_some()); } #[test] - fn form_value_deserialises_successfully() { - let body = Body::from("name=demo"); - let ctx = ctx("/submit", body, PathParams::default()); - let parsed: serde_json::Value = ctx.form().expect("form data"); - assert_eq!( - parsed.get("name").and_then(|value| value.as_str()), - Some("demo") - ); + fn kv_handle_returns_none_when_absent() { + let ctx = ctx("/test", Body::empty(), PathParams::default()); + assert!(ctx.kv_handle().is_none()); } #[test] - fn form_streaming_body_not_supported() { - let stream = stream::iter(vec![Ok::(Bytes::from("name=demo"))]); - let body = Body::from_stream(stream); - let ctx = ctx("/submit", body, PathParams::default()); - let err = ctx.form::().expect_err("expected error"); - assert_eq!(err.status(), StatusCode::BAD_REQUEST); - assert!(err - .message() - .contains("streaming bodies are not supported for form extraction")); + fn path_deserialises_successfully() 
{ + let ctx = ctx("/items/42", Body::empty(), params(&[("id", "42")])); + let parsed: PathData = ctx.path().expect("path parameters"); + assert_eq!(parsed, PathData { id: "42".into() }); + let serialized = serde_json::to_string(&parsed).expect("serialize"); + assert!(serialized.contains("42")); } - struct DummyClient; - - #[async_trait(?Send)] - impl ProxyClient for DummyClient { - async fn send(&self, _request: ProxyRequest) -> Result { - Ok(ProxyResponse::new(StatusCode::OK, Body::empty())) - } + #[test] + fn proxy_handle_forwards_with_dummy_client() { + let handle = ProxyHandle::with_client(DummyClient); + let request = ProxyRequest::new(Method::GET, Uri::from_static("https://example.com")); + let response = block_on(handle.forward(request)).expect("response"); + assert_eq!(response.status(), StatusCode::OK); } #[test] @@ -309,6 +380,28 @@ mod tests { assert!(ctx.proxy_handle().is_some()); } + #[test] + fn query_defaults_to_empty_when_missing() { + #[derive(Debug, Deserialize, PartialEq)] + struct Query { + page: Option, + } + let ctx = ctx("/items", Body::empty(), PathParams::default()); + let parsed: Query = ctx.query().expect("query"); + assert_eq!(parsed.page, None); + } + + #[test] + fn query_deserialises_successfully() { + #[derive(Debug, Deserialize, PartialEq)] + struct Query { + page: u8, + } + let ctx = ctx("/items?page=5", Body::empty(), PathParams::default()); + let parsed: Query = ctx.query().expect("query"); + assert_eq!(parsed, Query { page: 5 }); + } + #[test] fn request_context_accessors_return_expected_values() { let mut ctx = ctx( @@ -324,89 +417,16 @@ mod tests { ctx.request() .headers() .get("x-test") - .and_then(|v| v.to_str().ok()), + .and_then(|value| value.to_str().ok()), Some("value") ); assert_eq!(ctx.path_params().get("id"), Some("123")); - assert_eq!(ctx.body().as_bytes(), b"payload"); + assert_eq!(ctx.body().as_bytes().expect("buffered"), b"payload"); let request = ctx.into_request(); assert_eq!(request.uri().path(), "/items/123"); 
} - #[test] - fn proxy_handle_forwards_with_dummy_client() { - let handle = ProxyHandle::with_client(DummyClient); - let request = ProxyRequest::new(Method::GET, Uri::from_static("https://example.com")); - let response = futures::executor::block_on(handle.forward(request)).expect("response"); - assert_eq!(response.status(), StatusCode::OK); - } - - #[test] - fn config_store_is_retrieved_when_present() { - use crate::config_store::{ConfigStore, ConfigStoreHandle}; - use std::sync::Arc; - - struct FixedStore; - impl ConfigStore for FixedStore { - fn get( - &self, - _key: &str, - ) -> Result, crate::config_store::ConfigStoreError> { - Ok(Some("value".to_string())) - } - } - - let mut request = request_builder() - .method(Method::GET) - .uri("/config") - .body(Body::empty()) - .expect("request"); - request - .extensions_mut() - .insert(ConfigStoreHandle::new(Arc::new(FixedStore))); - - let ctx = RequestContext::new(request, PathParams::default()); - assert!(ctx.config_store().is_some()); - assert_eq!( - ctx.config_store() - .unwrap() - .get("any") - .expect("config value"), - Some("value".to_string()) - ); - } - - #[test] - fn config_store_returns_none_when_absent() { - let ctx = ctx("/test", Body::empty(), PathParams::default()); - assert!(ctx.config_store().is_none()); - } - - #[test] - fn kv_handle_is_retrieved_when_present() { - use crate::key_value_store::{KvHandle, NoopKvStore}; - use std::sync::Arc; - - let mut request = request_builder() - .method(Method::GET) - .uri("/kv") - .body(Body::empty()) - .expect("request"); - request - .extensions_mut() - .insert(KvHandle::new(Arc::new(NoopKvStore))); - - let ctx = RequestContext::new(request, PathParams::default()); - assert!(ctx.kv_handle().is_some()); - } - - #[test] - fn kv_handle_returns_none_when_absent() { - let ctx = ctx("/test", Body::empty(), PathParams::default()); - assert!(ctx.kv_handle().is_none()); - } - #[test] fn secret_handle_is_retrieved_when_present() { use crate::secret_store::{NoopSecretStore, 
SecretHandle}; diff --git a/crates/edgezero-core/src/error.rs b/crates/edgezero-core/src/error.rs index f1ed765..45fe861 100644 --- a/crates/edgezero-core/src/error.rs +++ b/crates/edgezero-core/src/error.rs @@ -10,49 +10,68 @@ use crate::response::{response_with_body, IntoResponse}; /// Application-level error that carries an HTTP status code. #[derive(Debug, Error)] +#[non_exhaustive] pub enum EdgeError { #[error("{message}")] BadRequest { message: String }, - #[error("no route matched path: {path}")] - NotFound { path: String }, - #[error("method {method} not allowed; allowed: {allowed}")] - MethodNotAllowed { method: Method, allowed: String }, - #[error("validation error: {message}")] - Validation { message: String }, - #[error("service unavailable: {message}")] - ServiceUnavailable { message: String }, #[error("internal error: {source}")] Internal { #[from] source: AnyError, }, + #[error("method {method} not allowed; allowed: {allowed}")] + MethodNotAllowed { method: Method, allowed: String }, + #[error("no route matched path: {path}")] + NotFound { path: String }, + #[error("service unavailable: {message}")] + ServiceUnavailable { message: String }, + #[error("validation error: {message}")] + Validation { message: String }, } impl EdgeError { - pub fn bad_request(message: impl Into) -> Self { + #[inline] + pub fn bad_request>(message: S) -> Self { EdgeError::BadRequest { message: message.into(), } } - pub fn validation(message: impl Into) -> Self { - EdgeError::Validation { - message: message.into(), + #[inline] + pub fn internal(error: E) -> Self + where + E: Into, + { + EdgeError::Internal { + source: error.into(), } } - pub fn not_found(path: impl Into) -> Self { - EdgeError::NotFound { path: path.into() } + #[must_use] + #[inline] + pub fn message(&self) -> String { + match self { + EdgeError::BadRequest { message } + | EdgeError::Validation { message } + | EdgeError::ServiceUnavailable { message } => message.clone(), + EdgeError::NotFound { path } => 
format!("no route matched path: {path}"), + EdgeError::MethodNotAllowed { method, allowed } => { + format!("method {method} not allowed; allowed: {allowed}") + } + EdgeError::Internal { source } => format!("internal error: {source}"), + } } + #[must_use] + #[inline] pub fn method_not_allowed(method: &Method, allowed: &[Method]) -> Self { let mut names = allowed .iter() - .map(|m| m.as_str().to_string()) + .map(|name| name.as_str().to_owned()) .collect::>(); names.sort(); let allowed_list = if names.is_empty() { - "(none)".to_string() + "(none)".to_owned() } else { names.join(", ") }; @@ -62,21 +81,41 @@ impl EdgeError { } } - pub fn internal(error: E) -> Self - where - E: Into, - { - EdgeError::Internal { - source: error.into(), - } + #[inline] + pub fn not_found>(path: S) -> Self { + EdgeError::NotFound { path: path.into() } } - pub fn service_unavailable(message: impl Into) -> Self { + #[inline] + pub fn service_unavailable>(message: S) -> Self { EdgeError::ServiceUnavailable { message: message.into(), } } + /// Typed access to the wrapped [`AnyError`] for `EdgeError::Internal`. + /// Shadows [`std::error::Error::source`] (auto-derived by `thiserror`) + /// intentionally — the trait method returns a `&dyn Error`, this one + /// returns the concrete `&anyhow::Error` so callers can downcast. + #[expect( + clippy::same_name_method, + reason = "intentional: typed alternative to the trait-object Error::source" + )] + #[must_use] + #[inline] + pub fn source(&self) -> Option<&AnyError> { + match self { + EdgeError::Internal { source } => Some(source), + EdgeError::BadRequest { .. } + | EdgeError::NotFound { .. } + | EdgeError::MethodNotAllowed { .. } + | EdgeError::Validation { .. } + | EdgeError::ServiceUnavailable { .. } => None, + } + } + + #[must_use] + #[inline] pub fn status(&self) -> StatusCode { match self { EdgeError::BadRequest { .. 
} => StatusCode::BAD_REQUEST, @@ -88,28 +127,16 @@ impl EdgeError { } } - pub fn message(&self) -> String { - match self { - EdgeError::BadRequest { message } => message.clone(), - EdgeError::Validation { message } => message.clone(), - EdgeError::NotFound { path } => format!("no route matched path: {path}"), - EdgeError::MethodNotAllowed { method, allowed } => { - format!("method {} not allowed; allowed: {}", method, allowed) - } - EdgeError::ServiceUnavailable { message } => message.clone(), - EdgeError::Internal { source } => format!("internal error: {}", source), - } - } - - pub fn source(&self) -> Option<&AnyError> { - match self { - EdgeError::Internal { source } => Some(source), - _ => None, + #[inline] + pub fn validation>(message: S) -> Self { + EdgeError::Validation { + message: message.into(), } } } impl From for EdgeError { + #[inline] fn from(err: ConfigStoreError) -> Self { match err { ConfigStoreError::InvalidKey { message } => EdgeError::bad_request(message), @@ -119,12 +146,9 @@ impl From for EdgeError { } } -fn json_or_text(payload: &T) -> Body { - Body::json(payload).unwrap_or_else(|_| Body::text("internal error")) -} - impl IntoResponse for EdgeError { - fn into_response(self) -> Response { + #[inline] + fn into_response(self) -> Result { let payload = json!({ "error": { "status": self.status().as_u16(), @@ -133,19 +157,24 @@ impl IntoResponse for EdgeError { }); let body = json_or_text(&payload); - let mut response = response_with_body(self.status(), body); + let mut response = response_with_body(self.status(), body)?; response .headers_mut() .insert(CONTENT_TYPE, HeaderValue::from_static("application/json")); - response + Ok(response) } } +fn json_or_text(payload: &T) -> Body { + Body::json(payload).unwrap_or_else(|_| Body::text("internal error")) +} + #[cfg(test)] mod tests { use super::*; use crate::http::Method; use serde::ser; + use std::str; #[test] fn bad_request_sets_status_and_message() { @@ -155,47 +184,17 @@ mod tests { } #[test] - 
fn method_not_allowed_lists_methods_sorted() { - let err = EdgeError::method_not_allowed(&Method::POST, &[Method::GET, Method::DELETE]); - assert_eq!(err.status(), StatusCode::METHOD_NOT_ALLOWED); - assert!(err.message().contains("allowed: DELETE, GET")); - } - - #[test] - fn internal_wraps_source_error() { - let err = EdgeError::internal(anyhow::anyhow!("boom")); + fn config_store_error_internal_maps_to_internal_server_error() { + let err = EdgeError::from(ConfigStoreError::internal(anyhow::anyhow!("boom"))); assert_eq!(err.status(), StatusCode::INTERNAL_SERVER_ERROR); - assert!(err.message().contains("internal error: boom")); - assert!(err.source().is_some()); - } - - #[test] - fn not_found_sets_status_and_message() { - let err = EdgeError::not_found("/missing"); - assert_eq!(err.status(), StatusCode::NOT_FOUND); - assert!(err.message().contains("/missing")); - } - - #[test] - fn validation_sets_status_and_message() { - let err = EdgeError::validation("invalid input"); - assert_eq!(err.status(), StatusCode::UNPROCESSABLE_ENTITY); - assert_eq!(err.message(), "invalid input"); - assert!(err.source().is_none()); - } - - #[test] - fn method_not_allowed_handles_empty_allowed_list() { - let err = EdgeError::method_not_allowed(&Method::GET, &[]); - assert_eq!(err.status(), StatusCode::METHOD_NOT_ALLOWED); - assert!(err.message().contains("(none)")); + assert!(err.message().contains("boom")); } #[test] - fn service_unavailable_sets_status_and_message() { - let err = EdgeError::service_unavailable("config store unavailable"); - assert_eq!(err.status(), StatusCode::SERVICE_UNAVAILABLE); - assert_eq!(err.message(), "config store unavailable"); + fn config_store_error_invalid_key_maps_to_bad_request() { + let err = EdgeError::from(ConfigStoreError::invalid_key("invalid config key")); + assert_eq!(err.status(), StatusCode::BAD_REQUEST); + assert_eq!(err.message(), "invalid config key"); } #[test] @@ -206,17 +205,27 @@ mod tests { } #[test] - fn 
config_store_error_invalid_key_maps_to_bad_request() { - let err = EdgeError::from(ConfigStoreError::invalid_key("invalid config key")); - assert_eq!(err.status(), StatusCode::BAD_REQUEST); - assert_eq!(err.message(), "invalid config key"); + fn internal_wraps_source_error() { + let err = EdgeError::internal(anyhow::anyhow!("boom")); + assert_eq!(err.status(), StatusCode::INTERNAL_SERVER_ERROR); + assert!(err.message().contains("internal error: boom")); + assert!(err.source().is_some()); } #[test] - fn config_store_error_internal_maps_to_internal_server_error() { - let err = EdgeError::from(ConfigStoreError::internal(anyhow::anyhow!("boom"))); - assert_eq!(err.status(), StatusCode::INTERNAL_SERVER_ERROR); - assert!(err.message().contains("boom")); + fn into_response_sets_json_payload() { + let response = EdgeError::bad_request("invalid") + .into_response() + .expect("response"); + assert_eq!(response.status(), StatusCode::BAD_REQUEST); + let content_type = response + .headers() + .get(CONTENT_TYPE) + .expect("content-type header"); + assert_eq!(content_type, HeaderValue::from_static("application/json")); + + let body = response.into_body().into_bytes().expect("buffered"); + assert!(str::from_utf8(body.as_ref()).unwrap().contains("invalid")); } #[test] @@ -233,22 +242,42 @@ mod tests { } let body = json_or_text(&FailingSerialize); - assert_eq!(body.as_bytes(), b"internal error"); + assert_eq!(body.as_bytes().expect("buffered"), b"internal error"); } #[test] - fn into_response_sets_json_payload() { - let response = EdgeError::bad_request("invalid").into_response(); - assert_eq!(response.status(), StatusCode::BAD_REQUEST); - let content_type = response - .headers() - .get(CONTENT_TYPE) - .expect("content-type header"); - assert_eq!(content_type, HeaderValue::from_static("application/json")); + fn method_not_allowed_handles_empty_allowed_list() { + let err = EdgeError::method_not_allowed(&Method::GET, &[]); + assert_eq!(err.status(), StatusCode::METHOD_NOT_ALLOWED); + 
assert!(err.message().contains("(none)")); + } + + #[test] + fn method_not_allowed_lists_methods_sorted() { + let err = EdgeError::method_not_allowed(&Method::POST, &[Method::GET, Method::DELETE]); + assert_eq!(err.status(), StatusCode::METHOD_NOT_ALLOWED); + assert!(err.message().contains("allowed: DELETE, GET")); + } - let body = response.into_body().into_bytes(); - assert!(std::str::from_utf8(body.as_ref()) - .unwrap() - .contains("invalid")); + #[test] + fn not_found_sets_status_and_message() { + let err = EdgeError::not_found("/missing"); + assert_eq!(err.status(), StatusCode::NOT_FOUND); + assert!(err.message().contains("/missing")); + } + + #[test] + fn service_unavailable_sets_status_and_message() { + let err = EdgeError::service_unavailable("config store unavailable"); + assert_eq!(err.status(), StatusCode::SERVICE_UNAVAILABLE); + assert_eq!(err.message(), "config store unavailable"); + } + + #[test] + fn validation_sets_status_and_message() { + let err = EdgeError::validation("invalid input"); + assert_eq!(err.status(), StatusCode::UNPROCESSABLE_ENTITY); + assert_eq!(err.message(), "invalid input"); + assert!(err.source().is_none()); } } diff --git a/crates/edgezero-core/src/extractor.rs b/crates/edgezero-core/src/extractor.rs index 0d9e156..5bbde8e 100644 --- a/crates/edgezero-core/src/extractor.rs +++ b/crates/edgezero-core/src/extractor.rs @@ -8,6 +8,8 @@ use validator::Validate; use crate::context::RequestContext; use crate::error::EdgeError; use crate::http::HeaderMap; +use crate::key_value_store::KvHandle; +use crate::secret_store::SecretHandle; #[async_trait(?Send)] pub trait FromRequest: Sized { @@ -21,6 +23,7 @@ impl FromRequest for Json where T: DeserializeOwned + Send + 'static, { + #[inline] async fn from_request(ctx: &RequestContext) -> Result { ctx.json().map(Json) } @@ -29,18 +32,21 @@ where impl Deref for Json { type Target = T; + #[inline] fn deref(&self) -> &Self::Target { &self.0 } } impl DerefMut for Json { + #[inline] fn 
deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } impl Json { + #[inline] pub fn into_inner(self) -> T { self.0 } @@ -53,6 +59,7 @@ impl FromRequest for ValidatedJson where T: DeserializeOwned + Validate + Send + 'static, { + #[inline] async fn from_request(ctx: &RequestContext) -> Result { let Json(value) = Json::::from_request(ctx).await?; value @@ -65,18 +72,21 @@ where impl Deref for ValidatedJson { type Target = T; + #[inline] fn deref(&self) -> &Self::Target { &self.0 } } impl DerefMut for ValidatedJson { + #[inline] fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } impl ValidatedJson { + #[inline] pub fn into_inner(self) -> T { self.0 } @@ -86,6 +96,7 @@ pub struct Headers(pub HeaderMap); #[async_trait(?Send)] impl FromRequest for Headers { + #[inline] async fn from_request(ctx: &RequestContext) -> Result { Ok(Headers(ctx.request().headers().clone())) } @@ -94,18 +105,22 @@ impl FromRequest for Headers { impl Deref for Headers { type Target = HeaderMap; + #[inline] fn deref(&self) -> &Self::Target { &self.0 } } impl DerefMut for Headers { + #[inline] fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } impl Headers { + #[must_use] + #[inline] pub fn into_inner(self) -> HeaderMap { self.0 } @@ -126,13 +141,14 @@ pub struct Host(pub String); #[async_trait(?Send)] impl FromRequest for Host { + #[inline] async fn from_request(ctx: &RequestContext) -> Result { let headers = ctx.request().headers(); let host = headers .get(header::HOST) - .and_then(|v| v.to_str().ok()) + .and_then(|value| value.to_str().ok()) .unwrap_or("localhost") - .to_string(); + .to_owned(); Ok(Host(host)) } } @@ -140,12 +156,15 @@ impl FromRequest for Host { impl Deref for Host { type Target = String; + #[inline] fn deref(&self) -> &Self::Target { &self.0 } } impl Host { + #[must_use] + #[inline] pub fn into_inner(self) -> String { self.0 } @@ -171,14 +190,15 @@ pub struct ForwardedHost(pub String); #[async_trait(?Send)] impl FromRequest for ForwardedHost { + 
#[inline] async fn from_request(ctx: &RequestContext) -> Result { let headers = ctx.request().headers(); let host = headers .get("x-forwarded-host") .or_else(|| headers.get(header::HOST)) - .and_then(|v| v.to_str().ok()) + .and_then(|value| value.to_str().ok()) .unwrap_or("localhost") - .to_string(); + .to_owned(); Ok(ForwardedHost(host)) } } @@ -186,12 +206,15 @@ impl FromRequest for ForwardedHost { impl Deref for ForwardedHost { type Target = String; + #[inline] fn deref(&self) -> &Self::Target { &self.0 } } impl ForwardedHost { + #[must_use] + #[inline] pub fn into_inner(self) -> String { self.0 } @@ -204,6 +227,7 @@ impl FromRequest for Query where T: DeserializeOwned + Send + 'static, { + #[inline] async fn from_request(ctx: &RequestContext) -> Result { ctx.query().map(Query) } @@ -212,18 +236,21 @@ where impl Deref for Query { type Target = T; + #[inline] fn deref(&self) -> &Self::Target { &self.0 } } impl DerefMut for Query { + #[inline] fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } impl Query { + #[inline] pub fn into_inner(self) -> T { self.0 } @@ -236,6 +263,7 @@ impl FromRequest for ValidatedQuery where T: DeserializeOwned + Validate + Send + 'static, { + #[inline] async fn from_request(ctx: &RequestContext) -> Result { let Query(value) = Query::::from_request(ctx).await?; value @@ -248,18 +276,21 @@ where impl Deref for ValidatedQuery { type Target = T; + #[inline] fn deref(&self) -> &Self::Target { &self.0 } } impl DerefMut for ValidatedQuery { + #[inline] fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } impl ValidatedQuery { + #[inline] pub fn into_inner(self) -> T { self.0 } @@ -272,6 +303,7 @@ impl FromRequest for Path where T: DeserializeOwned + Send + 'static, { + #[inline] async fn from_request(ctx: &RequestContext) -> Result { ctx.path().map(Path) } @@ -280,18 +312,21 @@ where impl Deref for Path { type Target = T; + #[inline] fn deref(&self) -> &Self::Target { &self.0 } } impl DerefMut for Path { + #[inline] fn 
deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } impl Path { + #[inline] pub fn into_inner(self) -> T { self.0 } @@ -304,6 +339,7 @@ impl FromRequest for ValidatedPath where T: DeserializeOwned + Validate + Send + 'static, { + #[inline] async fn from_request(ctx: &RequestContext) -> Result { let Path(value) = Path::::from_request(ctx).await?; value @@ -316,18 +352,21 @@ where impl Deref for ValidatedPath { type Target = T; + #[inline] fn deref(&self) -> &Self::Target { &self.0 } } impl DerefMut for ValidatedPath { + #[inline] fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } impl ValidatedPath { + #[inline] pub fn into_inner(self) -> T { self.0 } @@ -340,6 +379,7 @@ impl FromRequest for Form where T: DeserializeOwned + Send + 'static, { + #[inline] async fn from_request(ctx: &RequestContext) -> Result { ctx.form().map(Form) } @@ -348,18 +388,21 @@ where impl Deref for Form { type Target = T; + #[inline] fn deref(&self) -> &Self::Target { &self.0 } } impl DerefMut for Form { + #[inline] fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } impl Form { + #[inline] pub fn into_inner(self) -> T { self.0 } @@ -372,6 +415,7 @@ impl FromRequest for ValidatedForm where T: DeserializeOwned + Validate + Send + 'static, { + #[inline] async fn from_request(ctx: &RequestContext) -> Result { let Form(value) = Form::::from_request(ctx).await?; value @@ -384,18 +428,21 @@ where impl Deref for ValidatedForm { type Target = T; + #[inline] fn deref(&self) -> &Self::Target { &self.0 } } impl DerefMut for ValidatedForm { + #[inline] fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } impl ValidatedForm { + #[inline] pub fn into_inner(self) -> T { self.0 } @@ -415,10 +462,11 @@ impl ValidatedForm { /// } /// ``` #[derive(Debug)] -pub struct Kv(pub crate::key_value_store::KvHandle); +pub struct Kv(pub KvHandle); #[async_trait(?Send)] impl FromRequest for Kv { + #[inline] async fn from_request(ctx: &RequestContext) -> Result { 
ctx.kv_handle().map(Kv).ok_or_else(|| { EdgeError::internal(anyhow::anyhow!( @@ -428,22 +476,26 @@ impl FromRequest for Kv { } } -impl std::ops::Deref for Kv { - type Target = crate::key_value_store::KvHandle; +impl Deref for Kv { + type Target = KvHandle; + #[inline] fn deref(&self) -> &Self::Target { &self.0 } } -impl std::ops::DerefMut for Kv { +impl DerefMut for Kv { + #[inline] fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } impl Kv { - pub fn into_inner(self) -> crate::key_value_store::KvHandle { + #[must_use] + #[inline] + pub fn into_inner(self) -> KvHandle { self.0 } } @@ -461,10 +513,11 @@ impl Kv { /// } /// ``` #[derive(Debug)] -pub struct Secrets(pub crate::secret_store::SecretHandle); +pub struct Secrets(pub SecretHandle); #[async_trait(?Send)] impl FromRequest for Secrets { + #[inline] async fn from_request(ctx: &RequestContext) -> Result { // ctx.secret_handle() returns a handle object, not secret bytes. // The error message below contains only store configuration info — no secret values @@ -477,22 +530,26 @@ impl FromRequest for Secrets { } } -impl std::ops::Deref for Secrets { - type Target = crate::secret_store::SecretHandle; +impl Deref for Secrets { + type Target = SecretHandle; + #[inline] fn deref(&self) -> &Self::Target { &self.0 } } -impl std::ops::DerefMut for Secrets { +impl DerefMut for Secrets { + #[inline] fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } impl Secrets { - pub fn into_inner(self) -> crate::secret_store::SecretHandle { + #[must_use] + #[inline] + pub fn into_inner(self) -> SecretHandle { self.0 } } @@ -509,21 +566,15 @@ mod tests { use std::collections::HashMap; use validator::Validate; - fn ctx(body: Body, params: PathParams) -> RequestContext { - let request = request_builder() - .method(Method::POST) - .uri("/test") - .body(body) - .expect("request"); - RequestContext::new(request, params) + #[derive(Debug, Deserialize, PartialEq)] + struct FormData { + age: Option, + username: String, } - 
fn params(values: &[(&str, &str)]) -> PathParams { - let map = values - .iter() - .map(|(k, v)| (k.to_string(), v.to_string())) - .collect::>(); - PathParams::new(map) + #[derive(Debug, Deserialize, PartialEq)] + struct PathPayload { + id: String, } #[derive(Debug, Deserialize, Serialize, PartialEq)] @@ -531,17 +582,74 @@ mod tests { name: String, } + #[derive(Debug, Deserialize, PartialEq)] + struct QueryParams { + page: Option, + #[serde(rename = "q")] + query_term: Option, + } + + #[derive(Debug, Deserialize, Validate)] + struct ValidatedFormData { + #[validate(length(min = 3_u64))] + username: String, + } + #[derive(Debug, Deserialize, Serialize, Validate)] struct ValidatedPayload { - #[validate(length(min = 1))] + #[validate(length(min = 1_u64))] name: String, } - #[derive(Debug, Deserialize, PartialEq)] - struct PathPayload { + #[derive(Debug, Deserialize, Validate)] + struct ValidatedPathParams { + #[validate(length(min = 1_u64, max = 10_u64))] id: String, } + #[derive(Debug, Deserialize, Validate)] + struct ValidatedQueryParams { + #[validate(range(min = 1_u32, max = 100_u32))] + page: u32, + } + + fn ctx(body: Body, params: PathParams) -> RequestContext { + let request = request_builder() + .method(Method::POST) + .uri("/test") + .body(body) + .expect("request"); + RequestContext::new(request, params) + } + + fn ctx_with_form(body: &str) -> RequestContext { + let request = request_builder() + .method(Method::POST) + .uri("/test") + .header("content-type", "application/x-www-form-urlencoded") + .body(Body::from(body.to_owned())) + .expect("request"); + RequestContext::new(request, PathParams::default()) + } + + fn ctx_with_query(query: &str) -> RequestContext { + let uri = format!("/test?{query}"); + let request = request_builder() + .method(Method::GET) + .uri(uri) + .body(Body::empty()) + .expect("request"); + RequestContext::new(request, PathParams::default()) + } + + fn params(values: &[(&str, &str)]) -> PathParams { + let map = values + .iter() + 
.map(|(key, value)| ((*key).to_owned(), (*value).to_owned())) + .collect::>(); + PathParams::new(map) + } + #[test] fn json_extractor_parses_payload() { let body = Body::json(&Payload { @@ -564,7 +672,10 @@ mod tests { #[test] fn validated_json_rejects_invalid_payloads() { - let body = Body::json(&ValidatedPayload { name: "".into() }).expect("json"); + let body = Body::json(&ValidatedPayload { + name: String::new(), + }) + .expect("json"); let ctx = ctx(body, PathParams::default()); let err = block_on(ValidatedJson::::from_request(&ctx)) .err() @@ -587,34 +698,20 @@ mod tests { .insert("x-test", HeaderValue::from_static("value")); let headers = block_on(Headers::from_request(&ctx)).expect("headers"); assert_eq!( - headers.get("x-test").and_then(|v| v.to_str().ok()).unwrap(), + headers + .get("x-test") + .and_then(|value| value.to_str().ok()) + .unwrap(), "value" ); } - // Query extractor tests - #[derive(Debug, Deserialize, PartialEq)] - struct QueryParams { - page: Option, - q: Option, - } - - fn ctx_with_query(query: &str) -> RequestContext { - let uri = format!("/test?{}", query); - let request = request_builder() - .method(Method::GET) - .uri(uri) - .body(Body::empty()) - .expect("request"); - RequestContext::new(request, PathParams::default()) - } - #[test] fn query_extractor_parses_params() { let ctx = ctx_with_query("page=5&q=hello"); let query = block_on(Query::::from_request(&ctx)).expect("query"); assert_eq!(query.page, Some(5)); - assert_eq!(query.q.as_deref(), Some("hello")); + assert_eq!(query.query_term.as_deref(), Some("hello")); } #[test] @@ -622,7 +719,7 @@ mod tests { let ctx = ctx_with_query("page=1"); let query = block_on(Query::::from_request(&ctx)).expect("query"); assert_eq!(query.page, Some(1)); - assert_eq!(query.q, None); + assert_eq!(query.query_term, None); } #[test] @@ -635,13 +732,7 @@ mod tests { let ctx = RequestContext::new(request, PathParams::default()); let query = block_on(Query::::from_request(&ctx)).expect("query"); 
assert_eq!(query.page, None); - assert_eq!(query.q, None); - } - - #[derive(Debug, Deserialize, Validate)] - struct ValidatedQueryParams { - #[validate(range(min = 1, max = 100))] - page: u32, + assert_eq!(query.query_term, None); } #[test] @@ -661,23 +752,6 @@ mod tests { assert_eq!(err.status(), StatusCode::UNPROCESSABLE_ENTITY); } - // Form extractor tests - fn ctx_with_form(body: &str) -> RequestContext { - let request = request_builder() - .method(Method::POST) - .uri("/test") - .header("content-type", "application/x-www-form-urlencoded") - .body(Body::from(body.to_string())) - .expect("request"); - RequestContext::new(request, PathParams::default()) - } - - #[derive(Debug, Deserialize, PartialEq)] - struct FormData { - username: String, - age: Option, - } - #[test] fn form_extractor_parses_urlencoded_body() { let ctx = ctx_with_form("username=alice&age=30"); @@ -694,12 +768,6 @@ mod tests { assert_eq!(form.age, None); } - #[derive(Debug, Deserialize, Validate)] - struct ValidatedFormData { - #[validate(length(min = 3))] - username: String, - } - #[test] fn validated_form_accepts_valid_data() { let ctx = ctx_with_form("username=alice"); @@ -716,13 +784,6 @@ mod tests { assert_eq!(err.status(), StatusCode::UNPROCESSABLE_ENTITY); } - // ValidatedPath tests - #[derive(Debug, Deserialize, Validate)] - struct ValidatedPathParams { - #[validate(length(min = 1, max = 10))] - id: String, - } - #[test] fn validated_path_accepts_valid_params() { let ctx = ctx(Body::empty(), params(&[("id", "abc123")])); @@ -762,7 +823,7 @@ mod tests { fn query_deref_and_into_inner() { let query = Query(QueryParams { page: Some(1), - q: None, + query_term: None, }); assert_eq!(query.page, Some(1)); // Deref let inner = query.into_inner(); @@ -773,7 +834,7 @@ mod tests { fn query_deref_mut() { let mut query = Query(QueryParams { page: Some(1), - q: None, + query_term: None, }); query.page = Some(2); // DerefMut assert_eq!(query.page, Some(2)); @@ -946,7 +1007,7 @@ mod tests { #[test] fn 
host_deref_and_into_inner() { - let host = Host("example.com".to_string()); + let host = Host("example.com".to_owned()); assert_eq!(&*host, "example.com"); // Deref let inner = host.into_inner(); assert_eq!(inner, "example.com"); @@ -1000,7 +1061,7 @@ mod tests { #[test] fn forwarded_host_deref_and_into_inner() { - let host = ForwardedHost("example.com".to_string()); + let host = ForwardedHost("example.com".to_owned()); assert_eq!(&*host, "example.com"); // Deref let inner = host.into_inner(); assert_eq!(inner, "example.com"); @@ -1023,8 +1084,7 @@ mod tests { .insert(KvHandle::new(Arc::new(NoopKvStore))); let ctx = RequestContext::new(request, PathParams::default()); - let kv = block_on(Kv::from_request(&ctx)); - assert!(kv.is_ok()); + block_on(Kv::from_request(&ctx)).expect("Kv extractor when handle present"); } #[test] @@ -1049,14 +1109,14 @@ mod tests { let kv = Kv(handle); // Debug works - let debug = format!("{:?}", kv); + let debug = format!("{kv:?}"); assert!(debug.contains("Kv")); // Deref works let _: &KvHandle = &kv; // into_inner works - let _inner: KvHandle = kv.into_inner(); + let _inner = kv.into_inner(); } // -- Secrets extractor -------------------------------------------------- @@ -1075,8 +1135,7 @@ mod tests { .extensions_mut() .insert(SecretHandle::new(Arc::new(NoopSecretStore))); let ctx = RequestContext::new(request, PathParams::default()); - let result = block_on(Secrets::from_request(&ctx)); - assert!(result.is_ok()); + block_on(Secrets::from_request(&ctx)).expect("Secrets extractor when handle present"); } #[test] diff --git a/crates/edgezero-core/src/handler.rs b/crates/edgezero-core/src/handler.rs index 0696c91..17fd483 100644 --- a/crates/edgezero-core/src/handler.rs +++ b/crates/edgezero-core/src/handler.rs @@ -16,12 +16,10 @@ where Fut: Future> + 'static, Res: IntoResponse, { + #[inline] fn call(&self, ctx: RequestContext) -> HandlerFuture { let fut = (self)(ctx); - Box::pin(async move { - let response = fut.await?.into_response(); - 
Ok(response) - }) + Box::pin(async move { fut.await?.into_response() }) } } @@ -35,6 +33,7 @@ impl IntoHandler for H where H: DynHandler + Sized + 'static, { + #[inline] fn into_handler(self) -> BoxHandler { Arc::new(self) } diff --git a/crates/edgezero-core/src/http.rs b/crates/edgezero-core/src/http.rs index 871d9a3..60ead49 100644 --- a/crates/edgezero-core/src/http.rs +++ b/crates/edgezero-core/src/http.rs @@ -1,31 +1,49 @@ +/// Re-exports of [`http::header`] used by adapters and handlers. +pub mod header { + #![expect( + clippy::pub_use, + reason = "header constants/types must be re-exported through this module to satisfy the \ + CLAUDE.md `edgezero_core::http` facade rule; downstream code must not depend on \ + the `http` crate directly" + )] + pub use http::header::*; +} + use std::future::Future; use std::pin::Pin; +use http::request::Builder as HttpRequestBuilder; +use http::response::Builder as HttpResponseBuilder; + use crate::body::Body; use crate::error::EdgeError; -pub use http::header; -pub use http::request::Builder as RequestBuilder; -pub use http::response::Builder as ResponseBuilder; - -pub type Method = http::Method; -pub type StatusCode = http::StatusCode; +// CLAUDE.md mandates that application code never imports from the `http` +// crate directly — every HTTP type must come through `edgezero_core::http`. +// `Builder` types are exposed via `pub type` aliases (not `pub use`) so +// only the `header` re-export remains, scoped to its own child module. 
+pub type Extensions = http::Extensions; +pub type HandlerFuture = Pin> + 'static>>; pub type HeaderMap = http::HeaderMap; +pub type HeaderName = header::HeaderName; pub type HeaderValue = http::HeaderValue; -pub type HeaderName = http::header::HeaderName; +pub type Method = http::Method; +pub type Request = http::Request; +pub type RequestBuilder = HttpRequestBuilder; +pub type Response = http::Response; +pub type ResponseBuilder = HttpResponseBuilder; +pub type StatusCode = http::StatusCode; pub type Uri = http::Uri; pub type Version = http::Version; -pub type Extensions = http::Extensions; +#[must_use] +#[inline] pub fn request_builder() -> RequestBuilder { http::Request::builder() } +#[must_use] +#[inline] pub fn response_builder() -> ResponseBuilder { http::Response::builder() } - -pub type Request = http::Request; -pub type Response = http::Response; - -pub type HandlerFuture = Pin> + 'static>>; diff --git a/crates/edgezero-core/src/key_value_store.rs b/crates/edgezero-core/src/key_value_store.rs index 1e7b535..04cca5d 100644 --- a/crates/edgezero-core/src/key_value_store.rs +++ b/crates/edgezero-core/src/key_value_store.rs @@ -55,188 +55,266 @@ use serde::{Deserialize, Serialize}; use crate::error::EdgeError; // --------------------------------------------------------------------------- -// Error +// Contract test macro // --------------------------------------------------------------------------- -/// Errors returned by KV store operations. -#[derive(Debug, thiserror::Error)] -pub enum KvError { - /// The requested key was not found (used by `delete` when strict). - #[error("key not found: {key}")] - NotFound { key: String }, +/// Generate a suite of contract tests for any [`KvStore`] implementation. +/// +/// The macro takes the module name and a factory expression that produces a +/// fresh store instance (implementing `KvStore`). It generates a module +/// containing tests that verify the fundamental behaviours every backend +/// must satisfy. 
+/// +/// # Example +/// +/// ```rust,ignore +/// edgezero_core::key_value_store_contract_tests!(persistent_kv_contract, { +/// let db_path = std::env::temp_dir().join(format!( +/// "edgezero-contract-{}-{:?}.redb", +/// std::process::id(), +/// std::thread::current().id() +/// )); +/// PersistentKvStore::new(db_path).unwrap() +/// }); +/// ``` +#[macro_export] +macro_rules! key_value_store_contract_tests { + ($mod_name:ident, $factory:expr) => { + mod $mod_name { + use super::*; + use bytes::Bytes; + use $crate::key_value_store::KvStore; - /// The KV store backend is temporarily unavailable. - #[error("kv store unavailable")] - Unavailable, + fn run(future: Fut) -> Fut::Output { + ::futures::executor::block_on(future) + } - /// A validation error (e.g., invalid key or value). - #[error("validation error: {0}")] - Validation(String), + #[test] + fn contract_put_and_get() { + let store = $factory; + run(async { + store.put_bytes("k", Bytes::from("v")).await.unwrap(); + assert_eq!(store.get_bytes("k").await.unwrap(), Some(Bytes::from("v"))); + }); + } - /// A serialization or deserialization error. - #[error("serialization error: {0}")] - Serialization(#[from] serde_json::Error), + #[test] + fn contract_get_missing_returns_none() { + let store = $factory; + run(async { + assert_eq!(store.get_bytes("missing").await.unwrap(), None); + }); + } - /// A general internal error. - #[error("kv store error: {0}")] - Internal(#[from] anyhow::Error), -} + #[test] + fn contract_put_overwrites() { + let store = $factory; + run(async { + store.put_bytes("k", Bytes::from("first")).await.unwrap(); + store.put_bytes("k", Bytes::from("second")).await.unwrap(); + assert_eq!( + store.get_bytes("k").await.unwrap(), + Some(Bytes::from("second")) + ); + }); + } -/// A single page of keys from a KV listing operation. -/// -/// The `cursor` is opaque. Pass it back to `list_keys_page` to continue -/// listing from the next page. `None` means the current page is the last page. 
-#[derive(Debug, Clone, Default, PartialEq, Eq)] -pub struct KvPage { - pub keys: Vec, - pub cursor: Option, -} + #[test] + fn contract_delete_removes_key() { + let store = $factory; + run(async { + store.put_bytes("k", Bytes::from("v")).await.unwrap(); + store.delete("k").await.unwrap(); + assert_eq!(store.get_bytes("k").await.unwrap(), None); + }); + } -#[derive(Debug, Serialize, Deserialize)] -struct KvCursorEnvelope { - prefix: String, - cursor: String, -} + #[test] + fn contract_delete_nonexistent_ok() { + let store = $factory; + run(async { + store.delete("nope").await.unwrap(); + }); + } -impl From for EdgeError { - fn from(err: KvError) -> Self { - match err { - KvError::NotFound { key } => EdgeError::not_found(format!("kv key: {key}")), - KvError::Unavailable => EdgeError::service_unavailable("kv store unavailable"), - KvError::Validation(e) => EdgeError::bad_request(format!("kv validation error: {e}")), - KvError::Serialization(e) => { - EdgeError::internal(anyhow::anyhow!("kv serialization error: {e}")) + #[test] + fn contract_exists() { + let store = $factory; + run(async { + assert!(!store.exists("k").await.unwrap()); + store.put_bytes("k", Bytes::from("v")).await.unwrap(); + assert!(store.exists("k").await.unwrap()); + store.delete("k").await.unwrap(); + assert!(!store.exists("k").await.unwrap()); + }); } - KvError::Internal(e) => EdgeError::internal(e), - } - } -} -// --------------------------------------------------------------------------- -// Trait -// --------------------------------------------------------------------------- + #[test] + fn contract_put_with_ttl_stores_value() { + let store = $factory; + run(async { + store + .put_bytes_with_ttl( + "ttl_key", + Bytes::from("ttl_val"), + std::time::Duration::from_secs(300), + ) + .await + .unwrap(); + assert_eq!( + store.get_bytes("ttl_key").await.unwrap(), + Some(Bytes::from("ttl_val")) + ); + }); + } -/// Object-safe interface for KV store backends. 
-/// -/// All methods take `&self` — backends handle concurrency internally -/// (e.g., platform APIs, or `Mutex` for in-memory stores). -/// -/// # Pre-validation contract -/// -/// This trait is always called through [`KvHandle`], which validates all -/// inputs (key length/format, value size, TTL bounds, list limits) before -/// delegating here. Implementations may therefore assume that: -/// - Keys are non-empty and within [`KvHandle::MAX_KEY_SIZE`] -/// - Values are within [`KvHandle::MAX_VALUE_SIZE`] -/// - TTLs are within `[MIN_TTL, MAX_TTL]` -/// - List limits are within `[1, MAX_LIST_PAGE_SIZE]` -/// -/// Do **not** call trait methods directly in production code; always go -/// through [`KvHandle`] to ensure validation is applied. -/// -/// Implementations exist per adapter: -/// - `PersistentKvStore` (axum adapter) — local dev / tests with persistent storage -/// - `FastlyKvStore` (fastly adapter) — Fastly KV Store -/// - `CloudflareKvStore` (cloudflare adapter) — Cloudflare Workers KV -#[async_trait(?Send)] -pub trait KvStore: Send + Sync { - /// Retrieve raw bytes for a key. Returns `Ok(None)` if the key does not exist. - async fn get_bytes(&self, key: &str) -> Result, KvError>; + // `std::thread::sleep` is not available on `wasm32` targets (no + // thread support). The TTL eviction contract is verified on native + // targets only; WASM adapters are expected to delegate eviction to + // the platform runtime (Cloudflare/Fastly), which does not expose a + // synchronous sleep primitive in test environments. + #[cfg(not(target_arch = "wasm32"))] + #[test] + fn contract_ttl_expires() { + let store = $factory; + run(async { + // Uses a sub-second TTL intentionally. Contract tests call + // `KvStore` directly (not `KvHandle`), so the 60-second + // minimum TTL validation is bypassed. This lets us verify + // that the backend actually evicts expired entries. 
+ store + .put_bytes_with_ttl( + "ephemeral", + Bytes::from("gone_soon"), + std::time::Duration::from_millis(1), + ) + .await + .unwrap(); + // Allow the TTL to elapse. 200ms gives the OS scheduler + // enough headroom on busy CI runners. + std::thread::sleep(std::time::Duration::from_millis(200)); + assert_eq!(store.get_bytes("ephemeral").await.unwrap(), None); + }); + } - /// Store raw bytes for a key, overwriting any existing value. - async fn put_bytes(&self, key: &str, value: Bytes) -> Result<(), KvError>; + #[test] + fn contract_list_keys_page_is_paginated() { + let store = $factory; + run(async { + let expected = vec![ + "app/one".to_owned(), + "app/two".to_owned(), + "other/three".to_owned(), + ]; + for key in &expected { + store + .put_bytes(key, Bytes::from(key.clone())) + .await + .unwrap(); + } - /// Store raw bytes with a time-to-live. After `ttl` has elapsed the key - /// should be treated as expired. Eviction timing is backend-specific: - /// - **Axum (`PersistentKvStore`)**: lazy eviction — expired keys are removed - /// on the next `get_bytes` call for that key. Keys never accessed after - /// expiration remain in the database until deleted, so `.edgezero/kv.redb` - /// grows without bound on long-running dev servers. - /// - **Fastly/Cloudflare**: eviction is managed by the platform and is not - /// guaranteed to be immediate. - async fn put_bytes_with_ttl( - &self, - key: &str, - value: Bytes, - ttl: Duration, - ) -> Result<(), KvError>; + let mut cursor = None; + let mut seen = std::collections::HashSet::new(); + let mut collected = Vec::new(); - /// Delete a key. Returns `Ok(())` even if the key did not exist. 
- async fn delete(&self, key: &str) -> Result<(), KvError>; + for _ in 0..expected.len() { + let page = store + .list_keys_page("", cursor.as_deref(), 1) + .await + .unwrap(); + assert!(page.keys.len() <= 1); + for key in &page.keys { + assert!( + seen.insert(key.clone()), + "duplicate key in pagination: {key}" + ); + collected.push(key.clone()); + } + + cursor = page.cursor; + if cursor.is_none() { + break; + } + } + + collected.sort(); + let mut expected_sorted = expected.clone(); + expected_sorted.sort(); + assert_eq!(collected, expected_sorted); + }); + } + + #[test] + fn contract_list_keys_page_respects_prefix() { + let store = $factory; + run(async { + store + .put_bytes("prefix/a", Bytes::from_static(b"a")) + .await + .unwrap(); + store + .put_bytes("prefix/b", Bytes::from_static(b"b")) + .await + .unwrap(); + store + .put_bytes("other/c", Bytes::from_static(b"c")) + .await + .unwrap(); - /// List keys in lexicographic order, returning at most `limit` keys. - /// - /// The `cursor` is opaque. Pass the cursor from a previous page back to - /// continue listing. Implementations should keep memory usage bounded to a - /// single page worth of keys. - async fn list_keys_page( - &self, - prefix: &str, - cursor: Option<&str>, - limit: usize, - ) -> Result; + let first = store.list_keys_page("prefix/", None, 1).await.unwrap(); + assert_eq!(first.keys.len(), 1); + assert!(first.keys[0].starts_with("prefix/")); - /// Check whether a key exists. - /// - /// The default implementation delegates to `get_bytes`. Backends that - /// support a cheaper existence check should override this. 
- async fn exists(&self, key: &str) -> Result { - Ok(self.get_bytes(key).await?.is_some()) - } + let second = store + .list_keys_page("prefix/", first.cursor.as_deref(), 1) + .await + .unwrap(); + assert!(second.keys.iter().all(|key| key.starts_with("prefix/"))); + assert!(first + .keys + .iter() + .chain(second.keys.iter()) + .all(|key| key.starts_with("prefix/"))); + }); + } + } + }; } // --------------------------------------------------------------------------- -// Test-only no-op store +// Error // --------------------------------------------------------------------------- -/// A no-op [`KvStore`] for tests that only need a [`KvHandle`] to exist -/// without storing real data. -/// -/// All reads return `None` / empty; all writes succeed silently. -/// -/// Available in `#[cfg(test)]` builds within this crate, and in any downstream -/// crate that enables the `test-utils` feature on `edgezero-core`: -/// -/// ```toml -/// [dev-dependencies] -/// edgezero-core = { path = "...", features = ["test-utils"] } -/// ``` -#[cfg(any(test, feature = "test-utils"))] -pub struct NoopKvStore; - -#[cfg(any(test, feature = "test-utils"))] -#[async_trait(?Send)] -impl KvStore for NoopKvStore { - async fn get_bytes(&self, _key: &str) -> Result, KvError> { - Ok(None) - } - async fn put_bytes(&self, _key: &str, _value: Bytes) -> Result<(), KvError> { - Ok(()) - } - async fn put_bytes_with_ttl( - &self, - _key: &str, - _value: Bytes, - _ttl: Duration, - ) -> Result<(), KvError> { - Ok(()) - } - async fn delete(&self, _key: &str) -> Result<(), KvError> { - Ok(()) - } - async fn list_keys_page( - &self, - _prefix: &str, - _cursor: Option<&str>, - _limit: usize, - ) -> Result { - Ok(KvPage::default()) - } +#[derive(Debug, Serialize, Deserialize)] +struct KvCursorEnvelope { + cursor: String, + prefix: String, } -// --------------------------------------------------------------------------- -// Handle -// --------------------------------------------------------------------------- +/// 
Errors returned by KV store operations. +#[derive(Debug, thiserror::Error)] +#[non_exhaustive] +pub enum KvError { + /// A general internal error. + #[error("kv store error: {0}")] + Internal(#[from] anyhow::Error), + + /// The requested key was not found (used by `delete` when strict). + #[error("key not found: {key}")] + NotFound { key: String }, + + /// A serialization or deserialization error. + #[error("serialization error: {0}")] + Serialization(#[from] serde_json::Error), + + /// The KV store backend is temporarily unavailable. + #[error("kv store unavailable")] + Unavailable, + + /// A validation error (e.g., invalid key or value). + #[error("validation error: {0}")] + Validation(String), +} /// A cloneable, ergonomic handle to a KV store. /// @@ -259,6 +337,7 @@ pub struct KvHandle { } impl fmt::Debug for KvHandle { + #[inline] fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("KvHandle").finish_non_exhaustive() } @@ -268,147 +347,79 @@ impl KvHandle { /// Maximum key size in bytes (Cloudflare limit). pub const MAX_KEY_SIZE: usize = 512; - /// Maximum value size in bytes (Standard limit). - pub const MAX_VALUE_SIZE: usize = 25 * 1024 * 1024; - - /// Minimum TTL in seconds (Cloudflare limit). - pub const MIN_TTL: Duration = Duration::from_secs(60); - - /// Maximum TTL (1 year). Prevents overflow when adding to `SystemTime::now()`. - pub const MAX_TTL: Duration = Duration::from_secs(365 * 24 * 60 * 60); - /// Maximum number of keys returned from a single page. pub const MAX_LIST_PAGE_SIZE: usize = 1_000; - /// Create a new handle wrapping a KV store implementation. 
- pub fn new(store: Arc) -> Self { - Self { store } - } - - // -- Validation --------------------------------------------------------- - - fn validate_key(key: &str) -> Result<(), KvError> { - if key.is_empty() { - return Err(KvError::Validation("key cannot be empty".to_string())); - } - if key.len() > Self::MAX_KEY_SIZE { - return Err(KvError::Validation(format!( - "key length {} exceeds limit of {} bytes", - key.len(), - Self::MAX_KEY_SIZE - ))); - } - if key == "." || key == ".." { - return Err(KvError::Validation( - "key cannot be exactly '.' or '..'".to_string(), - )); - } - if key.chars().any(|c| c.is_control()) { - return Err(KvError::Validation( - "key contains invalid control characters".to_string(), - )); - } - Ok(()) - } - - fn validate_value(value: &[u8]) -> Result<(), KvError> { - if value.len() > Self::MAX_VALUE_SIZE { - return Err(KvError::Validation(format!( - "value size {} exceeds limit of {} bytes", - value.len(), - Self::MAX_VALUE_SIZE - ))); - } - Ok(()) - } - - fn validate_ttl(ttl: Duration) -> Result<(), KvError> { - if ttl < Self::MIN_TTL { - return Err(KvError::Validation(format!( - "TTL {:?} is less than minimum of at least 60 seconds", - ttl - ))); - } - if ttl > Self::MAX_TTL { - return Err(KvError::Validation(format!( - "TTL {:?} exceeds maximum of 1 year", - ttl - ))); - } - Ok(()) - } + /// Maximum TTL (1 year). Prevents overflow when adding to `SystemTime::now()`. + pub const MAX_TTL: Duration = Duration::from_secs(365 * 24 * 60 * 60); - fn validate_prefix(prefix: &str) -> Result<(), KvError> { - if prefix.len() > Self::MAX_KEY_SIZE { - return Err(KvError::Validation(format!( - "prefix length {} exceeds limit of {} bytes", - prefix.len(), - Self::MAX_KEY_SIZE - ))); - } - if prefix.chars().any(|c| c.is_control()) { - return Err(KvError::Validation( - "prefix contains invalid control characters".to_string(), - )); - } - Ok(()) - } + /// Maximum value size in bytes (Standard limit). 
+ pub const MAX_VALUE_SIZE: usize = 25 * 1024 * 1024; - fn validate_list_limit(limit: usize) -> Result<(), KvError> { - if limit == 0 { - return Err(KvError::Validation( - "list limit must be greater than zero".to_string(), - )); - } - if limit > Self::MAX_LIST_PAGE_SIZE { - return Err(KvError::Validation(format!( - "list limit {} exceeds maximum of {}", - limit, - Self::MAX_LIST_PAGE_SIZE - ))); - } - Ok(()) - } + /// Minimum TTL in seconds (Cloudflare limit). + pub const MIN_TTL: Duration = Duration::from_secs(60); fn decode_list_cursor(prefix: &str, cursor: Option<&str>) -> Result, KvError> { - let Some(cursor) = cursor else { + let Some(encoded) = cursor else { return Ok(None); }; - let envelope: KvCursorEnvelope = serde_json::from_str(cursor) - .map_err(|_| KvError::Validation("list cursor is invalid or corrupted".to_string()))?; + let envelope: KvCursorEnvelope = serde_json::from_str(encoded) + .map_err(|_e| KvError::Validation("list cursor is invalid or corrupted".to_owned()))?; if envelope.prefix != prefix { return Err(KvError::Validation( - "list cursor does not match the requested prefix".to_string(), + "list cursor does not match the requested prefix".to_owned(), )); } if envelope.cursor.is_empty() { return Err(KvError::Validation( - "list cursor payload cannot be empty".to_string(), + "list cursor payload cannot be empty".to_owned(), )); } Ok(Some(envelope.cursor)) } + /// Delete a key. + /// + /// # Errors + /// Returns [`KvError`] if the backend rejects the delete. 
+ #[inline] + pub async fn delete(&self, key: &str) -> Result<(), KvError> { + Self::validate_key(key)?; + self.store.delete(key).await + } + fn encode_list_cursor(prefix: &str, cursor: Option) -> Result, KvError> { cursor - .map(|cursor| { + .map(|inner| { serde_json::to_string(&KvCursorEnvelope { - prefix: prefix.to_string(), - cursor, + cursor: inner, + prefix: prefix.to_owned(), }) .map_err(KvError::from) }) .transpose() } - // -- Typed helpers (JSON) ----------------------------------------------- + /// Check whether a key exists without deserializing its value. + /// + /// # Errors + /// Returns [`KvError`] if the backend lookup fails. + #[inline] + pub async fn exists(&self, key: &str) -> Result { + Self::validate_key(key)?; + self.store.exists(key).await + } /// Get a value by key, deserializing from JSON. /// /// Returns `Ok(None)` if the key does not exist. + /// + /// # Errors + /// Returns [`KvError`] if the lookup fails or the stored bytes cannot be deserialized into `T`. + #[inline] pub async fn get(&self, key: &str) -> Result, KvError> { Self::validate_key(key)?; match self.store.get_bytes(key).await? { @@ -420,12 +431,66 @@ impl KvHandle { } } + /// Get raw bytes for a key. + /// + /// # Errors + /// Returns [`KvError`] if the backend lookup fails. + #[inline] + pub async fn get_bytes(&self, key: &str) -> Result, KvError> { + Self::validate_key(key)?; + self.store.get_bytes(key).await + } + /// Get a value by key, returning `default` if the key does not exist. + /// + /// # Errors + /// Returns [`KvError`] if the lookup fails or the stored bytes cannot be deserialized into `T`. + #[inline] pub async fn get_or(&self, key: &str, default: T) -> Result { Ok(self.get(key).await?.unwrap_or(default)) } + /// List keys in a bounded, paginated fashion. + /// + /// The cursor is opaque, prefix-bound, and should be passed back unchanged + /// with the same prefix to retrieve the next page. 
Listings are not atomic + /// snapshots and may reflect concurrent writes or provider-level eventual + /// consistency. + /// + /// # Errors + /// Returns [`KvError::Validation`] if `cursor` is malformed or `prefix` exceeds backend limits; [`KvError::Internal`] on backend failure. + #[inline] + pub async fn list_keys_page( + &self, + prefix: &str, + cursor: Option<&str>, + limit: usize, + ) -> Result { + Self::validate_prefix(prefix)?; + Self::validate_list_limit(limit)?; + let decoded_cursor = Self::decode_list_cursor(prefix, cursor)?; + let page = self + .store + .list_keys_page(prefix, decoded_cursor.as_deref(), limit) + .await?; + + Ok(KvPage { + cursor: Self::encode_list_cursor(prefix, page.cursor)?, + keys: page.keys, + }) + } + + /// Create a new handle wrapping a KV store implementation. + #[inline] + pub fn new(store: Arc) -> Self { + Self { store } + } + /// Put a value, serializing it to JSON. + /// + /// # Errors + /// Returns [`KvError`] if the value cannot be serialized or the backend rejects the write. + #[inline] pub async fn put(&self, key: &str, value: &T) -> Result<(), KvError> { Self::validate_key(key)?; let bytes = serde_json::to_vec(value)?; @@ -433,7 +498,39 @@ impl KvHandle { self.store.put_bytes(key, Bytes::from(bytes)).await } + /// Put raw bytes for a key. + /// + /// # Errors + /// Returns [`KvError::Validation`] for invalid keys or oversized values; [`KvError::Internal`] on backend failure. + #[inline] + pub async fn put_bytes(&self, key: &str, value: Bytes) -> Result<(), KvError> { + Self::validate_key(key)?; + Self::validate_value(&value)?; + self.store.put_bytes(key, value).await + } + + /// Put raw bytes with a TTL. + /// + /// # Errors + /// Returns [`KvError::Validation`] for invalid input; [`KvError::Internal`] on backend failure. 
+ #[inline] + pub async fn put_bytes_with_ttl( + &self, + key: &str, + value: Bytes, + ttl: Duration, + ) -> Result<(), KvError> { + Self::validate_key(key)?; + Self::validate_ttl(ttl)?; + Self::validate_value(&value)?; + self.store.put_bytes_with_ttl(key, value, ttl).await + } + /// Put a value with a TTL, serializing it to JSON. + /// + /// # Errors + /// Returns [`KvError`] if the value cannot be serialized or the backend rejects the write. + #[inline] pub async fn put_with_ttl( &self, key: &str, @@ -461,312 +558,262 @@ impl KvHandle { /// calls to the backend. Concurrent calls on the same key may cause /// lost writes. Use this only when eventual consistency is acceptable /// (e.g., approximate counters). - pub async fn read_modify_write(&self, key: &str, default: T, f: F) -> Result + /// + /// # Errors + /// Returns [`KvError`] if any of the read, mutate, or write steps fail. + #[inline] + pub async fn read_modify_write( + &self, + key: &str, + default: T, + mutator: Mutator, + ) -> Result where T: DeserializeOwned + Serialize, - F: FnOnce(T) -> T, + Mutator: FnOnce(T) -> T, { // Validation happens in get_or and put let current = self.get_or(key, default).await?; - let updated = f(current); + let updated = mutator(current); self.put(key, &updated).await?; Ok(updated) } - // -- Raw bytes ---------------------------------------------------------- - - /// Get raw bytes for a key. - pub async fn get_bytes(&self, key: &str) -> Result, KvError> { - Self::validate_key(key)?; - self.store.get_bytes(key).await - } - - /// Put raw bytes for a key. - pub async fn put_bytes(&self, key: &str, value: Bytes) -> Result<(), KvError> { - Self::validate_key(key)?; - Self::validate_value(&value)?; - self.store.put_bytes(key, value).await - } - - /// Put raw bytes with a TTL. 
- pub async fn put_bytes_with_ttl( - &self, - key: &str, - value: Bytes, - ttl: Duration, - ) -> Result<(), KvError> { - Self::validate_key(key)?; - Self::validate_ttl(ttl)?; - Self::validate_value(&value)?; - self.store.put_bytes_with_ttl(key, value, ttl).await + fn validate_key(key: &str) -> Result<(), KvError> { + if key.is_empty() { + return Err(KvError::Validation("key cannot be empty".to_owned())); + } + if key.len() > Self::MAX_KEY_SIZE { + return Err(KvError::Validation(format!( + "key length {} exceeds limit of {} bytes", + key.len(), + Self::MAX_KEY_SIZE + ))); + } + if key == "." || key == ".." { + return Err(KvError::Validation( + "key cannot be exactly '.' or '..'".to_owned(), + )); + } + if key.chars().any(char::is_control) { + return Err(KvError::Validation( + "key contains invalid control characters".to_owned(), + )); + } + Ok(()) } - // -- Other operations --------------------------------------------------- - - /// Check whether a key exists without deserializing its value. - pub async fn exists(&self, key: &str) -> Result { - Self::validate_key(key)?; - self.store.exists(key).await + fn validate_list_limit(limit: usize) -> Result<(), KvError> { + if limit == 0 { + return Err(KvError::Validation( + "list limit must be greater than zero".to_owned(), + )); + } + if limit > Self::MAX_LIST_PAGE_SIZE { + return Err(KvError::Validation(format!( + "list limit {} exceeds maximum of {}", + limit, + Self::MAX_LIST_PAGE_SIZE + ))); + } + Ok(()) } - /// Delete a key. 
- pub async fn delete(&self, key: &str) -> Result<(), KvError> { - Self::validate_key(key)?; - self.store.delete(key).await + fn validate_prefix(prefix: &str) -> Result<(), KvError> { + if prefix.len() > Self::MAX_KEY_SIZE { + return Err(KvError::Validation(format!( + "prefix length {} exceeds limit of {} bytes", + prefix.len(), + Self::MAX_KEY_SIZE + ))); + } + if prefix.chars().any(char::is_control) { + return Err(KvError::Validation( + "prefix contains invalid control characters".to_owned(), + )); + } + Ok(()) } - /// List keys in a bounded, paginated fashion. - /// - /// The cursor is opaque, prefix-bound, and should be passed back unchanged - /// with the same prefix to retrieve the next page. Listings are not atomic - /// snapshots and may reflect concurrent writes or provider-level eventual - /// consistency. - pub async fn list_keys_page( - &self, - prefix: &str, - cursor: Option<&str>, - limit: usize, - ) -> Result { - Self::validate_prefix(prefix)?; - Self::validate_list_limit(limit)?; - let decoded_cursor = Self::decode_list_cursor(prefix, cursor)?; - let page = self - .store - .list_keys_page(prefix, decoded_cursor.as_deref(), limit) - .await?; - - Ok(KvPage { - keys: page.keys, - cursor: Self::encode_list_cursor(prefix, page.cursor)?, - }) + fn validate_ttl(ttl: Duration) -> Result<(), KvError> { + if ttl < Self::MIN_TTL { + return Err(KvError::Validation(format!( + "TTL {ttl:?} is less than minimum of at least 60 seconds" + ))); + } + if ttl > Self::MAX_TTL { + return Err(KvError::Validation(format!( + "TTL {ttl:?} exceeds maximum of 1 year" + ))); + } + Ok(()) } -} - -// --------------------------------------------------------------------------- -// Contract test macro -// --------------------------------------------------------------------------- - -/// Generate a suite of contract tests for any [`KvStore`] implementation. 
-/// -/// The macro takes the module name and a factory expression that produces a -/// fresh store instance (implementing `KvStore`). It generates a module -/// containing tests that verify the fundamental behaviours every backend -/// must satisfy. -/// -/// # Example -/// -/// ```rust,ignore -/// edgezero_core::key_value_store_contract_tests!(persistent_kv_contract, { -/// let db_path = std::env::temp_dir().join(format!( -/// "edgezero-contract-{}-{:?}.redb", -/// std::process::id(), -/// std::thread::current().id() -/// )); -/// PersistentKvStore::new(db_path).unwrap() -/// }); -/// ``` -#[macro_export] -macro_rules! key_value_store_contract_tests { - ($mod_name:ident, $factory:expr) => { - mod $mod_name { - use super::*; - use bytes::Bytes; - use $crate::key_value_store::KvStore; - - fn run(f: F) -> F::Output { - futures::executor::block_on(f) - } - - #[test] - fn contract_put_and_get() { - let store = $factory; - run(async { - store.put_bytes("k", Bytes::from("v")).await.unwrap(); - assert_eq!(store.get_bytes("k").await.unwrap(), Some(Bytes::from("v"))); - }); - } - - #[test] - fn contract_get_missing_returns_none() { - let store = $factory; - run(async { - assert_eq!(store.get_bytes("missing").await.unwrap(), None); - }); - } - - #[test] - fn contract_put_overwrites() { - let store = $factory; - run(async { - store.put_bytes("k", Bytes::from("first")).await.unwrap(); - store.put_bytes("k", Bytes::from("second")).await.unwrap(); - assert_eq!( - store.get_bytes("k").await.unwrap(), - Some(Bytes::from("second")) - ); - }); - } - - #[test] - fn contract_delete_removes_key() { - let store = $factory; - run(async { - store.put_bytes("k", Bytes::from("v")).await.unwrap(); - store.delete("k").await.unwrap(); - assert_eq!(store.get_bytes("k").await.unwrap(), None); - }); - } - - #[test] - fn contract_delete_nonexistent_ok() { - let store = $factory; - run(async { - store.delete("nope").await.unwrap(); - }); - } - - #[test] - fn contract_exists() { - let store = 
$factory; - run(async { - assert!(!store.exists("k").await.unwrap()); - store.put_bytes("k", Bytes::from("v")).await.unwrap(); - assert!(store.exists("k").await.unwrap()); - store.delete("k").await.unwrap(); - assert!(!store.exists("k").await.unwrap()); - }); - } - #[test] - fn contract_put_with_ttl_stores_value() { - let store = $factory; - run(async { - store - .put_bytes_with_ttl( - "ttl_key", - Bytes::from("ttl_val"), - std::time::Duration::from_secs(300), - ) - .await - .unwrap(); - assert_eq!( - store.get_bytes("ttl_key").await.unwrap(), - Some(Bytes::from("ttl_val")) - ); - }); - } + fn validate_value(value: &[u8]) -> Result<(), KvError> { + if value.len() > Self::MAX_VALUE_SIZE { + return Err(KvError::Validation(format!( + "value size {} exceeds limit of {} bytes", + value.len(), + Self::MAX_VALUE_SIZE + ))); + } + Ok(()) + } +} - // `std::thread::sleep` is not available on `wasm32` targets (no - // thread support). The TTL eviction contract is verified on native - // targets only; WASM adapters are expected to delegate eviction to - // the platform runtime (Cloudflare/Fastly), which does not expose a - // synchronous sleep primitive in test environments. - #[cfg(not(target_arch = "wasm32"))] - #[test] - fn contract_ttl_expires() { - let store = $factory; - run(async { - // Uses a sub-second TTL intentionally. Contract tests call - // `KvStore` directly (not `KvHandle`), so the 60-second - // minimum TTL validation is bypassed. This lets us verify - // that the backend actually evicts expired entries. - store - .put_bytes_with_ttl( - "ephemeral", - Bytes::from("gone_soon"), - std::time::Duration::from_millis(1), - ) - .await - .unwrap(); - // Allow the TTL to elapse. 200ms gives the OS scheduler - // enough headroom on busy CI runners. 
- std::thread::sleep(std::time::Duration::from_millis(200)); - assert_eq!(store.get_bytes("ephemeral").await.unwrap(), None); - }); +impl From for EdgeError { + #[inline] + fn from(err: KvError) -> Self { + match err { + KvError::NotFound { key } => EdgeError::not_found(format!("kv key: {key}")), + KvError::Unavailable => EdgeError::service_unavailable("kv store unavailable"), + KvError::Validation(msg) => { + EdgeError::bad_request(format!("kv validation error: {msg}")) + } + KvError::Serialization(msg) => { + EdgeError::internal(anyhow::anyhow!("kv serialization error: {msg}")) } + KvError::Internal(source) => EdgeError::internal(source), + } + } +} - #[test] - fn contract_list_keys_page_is_paginated() { - let store = $factory; - run(async { - let expected = vec![ - "app/one".to_string(), - "app/two".to_string(), - "other/three".to_string(), - ]; - for key in &expected { - store - .put_bytes(key, Bytes::from(key.clone())) - .await - .unwrap(); - } +/// A single page of keys from a KV listing operation. +/// +/// The `cursor` is opaque. Pass it back to `list_keys_page` to continue +/// listing from the next page. `None` means the current page is the last page. +#[derive(Debug, Clone, Default, PartialEq, Eq)] +pub struct KvPage { + pub cursor: Option, + pub keys: Vec, +} - let mut cursor = None; - let mut seen = std::collections::HashSet::new(); - let mut collected = Vec::new(); +/// Object-safe interface for KV store backends. +/// +/// All methods take `&self` — backends handle concurrency internally +/// (e.g., platform APIs, or `Mutex` for in-memory stores). +/// +/// # Pre-validation contract +/// +/// This trait is always called through [`KvHandle`], which validates all +/// inputs (key length/format, value size, TTL bounds, list limits) before +/// delegating here. 
Implementations may therefore assume that: +/// - Keys are non-empty and within [`KvHandle::MAX_KEY_SIZE`] +/// - Values are within [`KvHandle::MAX_VALUE_SIZE`] +/// - TTLs are within `[MIN_TTL, MAX_TTL]` +/// - List limits are within `[1, MAX_LIST_PAGE_SIZE]` +/// +/// Do **not** call trait methods directly in production code; always go +/// through [`KvHandle`] to ensure validation is applied. +/// +/// Implementations exist per adapter: +/// - `PersistentKvStore` (axum adapter) — local dev / tests with persistent storage +/// - `FastlyKvStore` (fastly adapter) — Fastly KV Store +/// - `CloudflareKvStore` (cloudflare adapter) — Cloudflare Workers KV +#[async_trait(?Send)] +pub trait KvStore: Send + Sync { + /// Delete a key. Returns `Ok(())` even if the key did not exist. + async fn delete(&self, key: &str) -> Result<(), KvError>; - for _ in 0..expected.len() { - let page = store - .list_keys_page("", cursor.as_deref(), 1) - .await - .unwrap(); - assert!(page.keys.len() <= 1); - for key in &page.keys { - assert!( - seen.insert(key.clone()), - "duplicate key in pagination: {key}" - ); - collected.push(key.clone()); - } + /// Check whether a key exists. + /// + /// The default implementation delegates to `get_bytes`. Backends that + /// support a cheaper existence check should override this. + #[inline] + async fn exists(&self, key: &str) -> Result { + Ok(self.get_bytes(key).await?.is_some()) + } - cursor = page.cursor; - if cursor.is_none() { - break; - } - } + /// Retrieve raw bytes for a key. Returns `Ok(None)` if the key does not exist. + async fn get_bytes(&self, key: &str) -> Result, KvError>; - collected.sort(); - let mut expected_sorted = expected.clone(); - expected_sorted.sort(); - assert_eq!(collected, expected_sorted); - }); - } + /// List keys in lexicographic order, returning at most `limit` keys. + /// + /// The `cursor` is opaque. Pass the cursor from a previous page back to + /// continue listing. 
Implementations should keep memory usage bounded to a + /// single page worth of keys. + async fn list_keys_page( + &self, + prefix: &str, + cursor: Option<&str>, + limit: usize, + ) -> Result; - #[test] - fn contract_list_keys_page_respects_prefix() { - let store = $factory; - run(async { - store - .put_bytes("prefix/a", Bytes::from_static(b"a")) - .await - .unwrap(); - store - .put_bytes("prefix/b", Bytes::from_static(b"b")) - .await - .unwrap(); - store - .put_bytes("other/c", Bytes::from_static(b"c")) - .await - .unwrap(); + /// Store raw bytes for a key, overwriting any existing value. + async fn put_bytes(&self, key: &str, value: Bytes) -> Result<(), KvError>; - let first = store.list_keys_page("prefix/", None, 1).await.unwrap(); - assert_eq!(first.keys.len(), 1); - assert!(first.keys[0].starts_with("prefix/")); + /// Store raw bytes with a time-to-live. After `ttl` has elapsed the key + /// should be treated as expired. Eviction timing is backend-specific: + /// - **Axum (`PersistentKvStore`)**: lazy eviction — expired keys are removed + /// on the next `get_bytes` call for that key. Keys never accessed after + /// expiration remain in the database until deleted, so `.edgezero/kv.redb` + /// grows without bound on long-running dev servers. + /// - **Fastly/Cloudflare**: eviction is managed by the platform and is not + /// guaranteed to be immediate. 
+ async fn put_bytes_with_ttl( + &self, + key: &str, + value: Bytes, + ttl: Duration, + ) -> Result<(), KvError>; +} - let second = store - .list_keys_page("prefix/", first.cursor.as_deref(), 1) - .await - .unwrap(); - assert!(second.keys.iter().all(|key| key.starts_with("prefix/"))); - assert!(first - .keys - .iter() - .chain(second.keys.iter()) - .all(|key| key.starts_with("prefix/"))); - }); - } - } - }; +// --------------------------------------------------------------------------- +// Test-only no-op store +// --------------------------------------------------------------------------- + +/// A no-op [`KvStore`] for tests that only need a [`KvHandle`] to exist +/// without storing real data. +/// +/// All reads return `None` / empty; all writes succeed silently. +/// +/// Available in `#[cfg(test)]` builds within this crate, and in any downstream +/// crate that enables the `test-utils` feature on `edgezero-core`: +/// +/// ```toml +/// [dev-dependencies] +/// edgezero-core = { path = "...", features = ["test-utils"] } +/// ``` +#[cfg(any(test, feature = "test-utils"))] +pub struct NoopKvStore; + +#[cfg(any(test, feature = "test-utils"))] +#[async_trait(?Send)] +impl KvStore for NoopKvStore { + #[inline] + async fn delete(&self, _key: &str) -> Result<(), KvError> { + Ok(()) + } + #[inline] + async fn exists(&self, _key: &str) -> Result { + Ok(false) + } + #[inline] + async fn get_bytes(&self, _key: &str) -> Result, KvError> { + Ok(None) + } + #[inline] + async fn list_keys_page( + &self, + _prefix: &str, + _cursor: Option<&str>, + _limit: usize, + ) -> Result { + Ok(KvPage::default()) + } + #[inline] + async fn put_bytes(&self, _key: &str, _value: Bytes) -> Result<(), KvError> { + Ok(()) + } + #[inline] + async fn put_bytes_with_ttl( + &self, + _key: &str, + _value: Bytes, + _ttl: Duration, + ) -> Result<(), KvError> { + Ok(()) + } } // --------------------------------------------------------------------------- @@ -775,28 +822,39 @@ macro_rules! 
key_value_store_contract_tests { #[cfg(test)] mod tests { + // Run the shared contract tests against MockStore. + crate::key_value_store_contract_tests!(mock_store_contract, MockStore::new()); + use super::*; use crate::http::StatusCode; + use futures::executor::block_on; use std::collections::HashMap; use std::sync::Mutex; use std::time::SystemTime; + #[derive(serde::Serialize, serde::Deserialize, PartialEq, Debug)] + struct Counter { + count: i32, + } + // In-memory store with TTL support for contract testing. // Uses `SystemTime` instead of `Instant` for WASM compatibility. struct MockStore { data: Mutex)>>, } - impl MockStore { - fn new() -> Self { - Self { - data: Mutex::new(HashMap::new()), - } + #[async_trait(?Send)] + impl KvStore for MockStore { + async fn delete(&self, key: &str) -> Result<(), KvError> { + let mut data = self.data.lock().unwrap(); + data.remove(key); + Ok(()) + } + + async fn exists(&self, key: &str) -> Result { + Ok(self.get_bytes(key).await?.is_some()) } - } - #[async_trait(?Send)] - impl KvStore for MockStore { async fn get_bytes(&self, key: &str) -> Result, KvError> { let mut data = self.data.lock().unwrap(); if let Some((_, Some(exp))) = data.get(key) { @@ -805,30 +863,7 @@ mod tests { return Ok(None); } } - Ok(data.get(key).map(|(v, _)| v.clone())) - } - - async fn put_bytes(&self, key: &str, value: Bytes) -> Result<(), KvError> { - let mut data = self.data.lock().unwrap(); - data.insert(key.to_string(), (value, None)); - Ok(()) - } - - async fn put_bytes_with_ttl( - &self, - key: &str, - value: Bytes, - ttl: Duration, - ) -> Result<(), KvError> { - let mut data = self.data.lock().unwrap(); - data.insert(key.to_string(), (value, Some(SystemTime::now() + ttl))); - Ok(()) - } - - async fn delete(&self, key: &str) -> Result<(), KvError> { - let mut data = self.data.lock().unwrap(); - data.remove(key); - Ok(()) + Ok(data.get(key).map(|(value, _)| value.clone())) } async fn list_keys_page( @@ -844,7 +879,7 @@ mod tests { let mut keys = 
data .keys() .filter(|key| { - key.starts_with(prefix) && cursor.is_none_or(|cursor| key.as_str() > cursor) + key.starts_with(prefix) && cursor.is_none_or(|cur| key.as_str() > cur) }) .cloned() .collect::>(); @@ -858,507 +893,519 @@ mod tests { keys, }) } + + async fn put_bytes(&self, key: &str, value: Bytes) -> Result<(), KvError> { + let mut data = self.data.lock().unwrap(); + data.insert(key.to_owned(), (value, None)); + Ok(()) + } + + async fn put_bytes_with_ttl( + &self, + key: &str, + value: Bytes, + ttl: Duration, + ) -> Result<(), KvError> { + let mut data = self.data.lock().unwrap(); + let expires_at = SystemTime::now() + .checked_add(ttl) + .ok_or_else(|| KvError::Internal(anyhow::anyhow!("ttl overflows system time")))?; + data.insert(key.to_owned(), (value, Some(expires_at))); + Ok(()) + } + } + + impl MockStore { + fn new() -> Self { + Self { + data: Mutex::new(HashMap::new()), + } + } } fn handle() -> KvHandle { KvHandle::new(Arc::new(MockStore::new())) } - // -- Raw bytes ---------------------------------------------------------- - #[test] - fn raw_bytes_roundtrip() { - let h = handle(); - futures::executor::block_on(async { - h.put_bytes("k", Bytes::from("hello")).await.unwrap(); - assert_eq!(h.get_bytes("k").await.unwrap(), Some(Bytes::from("hello"))); + fn delete_missing_key_is_ok() { + let kv = handle(); + block_on(async { + kv.delete("nope").await.unwrap(); }); } #[test] - fn raw_bytes_missing_key_returns_none() { - let h = handle(); - futures::executor::block_on(async { - assert_eq!(h.get_bytes("missing").await.unwrap(), None); + fn delete_removes_key() { + let kv = handle(); + block_on(async { + kv.put_bytes("k", Bytes::from("v")).await.unwrap(); + kv.delete("k").await.unwrap(); + assert_eq!(kv.get_bytes("k").await.unwrap(), None); }); } #[test] - fn raw_bytes_overwrite() { - let h = handle(); - futures::executor::block_on(async { - h.put_bytes("k", Bytes::from("a")).await.unwrap(); - h.put_bytes("k", Bytes::from("b")).await.unwrap(); - 
assert_eq!(h.get_bytes("k").await.unwrap(), Some(Bytes::from("b"))); + fn empty_key_rejected() { + let kv = handle(); + block_on(async { + let err = kv.put("", &"empty key").await.unwrap_err(); + assert!(matches!(err, KvError::Validation(_))); + assert!(format!("{err}").contains("cannot be empty")); }); } - // -- Typed JSON --------------------------------------------------------- - - #[derive(serde::Serialize, serde::Deserialize, PartialEq, Debug)] - struct Counter { - count: i32, - } - #[test] - fn typed_get_put_roundtrip() { - let h = handle(); - futures::executor::block_on(async { - let data = Counter { count: 42 }; - h.put("counter", &data).await.unwrap(); - let out: Option = h.get("counter").await.unwrap(); - assert_eq!(out, Some(data)); + fn exists_returns_false_after_delete() { + let kv = handle(); + block_on(async { + kv.put_bytes("ephemeral", Bytes::from("v")).await.unwrap(); + assert!(kv.exists("ephemeral").await.unwrap()); + kv.delete("ephemeral").await.unwrap(); + assert!(!kv.exists("ephemeral").await.unwrap()); }); } #[test] - fn typed_get_missing_returns_none() { - let h = handle(); - futures::executor::block_on(async { - let out: Option = h.get("nope").await.unwrap(); - assert_eq!(out, None); + fn exists_returns_false_for_missing() { + let kv = handle(); + block_on(async { + assert!(!kv.exists("nope").await.unwrap()); }); } #[test] - fn typed_get_or_returns_default() { - let h = handle(); - futures::executor::block_on(async { - let count: i32 = h.get_or("visits", 0).await.unwrap(); - assert_eq!(count, 0); + fn exists_returns_true_for_present() { + let kv = handle(); + block_on(async { + kv.put_bytes("k", Bytes::from("v")).await.unwrap(); + assert!(kv.exists("k").await.unwrap()); }); } #[test] - fn typed_get_or_returns_existing() { - let h = handle(); - futures::executor::block_on(async { - h.put("visits", &99).await.unwrap(); - let count: i32 = h.get_or("visits", 0).await.unwrap(); - assert_eq!(count, 99); + fn get_or_with_complex_default() { + let 
kv = handle(); + block_on(async { + let default = Counter { count: 100_i32 }; + let val: Counter = kv.get_or("missing_struct", default).await.unwrap(); + assert_eq!(val.count, 100_i32); }); } #[test] - fn typed_get_bad_json_returns_serialization_error() { - let h = handle(); - futures::executor::block_on(async { - h.put_bytes("bad", Bytes::from("not json")).await.unwrap(); - let err = h.get::("bad").await.unwrap_err(); - assert!(matches!(err, KvError::Serialization(_))); + fn handle_is_cloneable_and_shares_state() { + let h1 = handle(); + let h2 = h1.clone(); + block_on(async { + h1.put("shared", &42_i32).await.unwrap(); + let val: i32 = h2.get_or("shared", 0_i32).await.unwrap(); + assert_eq!(val, 42_i32); }); } - // -- Update ------------------------------------------------------------- - #[test] - fn update_increments_counter() { - let h = handle(); - futures::executor::block_on(async { - h.put("c", &0i32).await.unwrap(); - let val = h.read_modify_write("c", 0i32, |n| n + 1).await.unwrap(); - assert_eq!(val, 1); - let val = h.read_modify_write("c", 0i32, |n| n + 1).await.unwrap(); - assert_eq!(val, 2); - }); + fn kv_error_internal_converts_to_internal() { + let kv_err = KvError::Internal(anyhow::anyhow!("boom")); + let edge_err: EdgeError = kv_err.into(); + assert_eq!(edge_err.status(), StatusCode::INTERNAL_SERVER_ERROR); + assert!(edge_err.message().contains("boom")); } #[test] - fn update_uses_default_when_missing() { - let h = handle(); - futures::executor::block_on(async { - let val = h.read_modify_write("new", 10i32, |n| n * 2).await.unwrap(); - assert_eq!(val, 20); - }); + fn kv_error_not_found_converts_to_not_found() { + let kv_err = KvError::NotFound { key: "test".into() }; + let edge_err: EdgeError = kv_err.into(); + assert_eq!(edge_err.status(), StatusCode::NOT_FOUND); + assert!(edge_err.message().contains("kv key")); } - // -- Exists ------------------------------------------------------------- - #[test] - fn exists_returns_false_for_missing() { - let 
h = handle(); - futures::executor::block_on(async { - assert!(!h.exists("nope").await.unwrap()); - }); + fn kv_error_serialization_converts_to_internal() { + let json_err: serde_json::Error = serde_json::from_str::("not json").unwrap_err(); + let kv_err = KvError::Serialization(json_err); + let edge_err: EdgeError = kv_err.into(); + assert_eq!(edge_err.status(), StatusCode::INTERNAL_SERVER_ERROR); + assert!(edge_err.message().contains("serialization")); } #[test] - fn exists_returns_true_for_present() { - let h = handle(); - futures::executor::block_on(async { - h.put_bytes("k", Bytes::from("v")).await.unwrap(); - assert!(h.exists("k").await.unwrap()); - }); + fn kv_error_unavailable_converts_to_service_unavailable() { + let kv_err = KvError::Unavailable; + let edge_err: EdgeError = kv_err.into(); + assert_eq!(edge_err.status(), StatusCode::SERVICE_UNAVAILABLE); } - // -- Delete ------------------------------------------------------------- - #[test] - fn delete_removes_key() { - let h = handle(); - futures::executor::block_on(async { - h.put_bytes("k", Bytes::from("v")).await.unwrap(); - h.delete("k").await.unwrap(); - assert_eq!(h.get_bytes("k").await.unwrap(), None); - }); + fn kv_handle_debug_output() { + let kv = handle(); + let debug = format!("{kv:?}"); + assert!(debug.contains("KvHandle")); } #[test] - fn delete_missing_key_is_ok() { - let h = handle(); - futures::executor::block_on(async { - h.delete("nope").await.unwrap(); + fn large_value_roundtrip() { + let kv = handle(); + block_on(async { + let large = "x".repeat(1_000_000); // 1MB string + kv.put("big", &large).await.unwrap(); + let val: Option = kv.get("big").await.unwrap(); + assert_eq!(val.as_deref(), Some(large.as_str())); }); } #[test] fn list_keys_page_roundtrip() { - let h = handle(); - futures::executor::block_on(async { - h.put("app/a", &1i32).await.unwrap(); - h.put("app/b", &2i32).await.unwrap(); - h.put("app/c", &3i32).await.unwrap(); - h.put("other/d", &4i32).await.unwrap(); - - let first 
= h.list_keys_page("app/", None, 2).await.unwrap(); - assert_eq!(first.keys, vec!["app/a".to_string(), "app/b".to_string()]); + let kv = handle(); + block_on(async { + kv.put("app/a", &1_i32).await.unwrap(); + kv.put("app/b", &2_i32).await.unwrap(); + kv.put("app/c", &3_i32).await.unwrap(); + kv.put("other/d", &4_i32).await.unwrap(); + + let first = kv.list_keys_page("app/", None, 2).await.unwrap(); + assert_eq!(first.keys, vec!["app/a".to_owned(), "app/b".to_owned()]); assert!(first.cursor.is_some()); assert_ne!(first.cursor.as_deref(), Some("app/b")); - let second = h + let second = kv .list_keys_page("app/", first.cursor.as_deref(), 2) .await .unwrap(); - assert_eq!(second.keys, vec!["app/c".to_string()]); + assert_eq!(second.keys, vec!["app/c".to_owned()]); assert_eq!(second.cursor, None); }); } - // -- TTL ---------------------------------------------------------------- + #[test] + fn put_overwrite_changes_type() { + let kv = handle(); + block_on(async { + kv.put("flex", &42_i32).await.unwrap(); + let int_val: i32 = kv.get_or("flex", 0_i32).await.unwrap(); + assert_eq!(int_val, 42_i32); + + // Overwrite with a different type + kv.put("flex", &"now a string").await.unwrap(); + let str_val: String = kv.get_or("flex", String::new()).await.unwrap(); + assert_eq!(str_val, "now a string"); + }); + } #[test] fn put_with_ttl_stores_value() { - let h = handle(); - futures::executor::block_on(async { - h.put_with_ttl("session", &"token123", Duration::from_secs(60)) + let kv = handle(); + block_on(async { + kv.put_with_ttl("session", &"token123", Duration::from_secs(60)) .await .unwrap(); - let val: Option = h.get("session").await.unwrap(); - assert_eq!(val, Some("token123".to_string())); + let val: Option = kv.get("session").await.unwrap(); + assert_eq!(val, Some("token123".to_owned())); }); } - // -- KvError -> EdgeError ----------------------------------------------- + #[test] + fn put_with_ttl_typed_helper() { + let kv = handle(); + block_on(async { + let data = 
Counter { count: 7_i32 }; + kv.put_with_ttl("ttl_key", &data, Duration::from_secs(600)) + .await + .unwrap(); + let val: Option = kv.get("ttl_key").await.unwrap(); + assert_eq!(val, Some(Counter { count: 7_i32 })); + }); + } #[test] - fn kv_error_not_found_converts_to_not_found() { - let kv_err = KvError::NotFound { key: "test".into() }; - let edge_err: EdgeError = kv_err.into(); - assert_eq!(edge_err.status(), StatusCode::NOT_FOUND); - assert!(edge_err.message().contains("kv key")); + fn raw_bytes_missing_key_returns_none() { + let kv = handle(); + block_on(async { + assert_eq!(kv.get_bytes("missing").await.unwrap(), None); + }); } #[test] - fn kv_error_unavailable_converts_to_service_unavailable() { - let kv_err = KvError::Unavailable; - let edge_err: EdgeError = kv_err.into(); - assert_eq!(edge_err.status(), StatusCode::SERVICE_UNAVAILABLE); + fn raw_bytes_overwrite() { + let kv = handle(); + block_on(async { + kv.put_bytes("k", Bytes::from("a")).await.unwrap(); + kv.put_bytes("k", Bytes::from("b")).await.unwrap(); + assert_eq!(kv.get_bytes("k").await.unwrap(), Some(Bytes::from("b"))); + }); } #[test] - fn kv_error_internal_converts_to_internal() { - let kv_err = KvError::Internal(anyhow::anyhow!("boom")); - let edge_err: EdgeError = kv_err.into(); - assert_eq!(edge_err.status(), StatusCode::INTERNAL_SERVER_ERROR); - assert!(edge_err.message().contains("boom")); + fn raw_bytes_roundtrip() { + let kv = handle(); + block_on(async { + kv.put_bytes("k", Bytes::from("hello")).await.unwrap(); + assert_eq!(kv.get_bytes("k").await.unwrap(), Some(Bytes::from("hello"))); + }); } - // -- Clone handle ------------------------------------------------------- + #[test] + fn typed_get_bad_json_returns_serialization_error() { + let kv = handle(); + block_on(async { + kv.put_bytes("bad", Bytes::from("not json")).await.unwrap(); + let err = kv.get::("bad").await.unwrap_err(); + assert!(matches!(err, KvError::Serialization(_))); + }); + } #[test] - fn 
handle_is_cloneable_and_shares_state() { - let h1 = handle(); - let h2 = h1.clone(); - futures::executor::block_on(async { - h1.put("shared", &42i32).await.unwrap(); - let val: i32 = h2.get_or("shared", 0).await.unwrap(); - assert_eq!(val, 42); + fn typed_get_missing_returns_none() { + let kv = handle(); + block_on(async { + let out: Option = kv.get("nope").await.unwrap(); + assert_eq!(out, None); }); } - // -- Edge cases --------------------------------------------------------- + #[test] + fn typed_get_or_returns_default() { + let kv = handle(); + block_on(async { + let count: i32 = kv.get_or("visits", 0_i32).await.unwrap(); + assert_eq!(count, 0_i32); + }); + } #[test] - fn empty_key_rejected() { - let h = handle(); - futures::executor::block_on(async { - let err = h.put("", &"empty key").await.unwrap_err(); - assert!(matches!(err, KvError::Validation(_))); - assert!(format!("{}", err).contains("cannot be empty")); + fn typed_get_or_returns_existing() { + let kv = handle(); + block_on(async { + kv.put("visits", &99_i32).await.unwrap(); + let count: i32 = kv.get_or("visits", 0_i32).await.unwrap(); + assert_eq!(count, 99_i32); }); } #[test] - fn unicode_key_roundtrip() { - let h = handle(); - futures::executor::block_on(async { - h.put("日本語キー", &"value").await.unwrap(); - let val: Option = h.get("日本語キー").await.unwrap(); - assert_eq!(val, Some("value".to_string())); + fn typed_get_put_roundtrip() { + let kv = handle(); + block_on(async { + let data = Counter { count: 42 }; + kv.put("counter", &data).await.unwrap(); + let out: Option = kv.get("counter").await.unwrap(); + assert_eq!(out, Some(data)); }); } #[test] - fn large_value_roundtrip() { - let h = handle(); - futures::executor::block_on(async { - let large = "x".repeat(1_000_000); // 1MB string - h.put("big", &large).await.unwrap(); - let val: Option = h.get("big").await.unwrap(); - assert_eq!(val.as_deref(), Some(large.as_str())); + fn unicode_key_roundtrip() { + // "日本語キー" — the literal is written as Unicode 
escapes so the source + // file stays ASCII-only. The runtime bytes are identical. + const JAPANESE_KEY: &str = "\u{65E5}\u{672C}\u{8A9E}\u{30AD}\u{30FC}"; + let kv = handle(); + block_on(async { + kv.put(JAPANESE_KEY, &"value").await.unwrap(); + let val: Option = kv.get(JAPANESE_KEY).await.unwrap(); + assert_eq!(val, Some("value".to_owned())); }); } #[test] - fn put_with_ttl_typed_helper() { - let h = handle(); - futures::executor::block_on(async { - let data = Counter { count: 7 }; - h.put_with_ttl("ttl_key", &data, Duration::from_secs(600)) + fn update_increments_counter() { + let kv = handle(); + block_on(async { + kv.put("c", &0_i32).await.unwrap(); + let after_first = kv + .read_modify_write("c", 0_i32, |num| num + 1_i32) + .await + .unwrap(); + assert_eq!(after_first, 1_i32); + let after_second = kv + .read_modify_write("c", 0_i32, |num| num + 1_i32) .await .unwrap(); - let val: Option = h.get("ttl_key").await.unwrap(); - assert_eq!(val, Some(Counter { count: 7 })); + assert_eq!(after_second, 2_i32); }); } #[test] - fn get_or_with_complex_default() { - let h = handle(); - futures::executor::block_on(async { - let default = Counter { count: 100 }; - let val: Counter = h.get_or("missing_struct", default).await.unwrap(); - assert_eq!(val.count, 100); + fn update_uses_default_when_missing() { + let kv = handle(); + block_on(async { + let val = kv + .read_modify_write("new", 10_i32, |num| num * 2_i32) + .await + .unwrap(); + assert_eq!(val, 20_i32); }); } #[test] fn update_with_struct() { - let h = handle(); - futures::executor::block_on(async { - let val = h - .read_modify_write("counter_struct", Counter { count: 0 }, |mut c| { - c.count += 10; - c + let kv = handle(); + block_on(async { + let after_first = kv + .read_modify_write("counter_struct", Counter { count: 0_i32 }, |mut counter| { + counter.count += 10_i32; + counter }) .await .unwrap(); - assert_eq!(val.count, 10); + assert_eq!(after_first.count, 10_i32); - let val = h - 
.read_modify_write("counter_struct", Counter { count: 0 }, |mut c| { - c.count += 5; - c + let after_second = kv + .read_modify_write("counter_struct", Counter { count: 0_i32 }, |mut counter| { + counter.count += 5_i32; + counter }) .await .unwrap(); - assert_eq!(val.count, 15); + assert_eq!(after_second.count, 15_i32); }); } #[test] - fn kv_error_serialization_converts_to_internal() { - let json_err: serde_json::Error = serde_json::from_str::("not json").unwrap_err(); - let kv_err = KvError::Serialization(json_err); - let edge_err: EdgeError = kv_err.into(); - assert_eq!(edge_err.status(), StatusCode::INTERNAL_SERVER_ERROR); - assert!(edge_err.message().contains("serialization")); - } - - #[test] - fn kv_handle_debug_output() { - let h = handle(); - let debug = format!("{:?}", h); - assert!(debug.contains("KvHandle")); - } - - // -- Validation Tests --------------------------------------------------- - - #[test] - fn validation_rejects_long_keys() { - let h = handle(); - futures::executor::block_on(async { - let long_key = "a".repeat(KvHandle::MAX_KEY_SIZE + 1); - let err = h.get::(&long_key).await.unwrap_err(); + fn validation_rejects_control_chars() { + let kv = handle(); + block_on(async { + let err = kv.get::("key\nwith\nnewline").await.unwrap_err(); assert!(matches!(err, KvError::Validation(_))); - assert!(format!("{}", err).contains("key length")); + assert!(format!("{err}").contains("control characters")); }); } #[test] - fn validation_rejects_dot_keys() { - let h = handle(); - futures::executor::block_on(async { - let err = h.get::(".").await.unwrap_err(); - assert!(matches!(err, KvError::Validation(_))); - assert!(format!("{}", err).contains("cannot be exactly")); - - let err = h.get::("..").await.unwrap_err(); + fn validation_rejects_control_chars_in_prefix() { + let kv = handle(); + block_on(async { + let err = kv.list_keys_page("bad\nprefix", None, 1).await.unwrap_err(); assert!(matches!(err, KvError::Validation(_))); - assert!(format!("{}", 
err).contains("cannot be exactly")); + assert!(format!("{err}").contains("control characters")); }); } #[test] - fn validation_rejects_control_chars() { - let h = handle(); - futures::executor::block_on(async { - let err = h.get::("key\nwith\nnewline").await.unwrap_err(); - assert!(matches!(err, KvError::Validation(_))); - assert!(format!("{}", err).contains("control characters")); - }); - } + fn validation_rejects_cursor_for_different_prefix() { + let kv = handle(); + block_on(async { + kv.put("app/a", &1_i32).await.unwrap(); + kv.put("app/b", &2_i32).await.unwrap(); - #[test] - fn validation_rejects_large_values() { - let h = handle(); - futures::executor::block_on(async { - let large_val = vec![0u8; KvHandle::MAX_VALUE_SIZE + 1]; - let err = h - .put_bytes("large", Bytes::from(large_val)) + let page = kv.list_keys_page("app/", None, 1).await.unwrap(); + let err = kv + .list_keys_page("other/", page.cursor.as_deref(), 1) .await .unwrap_err(); assert!(matches!(err, KvError::Validation(_))); - assert!(format!("{}", err).contains("value size")); + assert!(format!("{err}").contains("requested prefix")); }); } #[test] - fn validation_rejects_short_ttl() { - let h = handle(); - futures::executor::block_on(async { - let err = h - .put_with_ttl("short", &"val", Duration::from_secs(10)) - .await - .unwrap_err(); - assert!(matches!(err, KvError::Validation(_))); - assert!(format!("{}", err).contains("at least 60 seconds")); + fn validation_rejects_dot_keys() { + let kv = handle(); + block_on(async { + let single_dot_err = kv.get::(".").await.unwrap_err(); + assert!(matches!(single_dot_err, KvError::Validation(_))); + assert!(format!("{single_dot_err}").contains("cannot be exactly")); + + let double_dot_err = kv.get::("..").await.unwrap_err(); + assert!(matches!(double_dot_err, KvError::Validation(_))); + assert!(format!("{double_dot_err}").contains("cannot be exactly")); }); } #[test] - fn validation_rejects_long_ttl() { - let h = handle(); - 
futures::executor::block_on(async { - let err = h - .put_with_ttl("long", &"val", KvHandle::MAX_TTL + Duration::from_secs(1)) + fn validation_rejects_large_list_limit() { + let kv = handle(); + block_on(async { + let err = kv + .list_keys_page("", None, KvHandle::MAX_LIST_PAGE_SIZE + 1) .await .unwrap_err(); assert!(matches!(err, KvError::Validation(_))); - assert!(format!("{}", err).contains("exceeds maximum")); + assert!(format!("{err}").contains("list limit")); }); } #[test] - fn validation_rejects_zero_list_limit() { - let h = handle(); - futures::executor::block_on(async { - let err = h.list_keys_page("", None, 0).await.unwrap_err(); + fn validation_rejects_large_values() { + let kv = handle(); + block_on(async { + let large_val = vec![0_u8; KvHandle::MAX_VALUE_SIZE + 1]; + let err = kv + .put_bytes("large", Bytes::from(large_val)) + .await + .unwrap_err(); assert!(matches!(err, KvError::Validation(_))); - assert!(format!("{}", err).contains("greater than zero")); + assert!(format!("{err}").contains("value size")); }); } #[test] - fn validation_rejects_large_list_limit() { - let h = handle(); - futures::executor::block_on(async { - let err = h - .list_keys_page("", None, KvHandle::MAX_LIST_PAGE_SIZE + 1) - .await - .unwrap_err(); + fn validation_rejects_long_keys() { + let kv = handle(); + block_on(async { + let long_key = "a".repeat(KvHandle::MAX_KEY_SIZE + 1); + let err = kv.get::(&long_key).await.unwrap_err(); assert!(matches!(err, KvError::Validation(_))); - assert!(format!("{}", err).contains("list limit")); + assert!(format!("{err}").contains("key length")); }); } #[test] fn validation_rejects_long_prefix() { - let h = handle(); - futures::executor::block_on(async { + let kv = handle(); + block_on(async { let prefix = "a".repeat(KvHandle::MAX_KEY_SIZE + 1); - let err = h.list_keys_page(&prefix, None, 1).await.unwrap_err(); + let err = kv.list_keys_page(&prefix, None, 1).await.unwrap_err(); assert!(matches!(err, KvError::Validation(_))); - 
assert!(format!("{}", err).contains("prefix length")); + assert!(format!("{err}").contains("prefix length")); }); } #[test] - fn validation_rejects_control_chars_in_prefix() { - let h = handle(); - futures::executor::block_on(async { - let err = h.list_keys_page("bad\nprefix", None, 1).await.unwrap_err(); + fn validation_rejects_long_ttl() { + let kv = handle(); + block_on(async { + let err = kv + .put_with_ttl("long", &"val", KvHandle::MAX_TTL + Duration::from_secs(1)) + .await + .unwrap_err(); assert!(matches!(err, KvError::Validation(_))); - assert!(format!("{}", err).contains("control characters")); + assert!(format!("{err}").contains("exceeds maximum")); }); } #[test] fn validation_rejects_malformed_list_cursor() { - let h = handle(); - futures::executor::block_on(async { - let err = h + let kv = handle(); + block_on(async { + let err = kv .list_keys_page("app/", Some("not-json"), 1) .await .unwrap_err(); assert!(matches!(err, KvError::Validation(_))); - assert!(format!("{}", err).contains("cursor")); + assert!(format!("{err}").contains("cursor")); }); } #[test] - fn validation_rejects_cursor_for_different_prefix() { - let h = handle(); - futures::executor::block_on(async { - h.put("app/a", &1i32).await.unwrap(); - h.put("app/b", &2i32).await.unwrap(); - - let page = h.list_keys_page("app/", None, 1).await.unwrap(); - let err = h - .list_keys_page("other/", page.cursor.as_deref(), 1) + fn validation_rejects_short_ttl() { + let kv = handle(); + block_on(async { + let err = kv + .put_with_ttl("short", &"val", Duration::from_secs(10)) .await .unwrap_err(); assert!(matches!(err, KvError::Validation(_))); - assert!(format!("{}", err).contains("requested prefix")); - }); - } - - #[test] - fn exists_returns_false_after_delete() { - let h = handle(); - futures::executor::block_on(async { - h.put_bytes("ephemeral", Bytes::from("v")).await.unwrap(); - assert!(h.exists("ephemeral").await.unwrap()); - h.delete("ephemeral").await.unwrap(); - 
assert!(!h.exists("ephemeral").await.unwrap()); + assert!(format!("{err}").contains("at least 60 seconds")); }); } #[test] - fn put_overwrite_changes_type() { - let h = handle(); - futures::executor::block_on(async { - h.put("flex", &42i32).await.unwrap(); - let val: i32 = h.get_or("flex", 0).await.unwrap(); - assert_eq!(val, 42); - - // Overwrite with a different type - h.put("flex", &"now a string").await.unwrap(); - let val: String = h.get_or("flex", String::new()).await.unwrap(); - assert_eq!(val, "now a string"); + fn validation_rejects_zero_list_limit() { + let kv = handle(); + block_on(async { + let err = kv.list_keys_page("", None, 0).await.unwrap_err(); + assert!(matches!(err, KvError::Validation(_))); + assert!(format!("{err}").contains("greater than zero")); }); } - - // Run the shared contract tests against MockStore. - crate::key_value_store_contract_tests!(mock_store_contract, MockStore::new()); } diff --git a/crates/edgezero-core/src/lib.rs b/crates/edgezero-core/src/lib.rs index 7295053..12d0b47 100644 --- a/crates/edgezero-core/src/lib.rs +++ b/crates/edgezero-core/src/lib.rs @@ -1,5 +1,14 @@ //! Core primitives for building portable edge workloads across edge adapters. +// Targets a single line — the proc-macro re-export at the bottom of this +// file. The `pub_use` lint is module-scoped (cannot be `#[expect]`-ed +// per-item), and proc-macros must be re-exported here so downstream users +// depend only on `edgezero-core` (not `edgezero-macros`). 
+#![expect( + clippy::pub_use, + reason = "proc-macros must be re-exported through the parent crate" +)] + pub mod app; pub mod body; pub mod compression; @@ -19,11 +28,4 @@ pub mod response; pub mod router; pub mod secret_store; -pub use config_store::{ConfigStore, ConfigStoreError, ConfigStoreHandle}; pub use edgezero_macros::{action, app}; -#[cfg(any(test, feature = "test-utils"))] -pub use key_value_store::NoopKvStore; -pub use key_value_store::{KvError, KvHandle, KvPage, KvStore}; -#[cfg(any(test, feature = "test-utils"))] -pub use secret_store::{InMemorySecretStore, NoopSecretStore}; -pub use secret_store::{SecretError, SecretHandle, SecretStore}; diff --git a/crates/edgezero-core/src/manifest.rs b/crates/edgezero-core/src/manifest.rs index 571a496..30e51d7 100644 --- a/crates/edgezero-core/src/manifest.rs +++ b/crates/edgezero-core/src/manifest.rs @@ -1,33 +1,32 @@ use log::LevelFilter; +use serde::de::Error as DeError; use serde::{Deserialize, Serialize}; use std::collections::BTreeMap; -use std::io; use std::path::{Path, PathBuf}; use std::sync::Arc; +use std::{env, fs, io}; use validator::{Validate, ValidationError}; +pub const DEFAULT_CONFIG_STORE_NAME: &str = "EDGEZERO_CONFIG"; +/// Default KV store / binding name used when `[stores.kv]` is omitted. +pub const DEFAULT_KV_STORE_NAME: &str = "EDGEZERO_KV"; +/// Default secret store / binding name used when `[stores.secrets]` is omitted. 
+pub const DEFAULT_SECRET_STORE_NAME: &str = "EDGEZERO_SECRETS"; +const SUPPORTED_CONFIG_STORE_ADAPTERS: &[&str] = &["axum", "cloudflare", "fastly"]; + pub struct ManifestLoader { manifest: Arc, } impl ManifestLoader { - pub fn load_from_str(contents: &str) -> Self { - let mut manifest: Manifest = - toml::from_str(contents).expect("edgezero manifest should be valid"); - manifest - .validate() - .expect("edgezero manifest failed validation"); - manifest.finalize(); - Self { - manifest: Arc::new(manifest), - } - } - + /// # Errors + /// Returns an [`io::Error`] if `path` cannot be read, or the file content cannot be parsed/validated as an `EdgeZero` manifest. + #[inline] pub fn from_path(path: &Path) -> Result { - let contents = std::fs::read_to_string(path)?; + let contents = fs::read_to_string(path)?; let mut manifest: Manifest = toml::from_str(&contents) .map_err(|err| io::Error::new(io::ErrorKind::InvalidData, err))?; - let cwd = std::env::current_dir()?; + let cwd = env::current_dir()?; let root_path = resolve_root_path(path, &cwd); manifest.root = Some(root_path); manifest @@ -39,62 +38,88 @@ impl ManifestLoader { }) } + /// Loads a manifest from a static, compile-time-embedded TOML string + /// (typically `include_str!("edgezero.toml")` inside an adapter binary). + /// + /// # Panics + /// Panics if `contents` is not valid TOML or fails validation. Because + /// `contents` is baked into the binary at build time, a parse/validation + /// failure means the binary itself is malformed — there is no runtime + /// recovery path, and surfacing the error as a panic with a clear + /// message is the correct behavior. Callers with a fallible input + /// source (file paths, network, user input) should use + /// [`ManifestLoader::try_load_from_str`] or [`ManifestLoader::from_path`]. 
+ #[expect( + clippy::panic, + reason = "load_from_str only consumes binary-embedded manifests; \ + a parse error means the binary is corrupt and cannot recover" + )] + #[must_use] + #[inline] + pub fn load_from_str(contents: &str) -> Self { + Self::try_load_from_str(contents).unwrap_or_else(|err| panic!("invalid manifest: {err}")) + } + + #[must_use] + #[inline] pub fn manifest(&self) -> &Manifest { &self.manifest } -} -fn resolve_root_path(path: &Path, cwd: &Path) -> PathBuf { - match path.parent() { - Some(parent) if parent.as_os_str().is_empty() => cwd.to_path_buf(), - Some(parent) if parent.is_relative() => cwd.join(parent), - Some(parent) => parent.to_path_buf(), - None => cwd.to_path_buf(), + /// # Errors + /// Returns an [`io::Error`] if `contents` is not valid TOML or fails manifest validation. + #[inline] + pub fn try_load_from_str(contents: &str) -> Result { + let mut manifest: Manifest = toml::from_str(contents) + .map_err(|err| io::Error::new(io::ErrorKind::InvalidData, err))?; + manifest + .validate() + .map_err(|err| io::Error::new(io::ErrorKind::InvalidData, err))?; + manifest.finalize(); + Ok(Self { + manifest: Arc::new(manifest), + }) } } -pub const DEFAULT_CONFIG_STORE_NAME: &str = "EDGEZERO_CONFIG"; -const SUPPORTED_CONFIG_STORE_ADAPTERS: &[&str] = &["axum", "cloudflare", "fastly"]; - #[derive(Debug, Deserialize, Validate)] +#[expect( + clippy::partial_pub_fields, + reason = "deserialized fields are pub for the public API; internal state is private" +)] pub struct Manifest { #[serde(default)] #[validate(nested)] - pub app: ManifestApp, + pub adapters: BTreeMap, #[serde(default)] #[validate(nested)] - pub triggers: ManifestTriggers, + pub app: ManifestApp, #[serde(default)] #[validate(nested)] pub environment: ManifestEnvironment, #[serde(default)] #[validate(nested)] - pub stores: ManifestStores, + pub logging: ManifestLogging, + #[serde(skip)] + logging_resolved: BTreeMap, + #[serde(skip)] + root: Option, #[serde(default)] #[validate(nested)] - 
pub adapters: BTreeMap, + pub stores: ManifestStores, #[serde(default)] #[validate(nested)] - pub logging: ManifestLogging, - #[serde(skip)] - pub(crate) root: Option, - #[serde(skip)] - pub(crate) logging_resolved: BTreeMap, + pub triggers: ManifestTriggers, } impl Manifest { - pub fn root(&self) -> Option<&Path> { - self.root.as_deref() - } - - pub fn logging_for(&self, adapter: &str) -> Option<&ResolvedLoggingConfig> { - self.logging_resolved.get(adapter) - } - - pub fn logging_or_default(&self, adapter: &str) -> ResolvedLoggingConfig { - self.logging_for(adapter).cloned().unwrap_or_default() + #[must_use] + #[inline] + pub fn environment(&self) -> &ManifestEnvironment { + &self.environment } + #[inline] pub fn environment_for(&self, adapter: &str) -> ResolvedEnvironment { let adapter_lower = adapter.to_ascii_lowercase(); @@ -114,11 +139,28 @@ impl Manifest { .map(ResolvedEnvironmentBinding::from_manifest) .collect(); - ResolvedEnvironment { variables, secrets } + ResolvedEnvironment { secrets, variables } } - pub fn environment(&self) -> &ManifestEnvironment { - &self.environment + pub(crate) fn finalize(&mut self) { + let mut resolved = BTreeMap::new(); + + for (adapter, cfg) in &self.adapters { + if cfg.logging.is_specified() { + resolved.insert( + adapter.clone(), + ResolvedLoggingConfig::from_manifest(&cfg.logging), + ); + } + } + + for (adapter, cfg) in &self.logging.adapters { + resolved + .entry(adapter.clone()) + .or_insert_with(|| ResolvedLoggingConfig::from_manifest(cfg)); + } + + self.logging_resolved = resolved; } /// Returns the KV store name for a given adapter. @@ -127,101 +169,100 @@ impl Manifest { /// 1. Per-adapter override (`[stores.kv.adapters.]`) /// 2. Global name (`[stores.kv] name = "..."`) /// 3. 
Default: `"EDGEZERO_KV"` + #[must_use] + #[inline] pub fn kv_store_name(&self, adapter: &str) -> &str { - match &self.stores.kv { - Some(kv) => { - let adapter_lower = adapter.to_ascii_lowercase(); - if let Some(adapter_cfg) = kv - .adapters - .iter() - .find(|(k, _)| k.eq_ignore_ascii_case(&adapter_lower)) - { - return &adapter_cfg.1.name; - } - &kv.name - } - None => DEFAULT_KV_STORE_NAME, + let Some(kv) = self.stores.kv.as_ref() else { + return DEFAULT_KV_STORE_NAME; + }; + let adapter_lower = adapter.to_ascii_lowercase(); + if let Some(adapter_cfg) = kv + .adapters + .iter() + .find(|&(name, _)| name.eq_ignore_ascii_case(&adapter_lower)) + { + return &adapter_cfg.1.name; } + &kv.name } - /// Returns the secret store name for a given adapter. + #[must_use] + #[inline] + pub fn logging_for(&self, adapter: &str) -> Option<&ResolvedLoggingConfig> { + self.logging_resolved.get(adapter) + } + + #[must_use] + #[inline] + pub fn logging_or_default(&self, adapter: &str) -> ResolvedLoggingConfig { + self.logging_for(adapter).cloned().unwrap_or_default() + } + + #[must_use] + #[inline] + pub fn root(&self) -> Option<&Path> { + self.root.as_deref() + } + + /// Returns the secret store binding identifier for a given adapter. /// /// Resolution order: /// 1. Per-adapter override (`[stores.secrets.adapters.]`) /// 2. Global name (`[stores.secrets] name = "..."`) /// 3. 
Default: `"EDGEZERO_SECRETS"` - pub fn secret_store_name(&self, adapter: &str) -> &str { - match &self.stores.secrets { - Some(secrets) => { - let adapter_lower = adapter.to_ascii_lowercase(); - if let Some(adapter_cfg) = secrets - .adapters - .iter() - .find(|(k, _)| k.eq_ignore_ascii_case(&adapter_lower)) - { - if let Some(name) = adapter_cfg.1.name.as_deref() { - return name; - } - } - &secrets.name + #[must_use] + #[inline] + pub fn secret_store_binding(&self, adapter: &str) -> &str { + let Some(secrets) = self.stores.secrets.as_ref() else { + return DEFAULT_SECRET_STORE_NAME; + }; + let adapter_lower = adapter.to_ascii_lowercase(); + if let Some(adapter_cfg) = secrets + .adapters + .iter() + .find(|&(name, _)| name.eq_ignore_ascii_case(&adapter_lower)) + { + if let Some(name) = adapter_cfg.1.name.as_deref() { + return name; } - None => DEFAULT_SECRET_STORE_NAME, } + &secrets.name } /// Returns whether the secret store should be attached for a given adapter. + #[must_use] + #[inline] pub fn secret_store_enabled(&self, adapter: &str) -> bool { - match &self.stores.secrets { - Some(secrets) => { - let adapter_lower = adapter.to_ascii_lowercase(); - if let Some(adapter_cfg) = secrets - .adapters - .iter() - .find(|(k, _)| k.eq_ignore_ascii_case(&adapter_lower)) - { - return adapter_cfg.1.enabled; - } - secrets.enabled - } - None => false, - } - } - - pub(crate) fn finalize(&mut self) { - let mut resolved = BTreeMap::new(); - - for (adapter, cfg) in &self.adapters { - if cfg.logging.is_specified() { - resolved.insert( - adapter.clone(), - ResolvedLoggingConfig::from_manifest(&cfg.logging), - ); - } - } - - for (adapter, cfg) in &self.logging.adapters { - resolved - .entry(adapter.clone()) - .or_insert_with(|| ResolvedLoggingConfig::from_manifest(cfg)); + let Some(secrets) = self.stores.secrets.as_ref() else { + return false; + }; + let adapter_lower = adapter.to_ascii_lowercase(); + if let Some(adapter_cfg) = secrets + .adapters + .iter() + .find(|&(name, _)| 
name.eq_ignore_ascii_case(&adapter_lower)) + { + return adapter_cfg.1.enabled; } - - self.logging_resolved = resolved; + secrets.enabled } } #[derive(Debug, Default, Deserialize, Validate)] +#[non_exhaustive] pub struct ManifestApp { #[serde(default)] - #[validate(length(min = 1))] - pub name: Option, - #[serde(default)] - #[validate(length(min = 1))] + #[validate(length(min = 1_u64))] pub entry: Option, #[serde(default)] pub middleware: Vec, + #[serde(default)] + #[validate(length(min = 1_u64))] + pub name: Option, } #[derive(Debug, Default, Deserialize, Validate)] +#[non_exhaustive] pub struct ManifestTriggers { #[serde(default)] #[validate(nested)] @@ -229,59 +270,67 @@ pub struct ManifestTriggers { } #[derive(Clone, Debug, Deserialize, Validate)] +#[non_exhaustive] pub struct ManifestHttpTrigger { #[serde(default)] - #[validate(length(min = 1))] - pub id: Option, - #[validate(length(min = 1))] - pub path: String, + pub adapters: Vec, + #[serde(rename = "body-mode")] #[serde(default)] - #[validate(length(min = 1))] - pub handler: Option, + pub body_mode: Option, #[serde(default)] - pub methods: Vec, + #[validate(length(min = 1_u64))] + pub description: Option, #[serde(default)] - pub adapters: Vec, + #[validate(length(min = 1_u64))] + pub handler: Option, #[serde(default)] - #[validate(length(min = 1))] - pub description: Option, - #[serde(rename = "body-mode")] + #[validate(length(min = 1_u64))] + pub id: Option, #[serde(default)] - pub body_mode: Option, + pub methods: Vec, + #[validate(length(min = 1_u64))] + pub path: String, } impl ManifestHttpTrigger { + #[inline] pub fn methods(&self) -> Vec<&str> { if self.methods.is_empty() { vec!["GET"] } else { - self.methods.iter().map(|m| m.as_str()).collect() + self.methods + .iter() + .copied() + .map(HttpMethod::as_str) + .collect() } } } #[derive(Debug, Default, Deserialize, Validate)] +#[non_exhaustive] pub struct ManifestEnvironment { #[serde(default)] #[validate(nested)] - pub variables: Vec, + pub secrets: 
Vec, #[serde(default)] #[validate(nested)] - pub secrets: Vec, + pub variables: Vec, } #[derive(Debug, Deserialize, Validate)] +#[non_exhaustive] pub struct ManifestBinding { - #[validate(length(min = 1))] - pub name: String, - #[serde(default)] - #[validate(length(min = 1))] - pub description: Option, #[serde(default)] pub adapters: Vec, #[serde(default)] - #[validate(length(min = 1))] + #[validate(length(min = 1_u64))] + pub description: Option, + #[serde(default)] + #[validate(length(min = 1_u64))] pub env: Option, + #[validate(length(min = 1_u64))] + pub name: String, #[serde(default)] pub value: Option, } @@ -314,19 +363,20 @@ impl ResolvedEnvironmentBinding { #[derive(Clone, Debug)] pub struct ResolvedEnvironmentBinding { - pub name: String, pub description: Option, pub env: String, + pub name: String, pub value: Option, } #[derive(Clone, Debug, Default)] pub struct ResolvedEnvironment { - pub variables: Vec, pub secrets: Vec, + pub variables: Vec, } #[derive(Debug, Default, Deserialize, Validate)] +#[non_exhaustive] pub struct ManifestAdapter { #[serde(default)] #[validate(nested)] @@ -343,39 +393,42 @@ pub struct ManifestAdapter { } #[derive(Debug, Default, Deserialize, Validate)] +#[non_exhaustive] pub struct ManifestAdapterDefinition { #[serde(rename = "crate")] #[serde(default)] - #[validate(length(min = 1))] + #[validate(length(min = 1_u64))] pub crate_path: Option, #[serde(default)] - #[validate(length(min = 1))] + #[validate(length(min = 1_u64))] pub manifest: Option, } #[derive(Debug, Default, Deserialize, Validate)] +#[non_exhaustive] pub struct ManifestAdapterBuild { #[serde(default)] - #[validate(length(min = 1))] - pub target: Option, + pub features: Vec, #[serde(default)] - #[validate(length(min = 1))] + #[validate(length(min = 1_u64))] pub profile: Option, #[serde(default)] - pub features: Vec, + #[validate(length(min = 1_u64))] + pub target: Option, } #[derive(Debug, Default, Deserialize, Validate)] +#[non_exhaustive] pub struct 
ManifestAdapterCommands { #[serde(default)] - #[validate(length(min = 1))] + #[validate(length(min = 1_u64))] pub build: Option, #[serde(default)] - #[validate(length(min = 1))] - pub serve: Option, - #[serde(default)] - #[validate(length(min = 1))] + #[validate(length(min = 1_u64))] pub deploy: Option, + #[serde(default)] + #[validate(length(min = 1_u64))] + pub serve: Option, } // --------------------------------------------------------------------------- @@ -384,6 +437,7 @@ pub struct ManifestAdapterCommands { /// Top-level `[stores]` section. #[derive(Debug, Default, Deserialize, Validate)] +#[non_exhaustive] pub struct ManifestStores { #[serde(default)] #[validate(nested)] @@ -398,11 +452,8 @@ pub struct ManifestStores { /// `[stores.config]` section — provider-neutral config store. #[derive(Debug, Deserialize, Validate)] +#[non_exhaustive] pub struct ManifestConfigStoreConfig { - /// Global store/binding name used when no adapter-specific override is set. - #[serde(default)] - #[validate(length(min = 1))] - pub name: Option, /// Per-adapter name overrides, keyed by supported lowercase adapter name /// (`axum`, `cloudflare`, or `fastly`). #[serde(default)] @@ -412,75 +463,43 @@ pub struct ManifestConfigStoreConfig { /// Optional default values used for local dev (Axum adapter). #[serde(default)] pub defaults: BTreeMap, + /// Global store/binding name used when no adapter-specific override is set. + #[serde(default)] + #[validate(length(min = 1_u64))] + pub name: Option, } /// `[stores.config.adapters.]` override. 
#[derive(Debug, Deserialize, Serialize, Validate)] +#[non_exhaustive] pub struct ManifestConfigAdapterConfig { - #[validate(length(min = 1))] + #[validate(length(min = 1_u64))] pub name: String, } -fn validate_config_store_adapter_keys( - adapters: &BTreeMap, -) -> Result<(), ValidationError> { - let mixed_case_keys = adapters - .keys() - .filter(|key| key.as_str() != key.to_ascii_lowercase()) - .cloned() - .collect::>(); - if !mixed_case_keys.is_empty() { - let mut error = ValidationError::new("config_store_adapter_keys_lowercase"); - error.message = Some( - format!( - "config store adapter override keys must be lowercase: {}", - mixed_case_keys.join(", ") - ) - .into(), - ); - return Err(error); - } - - let unknown_keys = adapters - .keys() - .filter(|key| !SUPPORTED_CONFIG_STORE_ADAPTERS.contains(&key.as_str())) - .cloned() - .collect::>(); - if unknown_keys.is_empty() { - return Ok(()); +impl ManifestConfigStoreConfig { + /// Access the default key-value pairs for local dev. + #[must_use] + #[inline] + pub fn config_store_defaults(&self) -> &BTreeMap { + &self.defaults } - let mut error = ValidationError::new("config_store_adapter_keys_known"); - error.message = Some( - format!( - "config store adapter override keys must match supported adapters ({}): {}", - SUPPORTED_CONFIG_STORE_ADAPTERS.join(", "), - unknown_keys.join(", ") - ) - .into(), - ); - Err(error) -} - -impl ManifestConfigStoreConfig { /// Resolve the config store name for a given adapter. /// /// Priority: adapter override → global name → `DEFAULT_CONFIG_STORE_NAME`. + #[must_use] + #[inline] pub fn config_store_name(&self, adapter: &str) -> &str { let adapter_lower = adapter.to_ascii_lowercase(); if let Some(override_cfg) = self.adapters.get(&adapter_lower) { return &override_cfg.name; } - if let Some(name) = &self.name { - return name.as_str(); + if let Some(name) = self.name.as_deref() { + return name; } DEFAULT_CONFIG_STORE_NAME } - - /// Access the default key-value pairs for local dev. 
- pub fn config_store_defaults(&self) -> &BTreeMap { - &self.defaults - } } // --------------------------------------------------------------------------- @@ -488,6 +507,7 @@ impl ManifestConfigStoreConfig { // --------------------------------------------------------------------------- #[derive(Debug, Default, Deserialize, Validate)] +#[non_exhaustive] pub struct ManifestLogging { #[serde(flatten)] #[validate(nested)] @@ -495,24 +515,26 @@ pub struct ManifestLogging { } #[derive(Debug, Default, Deserialize, Clone, Validate)] +#[non_exhaustive] pub struct ManifestLoggingConfig { #[serde(default)] - pub level: Option, + pub echo_stdout: Option, #[serde(default)] - #[validate(length(min = 1))] + #[validate(length(min = 1_u64))] pub endpoint: Option, #[serde(default)] - pub echo_stdout: Option, + pub level: Option, } #[derive(Debug, Clone)] pub struct ResolvedLoggingConfig { - pub level: LogLevel, - pub endpoint: Option, pub echo_stdout: Option, + pub endpoint: Option, + pub level: LogLevel, } impl Default for ResolvedLoggingConfig { + #[inline] fn default() -> Self { Self { level: LogLevel::Info, @@ -528,7 +550,7 @@ impl ResolvedLoggingConfig { if let Some(level) = cfg.level { resolved.level = level; } - if let Some(endpoint) = &cfg.endpoint { + if let Some(endpoint) = cfg.endpoint.as_ref() { resolved.endpoint = Some(endpoint.clone()); } if let Some(echo_stdout) = cfg.echo_stdout { @@ -544,65 +566,51 @@ impl ManifestLoggingConfig { } } -/// Default KV store / binding name used when `[stores.kv]` is omitted. -pub const DEFAULT_KV_STORE_NAME: &str = "EDGEZERO_KV"; - -fn default_kv_name() -> String { - DEFAULT_KV_STORE_NAME.to_string() -} - -/// Default secret store / binding name used when `[stores.secrets]` is omitted. -pub const DEFAULT_SECRET_STORE_NAME: &str = "EDGEZERO_SECRETS"; - -fn default_secret_name() -> String { - DEFAULT_SECRET_STORE_NAME.to_string() -} - -fn default_enabled() -> bool { - true -} - /// Global KV store configuration. 
#[derive(Debug, Deserialize, Validate)] +#[non_exhaustive] pub struct ManifestKvConfig { - /// Store / binding name (default: `"EDGEZERO_KV"`). - #[serde(default = "default_kv_name")] - #[validate(length(min = 1))] - pub name: String, - /// Per-adapter name overrides. #[serde(default)] #[validate(nested)] pub adapters: BTreeMap, + + /// Store / binding name (default: `"EDGEZERO_KV"`). + #[serde(default = "default_kv_name")] + #[validate(length(min = 1_u64))] + pub name: String, } /// Per-adapter KV binding / store name override. #[derive(Debug, Deserialize, Validate)] +#[non_exhaustive] pub struct ManifestKvAdapterConfig { - #[validate(length(min = 1))] + #[validate(length(min = 1_u64))] pub name: String, } /// Global secret store configuration. #[derive(Debug, Deserialize, Validate)] +#[non_exhaustive] pub struct ManifestSecretsConfig { + /// Per-adapter name overrides. + #[serde(default)] + #[validate(nested)] + pub adapters: BTreeMap, + /// Whether the secret store is enabled for adapters without overrides. #[serde(default = "default_enabled")] pub enabled: bool, /// Store / binding name (default: `"EDGEZERO_SECRETS"`). #[serde(default = "default_secret_name")] - #[validate(length(min = 1))] + #[validate(length(min = 1_u64))] pub name: String, - - /// Per-adapter name overrides. - #[serde(default)] - #[validate(nested)] - pub adapters: BTreeMap, } /// Per-adapter secret store name override. #[derive(Debug, Deserialize, Validate)] +#[non_exhaustive] pub struct ManifestSecretsAdapterConfig { /// Whether the secret store is enabled for this adapter. #[serde(default = "default_enabled")] @@ -610,36 +618,48 @@ pub struct ManifestSecretsAdapterConfig { /// Optional per-adapter secret store name override. 
#[serde(default)] - #[validate(length(min = 1))] + #[validate(length(min = 1_u64))] pub name: Option, } -#[derive(Clone, Debug, Eq, PartialEq)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +#[non_exhaustive] pub enum HttpMethod { + Delete, Get, + Head, + Options, + Patch, Post, Put, - Delete, - Patch, - Options, - Head, } impl HttpMethod { - pub fn as_str(&self) -> &'static str { + #[must_use] + #[inline] + pub fn as_str(self) -> &'static str { match self { + Self::Delete => "DELETE", Self::Get => "GET", + Self::Head => "HEAD", + Self::Options => "OPTIONS", + Self::Patch => "PATCH", Self::Post => "POST", Self::Put => "PUT", - Self::Delete => "DELETE", - Self::Patch => "PATCH", - Self::Options => "OPTIONS", - Self::Head => "HEAD", } } } +// Serde's `Deserialize` trait has an optional `deserialize_in_place` method +// that defaults to `*place = Self::deserialize(deserializer)?`. For these +// small Copy/clone enums there is nothing to gain from spelling out an +// override — the default already does exactly the right thing. +#[expect( + clippy::missing_trait_methods, + reason = "default deserialize_in_place is identical to what we would write manually" +)] impl<'de> Deserialize<'de> for HttpMethod { + #[inline] fn deserialize(deserializer: D) -> Result where D: serde::Deserializer<'de>, @@ -653,21 +673,30 @@ impl<'de> Deserialize<'de> for HttpMethod { "PATCH" => Ok(Self::Patch), "OPTIONS" => Ok(Self::Options), "HEAD" => Ok(Self::Head), - other => Err(serde::de::Error::custom(format!( - "unsupported HTTP method `{}`", - other + other => Err(DeError::custom(format!( + "unsupported HTTP method `{other}`" ))), } } } #[derive(Clone, Debug, Eq, PartialEq)] +#[non_exhaustive] pub enum BodyMode { Buffered, Stream, } +// Serde's `Deserialize` trait has an optional `deserialize_in_place` method +// that defaults to `*place = Self::deserialize(deserializer)?`. 
For these +// small Copy/clone enums there is nothing to gain from spelling out an +// override — the default already does exactly the right thing. +#[expect( + clippy::missing_trait_methods, + reason = "default deserialize_in_place is identical to what we would write manually" +)] impl<'de> Deserialize<'de> for BodyMode { + #[inline] fn deserialize(deserializer: D) -> Result where D: serde::Deserializer<'de>, @@ -676,27 +705,27 @@ impl<'de> Deserialize<'de> for BodyMode { match value.trim().to_ascii_lowercase().as_str() { "buffered" => Ok(Self::Buffered), "stream" => Ok(Self::Stream), - other => Err(serde::de::Error::custom(format!( - "unsupported body mode `{}`", - other - ))), + other => Err(DeError::custom(format!("unsupported body mode `{other}`"))), } } } #[derive(Clone, Copy, Debug, Eq, PartialEq, Default)] +#[non_exhaustive] pub enum LogLevel { - Trace, Debug, + Error, #[default] Info, - Warn, - Error, Off, + Trace, + Warn, } impl LogLevel { - pub fn as_str(&self) -> &'static str { + #[must_use] + #[inline] + pub fn as_str(self) -> &'static str { match self { Self::Trace => "trace", Self::Debug => "debug", @@ -709,6 +738,7 @@ impl LogLevel { } impl From for LevelFilter { + #[inline] fn from(level: LogLevel) -> Self { match level { LogLevel::Trace => LevelFilter::Trace, @@ -721,7 +751,16 @@ impl From for LevelFilter { } } +// Serde's `Deserialize` trait has an optional `deserialize_in_place` method +// that defaults to `*place = Self::deserialize(deserializer)?`. For these +// small Copy/clone enums there is nothing to gain from spelling out an +// override — the default already does exactly the right thing. 
+#[expect( + clippy::missing_trait_methods, + reason = "default deserialize_in_place is identical to what we would write manually" +)] impl<'de> Deserialize<'de> for LogLevel { + #[inline] fn deserialize(deserializer: D) -> Result where D: serde::Deserializer<'de>, @@ -734,19 +773,80 @@ impl<'de> Deserialize<'de> for LogLevel { "warn" => Ok(Self::Warn), "error" => Ok(Self::Error), "off" => Ok(Self::Off), - other => Err(serde::de::Error::custom(format!( - "logging level must be trace, debug, info, warn, error, or off (got `{}`)", - other + other => Err(DeError::custom(format!( + "logging level must be trace, debug, info, warn, error, or off (got `{other}`)" ))), } } } +fn default_enabled() -> bool { + true +} + +fn default_kv_name() -> String { + DEFAULT_KV_STORE_NAME.to_owned() +} + +fn default_secret_name() -> String { + DEFAULT_SECRET_STORE_NAME.to_owned() +} + +fn resolve_root_path(path: &Path, cwd: &Path) -> PathBuf { + match path.parent() { + Some(parent) if parent.as_os_str().is_empty() => cwd.to_path_buf(), + Some(parent) if parent.is_relative() => cwd.join(parent), + Some(parent) => parent.to_path_buf(), + None => cwd.to_path_buf(), + } +} + +fn validate_config_store_adapter_keys( + adapters: &BTreeMap, +) -> Result<(), ValidationError> { + let mixed_case_keys = adapters + .keys() + .filter(|key| key.as_str() != key.to_ascii_lowercase()) + .cloned() + .collect::>(); + if !mixed_case_keys.is_empty() { + let mut error = ValidationError::new("config_store_adapter_keys_lowercase"); + error.message = Some( + format!( + "config store adapter override keys must be lowercase: {}", + mixed_case_keys.join(", ") + ) + .into(), + ); + return Err(error); + } + + let unknown_keys = adapters + .keys() + .filter(|key| !SUPPORTED_CONFIG_STORE_ADAPTERS.contains(&key.as_str())) + .cloned() + .collect::>(); + if unknown_keys.is_empty() { + return Ok(()); + } + + let mut error = ValidationError::new("config_store_adapter_keys_known"); + error.message = Some( + format!( + 
"config store adapter override keys must match supported adapters ({}): {}", + SUPPORTED_CONFIG_STORE_ADAPTERS.join(", "), + unknown_keys.join(", ") + ) + .into(), + ); + Err(error) +} + #[cfg(test)] mod tests { use super::*; - use std::fs; use std::path::PathBuf; + use std::process; use tempfile::{tempdir, tempdir_in, NamedTempFile}; const SAMPLE: &str = r#" @@ -820,7 +920,7 @@ env = "APP_TOKEN" #[test] fn manifest_from_path_handles_relative_parent() { - let cwd = std::env::current_dir().unwrap(); + let cwd = env::current_dir().unwrap(); let dir = tempdir_in(&cwd).unwrap(); let path = dir.path().join("edgezero.toml"); fs::write(&path, "").unwrap(); @@ -833,7 +933,7 @@ env = "APP_TOKEN" #[test] fn manifest_from_path_uses_cwd_for_empty_parent() { - let cwd = std::env::current_dir().unwrap(); + let cwd = env::current_dir().unwrap(); let file = NamedTempFile::new_in(&cwd).unwrap(); fs::write(file.path(), "").unwrap(); let file_name = file.path().file_name().unwrap(); @@ -845,8 +945,8 @@ env = "APP_TOKEN" #[test] fn manifest_from_path_uses_cwd_when_parent_is_none() { - let cwd = std::env::current_dir().unwrap(); - let file_name = format!("edgezero-test-manifest-{}.toml", std::process::id()); + let cwd = env::current_dir().unwrap(); + let file_name = format!("edgezero-test-manifest-{}.toml", process::id()); let path = cwd.join(&file_name); fs::write(&path, "").unwrap(); @@ -948,15 +1048,15 @@ path = "/head" methods = ["HEAD"] "#; let loader = ManifestLoader::load_from_str(manifest); - let m = loader.manifest(); - assert_eq!(m.triggers.http.len(), 7); - assert_eq!(m.triggers.http[0].methods(), vec!["GET"]); - assert_eq!(m.triggers.http[1].methods(), vec!["POST"]); - assert_eq!(m.triggers.http[2].methods(), vec!["PUT"]); - assert_eq!(m.triggers.http[3].methods(), vec!["DELETE"]); - assert_eq!(m.triggers.http[4].methods(), vec!["PATCH"]); - assert_eq!(m.triggers.http[5].methods(), vec!["OPTIONS"]); - assert_eq!(m.triggers.http[6].methods(), vec!["HEAD"]); + let mfest = 
loader.manifest(); + assert_eq!(mfest.triggers.http.len(), 7); + assert_eq!(mfest.triggers.http[0].methods(), vec!["GET"]); + assert_eq!(mfest.triggers.http[1].methods(), vec!["POST"]); + assert_eq!(mfest.triggers.http[2].methods(), vec!["PUT"]); + assert_eq!(mfest.triggers.http[3].methods(), vec!["DELETE"]); + assert_eq!(mfest.triggers.http[4].methods(), vec!["PATCH"]); + assert_eq!(mfest.triggers.http[5].methods(), vec!["OPTIONS"]); + assert_eq!(mfest.triggers.http[6].methods(), vec!["HEAD"]); } #[test] @@ -981,8 +1081,8 @@ path = "/test" methods = ["get", "Post", "PUT"] "#; let loader = ManifestLoader::load_from_str(manifest); - let m = loader.manifest(); - assert_eq!(m.triggers.http[0].methods(), vec!["GET", "POST", "PUT"]); + let mfest = loader.manifest(); + assert_eq!(mfest.triggers.http[0].methods(), vec!["GET", "POST", "PUT"]); } #[test] @@ -992,8 +1092,8 @@ methods = ["get", "Post", "PUT"] path = "/test" "#; let loader = ManifestLoader::load_from_str(manifest); - let m = loader.manifest(); - assert_eq!(m.triggers.http[0].methods(), vec!["GET"]); + let mfest = loader.manifest(); + assert_eq!(mfest.triggers.http[0].methods(), vec!["GET"]); } // BodyMode parsing tests @@ -1005,8 +1105,8 @@ path = "/test" body-mode = "buffered" "#; let loader = ManifestLoader::load_from_str(manifest); - let m = loader.manifest(); - assert_eq!(m.triggers.http[0].body_mode, Some(BodyMode::Buffered)); + let mfest = loader.manifest(); + assert_eq!(mfest.triggers.http[0].body_mode, Some(BodyMode::Buffered)); } #[test] @@ -1017,8 +1117,8 @@ path = "/test" body-mode = "stream" "#; let loader = ManifestLoader::load_from_str(manifest); - let m = loader.manifest(); - assert_eq!(m.triggers.http[0].body_mode, Some(BodyMode::Stream)); + let mfest = loader.manifest(); + assert_eq!(mfest.triggers.http[0].body_mode, Some(BodyMode::Stream)); } #[test] @@ -1058,13 +1158,22 @@ level = "error" level = "off" "#; let loader = ManifestLoader::load_from_str(manifest); - let m = loader.manifest(); - 
assert_eq!(m.logging_for("adapter1").unwrap().level, LogLevel::Trace); - assert_eq!(m.logging_for("adapter2").unwrap().level, LogLevel::Debug); - assert_eq!(m.logging_for("adapter3").unwrap().level, LogLevel::Info); - assert_eq!(m.logging_for("adapter4").unwrap().level, LogLevel::Warn); - assert_eq!(m.logging_for("adapter5").unwrap().level, LogLevel::Error); - assert_eq!(m.logging_for("adapter6").unwrap().level, LogLevel::Off); + let mfest = loader.manifest(); + assert_eq!( + mfest.logging_for("adapter1").unwrap().level, + LogLevel::Trace + ); + assert_eq!( + mfest.logging_for("adapter2").unwrap().level, + LogLevel::Debug + ); + assert_eq!(mfest.logging_for("adapter3").unwrap().level, LogLevel::Info); + assert_eq!(mfest.logging_for("adapter4").unwrap().level, LogLevel::Warn); + assert_eq!( + mfest.logging_for("adapter5").unwrap().level, + LogLevel::Error + ); + assert_eq!(mfest.logging_for("adapter6").unwrap().level, LogLevel::Off); } #[test] @@ -1106,8 +1215,8 @@ level = "off" name = "test" "#; let loader = ManifestLoader::load_from_str(manifest); - let m = loader.manifest(); - let logging = m.logging_or_default("unknown"); + let mfest = loader.manifest(); + let logging = mfest.logging_or_default("unknown"); assert_eq!(logging.level, LogLevel::Info); assert!(logging.endpoint.is_none()); assert!(logging.echo_stdout.is_none()); @@ -1132,8 +1241,8 @@ endpoint = "https://logs.example.com" echo_stdout = true "#; let loader = ManifestLoader::load_from_str(manifest); - let m = loader.manifest(); - let logging = m.logging_for("axum").unwrap(); + let mfest = loader.manifest(); + let logging = mfest.logging_for("axum").unwrap(); assert_eq!(logging.level, LogLevel::Debug); assert_eq!( logging.endpoint.as_deref(), @@ -1150,8 +1259,8 @@ level = "error" endpoint = "https://fastly-logs.example.com" "#; let loader = ManifestLoader::load_from_str(manifest); - let m = loader.manifest(); - let logging = m.logging_for("fastly").unwrap(); + let mfest = loader.manifest(); + let logging 
= mfest.logging_for("fastly").unwrap(); assert_eq!(logging.level, LogLevel::Error); assert_eq!( logging.endpoint.as_deref(), @@ -1169,8 +1278,8 @@ env = "ACTUAL_ENV_KEY" value = "some-value" "#; let loader = ManifestLoader::load_from_str(manifest); - let m = loader.manifest(); - let env = m.environment_for("any-adapter"); + let mfest = loader.manifest(); + let env = mfest.environment_for("any-adapter"); assert_eq!(env.variables[0].name, "MY_VAR"); assert_eq!(env.variables[0].env, "ACTUAL_ENV_KEY"); assert_eq!(env.variables[0].value.as_deref(), Some("some-value")); @@ -1184,8 +1293,8 @@ name = "API_KEY" value = "secret" "#; let loader = ManifestLoader::load_from_str(manifest); - let m = loader.manifest(); - let env = m.environment_for("any-adapter"); + let mfest = loader.manifest(); + let env = mfest.environment_for("any-adapter"); assert_eq!(env.variables[0].name, "API_KEY"); assert_eq!(env.variables[0].env, "API_KEY"); } @@ -1208,17 +1317,17 @@ name = "VAR3" value = "v3" "#; let loader = ManifestLoader::load_from_str(manifest); - let m = loader.manifest(); + let mfest = loader.manifest(); - let fastly_env = m.environment_for("FASTLY"); + let fastly_env = mfest.environment_for("FASTLY"); assert_eq!(fastly_env.variables.len(), 2); // VAR1 and VAR3 - assert!(fastly_env.variables.iter().any(|v| v.name == "VAR1")); - assert!(fastly_env.variables.iter().any(|v| v.name == "VAR3")); + assert!(fastly_env.variables.iter().any(|var| var.name == "VAR1")); + assert!(fastly_env.variables.iter().any(|var| var.name == "VAR3")); - let cf_env = m.environment_for("Cloudflare"); + let cf_env = mfest.environment_for("Cloudflare"); assert_eq!(cf_env.variables.len(), 2); // VAR2 and VAR3 - assert!(cf_env.variables.iter().any(|v| v.name == "VAR2")); - assert!(cf_env.variables.iter().any(|v| v.name == "VAR3")); + assert!(cf_env.variables.iter().any(|var| var.name == "VAR2")); + assert!(cf_env.variables.iter().any(|var| var.name == "VAR3")); } #[test] @@ -1229,8 +1338,8 @@ name = 
"DB_PASSWORD" description = "Database password for production" "#; let loader = ManifestLoader::load_from_str(manifest); - let m = loader.manifest(); - let env = m.environment_for("any"); + let mfest = loader.manifest(); + let env = mfest.environment_for("any"); assert_eq!( env.secrets[0].description.as_deref(), Some("Database password for production") @@ -1247,8 +1356,8 @@ profile = "release" features = ["feature1", "feature2"] "#; let loader = ManifestLoader::load_from_str(manifest); - let m = loader.manifest(); - let adapter = m.adapters.get("fastly").unwrap(); + let mfest = loader.manifest(); + let adapter = &mfest.adapters["fastly"]; assert_eq!(adapter.build.target.as_deref(), Some("wasm32-wasip1")); assert_eq!(adapter.build.profile.as_deref(), Some("release")); assert_eq!(adapter.build.features, vec!["feature1", "feature2"]); @@ -1263,8 +1372,8 @@ serve = "fastly compute serve" deploy = "fastly compute deploy" "#; let loader = ManifestLoader::load_from_str(manifest); - let m = loader.manifest(); - let adapter = m.adapters.get("fastly").unwrap(); + let mfest = loader.manifest(); + let adapter = &mfest.adapters["fastly"]; assert_eq!( adapter.commands.build.as_deref(), Some("fastly compute build") @@ -1287,8 +1396,8 @@ crate = "crates/fastly-adapter" manifest = "fastly.toml" "#; let loader = ManifestLoader::load_from_str(manifest); - let m = loader.manifest(); - let adapter = m.adapters.get("fastly").unwrap(); + let mfest = loader.manifest(); + let adapter = &mfest.adapters["fastly"]; assert_eq!( adapter.adapter.crate_path.as_deref(), Some("crates/fastly-adapter") @@ -1301,11 +1410,11 @@ manifest = "fastly.toml" fn empty_manifest_has_defaults() { let manifest = ""; let loader = ManifestLoader::load_from_str(manifest); - let m = loader.manifest(); - assert!(m.app.name.is_none()); - assert!(m.app.entry.is_none()); - assert!(m.triggers.http.is_empty()); - assert!(m.adapters.is_empty()); + let mfest = loader.manifest(); + assert!(mfest.app.name.is_none()); + 
assert!(mfest.app.entry.is_none()); + assert!(mfest.triggers.http.is_empty()); + assert!(mfest.adapters.is_empty()); } #[test] @@ -1332,8 +1441,8 @@ manifest = "fastly.toml" // [stores.config] present but no name and no adapter overrides: // config_store_name() must return DEFAULT_CONFIG_STORE_NAME. let toml = "[stores.config]\n"; - let m = ManifestLoader::load_from_str(toml); - let config = m.manifest().stores.config.as_ref().unwrap(); + let mfest = ManifestLoader::load_from_str(toml); + let config = mfest.manifest().stores.config.as_ref().unwrap(); assert_eq!( config.config_store_name("fastly"), DEFAULT_CONFIG_STORE_NAME @@ -1358,8 +1467,8 @@ manifest = "fastly.toml" [stores.config] name = "app_config" "#; - let m = ManifestLoader::load_from_str(toml); - let config = m.manifest().stores.config.as_ref().unwrap(); + let mfest = ManifestLoader::load_from_str(toml); + let config = mfest.manifest().stores.config.as_ref().unwrap(); assert_eq!(config.config_store_name("fastly"), "app_config"); assert_eq!(config.config_store_name("cloudflare"), "app_config"); assert_eq!(config.config_store_name("axum"), "app_config"); @@ -1377,8 +1486,8 @@ name = "my-config-link" [stores.config.adapters.cloudflare] name = "APP_CONFIG_BINDING" "#; - let m = ManifestLoader::load_from_str(toml); - let config = m.manifest().stores.config.as_ref().unwrap(); + let mfest = ManifestLoader::load_from_str(toml); + let config = mfest.manifest().stores.config.as_ref().unwrap(); assert_eq!(config.config_store_name("fastly"), "my-config-link"); assert_eq!(config.config_store_name("cloudflare"), "APP_CONFIG_BINDING"); assert_eq!(config.config_store_name("axum"), "global_config"); @@ -1390,8 +1499,8 @@ name = "APP_CONFIG_BINDING" [stores.config.adapters.fastly] name = "fastly-store" "#; - let m = ManifestLoader::load_from_str(toml); - let config = m.manifest().stores.config.as_ref().unwrap(); + let mfest = ManifestLoader::load_from_str(toml); + let config = 
mfest.manifest().stores.config.as_ref().unwrap(); assert_eq!(config.config_store_name("FASTLY"), "fastly-store"); assert_eq!(config.config_store_name("Fastly"), "fastly-store"); assert_eq!(config.config_store_name("fastly"), "fastly-store"); @@ -1451,23 +1560,23 @@ name = "SPIN_CONFIG" "feature.checkout" = "true" "service.timeout_ms" = "1500" "#; - let m = ManifestLoader::load_from_str(toml); - let config = m.manifest().stores.config.as_ref().unwrap(); + let mfest = ManifestLoader::load_from_str(toml); + let config = mfest.manifest().stores.config.as_ref().unwrap(); let defaults = config.config_store_defaults(); assert_eq!( - defaults.get("feature.checkout").map(|s| s.as_str()), + defaults.get("feature.checkout").map(String::as_str), Some("true") ); assert_eq!( - defaults.get("service.timeout_ms").map(|s| s.as_str()), + defaults.get("service.timeout_ms").map(String::as_str), Some("1500") ); } #[test] fn empty_manifest_has_no_config_store() { - let m = ManifestLoader::load_from_str(""); - assert!(m.manifest().stores.config.is_none()); + let mfest = ManifestLoader::load_from_str(""); + assert!(mfest.manifest().stores.config.is_none()); } #[test] @@ -1579,39 +1688,39 @@ name = "FASTLY_STORE" // -- Secret store config ----------------------------------------------- #[test] - fn secret_store_name_defaults_to_constant_when_absent() { + fn secret_store_binding_defaults_to_constant_when_absent() { let manifest = ManifestLoader::load_from_str("[app]\nname = \"x\"\n"); assert_eq!( - manifest.manifest().secret_store_name("fastly"), + manifest.manifest().secret_store_binding("fastly"), DEFAULT_SECRET_STORE_NAME ); } #[test] - fn secret_store_name_uses_global_name_when_declared() { + fn secret_store_binding_uses_global_name_when_declared() { let manifest = ManifestLoader::load_from_str("[stores.secrets]\nname = \"MY_SECRETS\"\n"); assert_eq!( - manifest.manifest().secret_store_name("fastly"), + manifest.manifest().secret_store_binding("fastly"), "MY_SECRETS" ); assert_eq!( - 
manifest.manifest().secret_store_name("cloudflare"), + manifest.manifest().secret_store_binding("cloudflare"), "MY_SECRETS" ); } #[test] - fn secret_store_name_uses_per_adapter_override() { + fn secret_store_binding_uses_per_adapter_override() { let manifest = ManifestLoader::load_from_str( "[stores.secrets]\nname = \"MY_SECRETS\"\n\ [stores.secrets.adapters.fastly]\nname = \"FASTLY_STORE\"\n", ); assert_eq!( - manifest.manifest().secret_store_name("fastly"), + manifest.manifest().secret_store_binding("fastly"), "FASTLY_STORE" ); assert_eq!( - manifest.manifest().secret_store_name("cloudflare"), + manifest.manifest().secret_store_binding("cloudflare"), "MY_SECRETS" ); } @@ -1661,11 +1770,11 @@ name = "FASTLY_STORE" assert!(manifest.manifest().secret_store_enabled("fastly")); assert!(!manifest.manifest().secret_store_enabled("cloudflare")); assert_eq!( - manifest.manifest().secret_store_name("fastly"), + manifest.manifest().secret_store_binding("fastly"), "FASTLY_STORE" ); assert_eq!( - manifest.manifest().secret_store_name("cloudflare"), + manifest.manifest().secret_store_binding("cloudflare"), DEFAULT_SECRET_STORE_NAME ); } diff --git a/crates/edgezero-core/src/middleware.rs b/crates/edgezero-core/src/middleware.rs index de8582d..47fffea 100644 --- a/crates/edgezero-core/src/middleware.rs +++ b/crates/edgezero-core/src/middleware.rs @@ -11,24 +11,57 @@ use crate::http::Response; pub type BoxMiddleware = Arc; +pub struct FnMiddleware +where + F: Send + Sync + 'static, +{ + func: F, +} + +impl FnMiddleware +where + F: Send + Sync + 'static, +{ + #[inline] + pub fn new(func: F) -> Self { + Self { func } + } +} + +#[async_trait(?Send)] +impl Middleware for FnMiddleware +where + F: Fn(RequestContext, Next<'_>) -> Fut + Send + Sync + 'static, + Fut: Future>, +{ + #[inline] + async fn handle(&self, ctx: RequestContext, next: Next<'_>) -> Result { + (self.func)(ctx, next).await + } +} + #[async_trait(?Send)] pub trait Middleware: Send + Sync + 'static { async fn 
handle(&self, ctx: RequestContext, next: Next<'_>) -> Result; } -pub struct Next<'a> { - middlewares: &'a [BoxMiddleware], - handler: &'a dyn DynHandler, +pub struct Next<'mw> { + handler: &'mw dyn DynHandler, + middlewares: &'mw [BoxMiddleware], } -impl<'a> Next<'a> { - pub fn new(middlewares: &'a [BoxMiddleware], handler: &'a dyn DynHandler) -> Self { +impl<'mw> Next<'mw> { + #[inline] + pub fn new(middlewares: &'mw [BoxMiddleware], handler: &'mw dyn DynHandler) -> Self { Self { - middlewares, handler, + middlewares, } } + /// # Errors + /// Returns whatever error the next middleware or the final handler produces. + #[inline] pub async fn run(self, ctx: RequestContext) -> Result { if let Some((head, tail)) = self.middlewares.split_first() { head.handle(ctx, Next::new(tail, self.handler)).await @@ -42,17 +75,18 @@ pub struct RequestLogger; #[async_trait(?Send)] impl Middleware for RequestLogger { + #[inline] async fn handle(&self, ctx: RequestContext, next: Next<'_>) -> Result { let method = ctx.request().method().clone(); - let path = ctx.request().uri().path().to_string(); + let path = ctx.request().uri().path().to_owned(); let start = Instant::now(); match next.run(ctx).await { Ok(response) => { let status = response.status(); - let elapsed = start.elapsed().as_secs_f64() * 1000.0; + let elapsed = start.elapsed().as_millis(); tracing::info!( - "request method={} path={} status={} elapsed_ms={:.2}", + "request method={} path={} status={} elapsed_ms={}", method, path, status.as_u16(), @@ -63,9 +97,9 @@ impl Middleware for RequestLogger { Err(err) => { let status = err.status(); let message = err.message(); - let elapsed = start.elapsed().as_secs_f64() * 1000.0; + let elapsed = start.elapsed().as_millis(); tracing::error!( - "request method={} path={} status={} error={} elapsed_ms={:.2}", + "request method={} path={} status={} error={} elapsed_ms={}", method, path, status.as_u16(), @@ -78,50 +112,25 @@ impl Middleware for RequestLogger { } } -pub struct 
FnMiddleware -where - F: Send + Sync + 'static, -{ - f: F, -} - -impl FnMiddleware -where - F: Send + Sync + 'static, -{ - pub fn new(f: F) -> Self { - Self { f } - } -} - -#[async_trait(?Send)] -impl Middleware for FnMiddleware +#[inline] +pub fn middleware_fn(func: F) -> FnMiddleware where F: Fn(RequestContext, Next<'_>) -> Fut + Send + Sync + 'static, Fut: Future>, { - async fn handle(&self, ctx: RequestContext, next: Next<'_>) -> Result { - (self.f)(ctx, next).await - } -} - -pub fn middleware_fn(f: F) -> FnMiddleware -where - F: Fn(RequestContext, Next<'_>) -> Fut + Send + Sync + 'static, - Fut: Future>, -{ - FnMiddleware::new(f) + FnMiddleware::new(func) } #[cfg(test)] mod tests { use super::*; use crate::body::Body; - use crate::handler::IntoHandler; + use crate::handler::IntoHandler as _; use crate::http::{request_builder, Method, Response, StatusCode}; use crate::params::PathParams; use crate::response::response_with_body; use futures::executor::block_on; + use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::{Arc, Mutex}; struct RecordingMiddleware { @@ -129,19 +138,16 @@ mod tests { name: &'static str, } + struct ShortCircuit; + #[async_trait(?Send)] impl Middleware for RecordingMiddleware { async fn handle(&self, ctx: RequestContext, next: Next<'_>) -> Result { - { - let mut entries = self.log.lock().unwrap(); - entries.push(self.name.to_string()); - } + self.log.lock().unwrap().push(self.name.to_owned()); next.run(ctx).await } } - struct ShortCircuit; - #[async_trait(?Send)] impl Middleware for ShortCircuit { async fn handle( @@ -149,7 +155,7 @@ mod tests { _ctx: RequestContext, _next: Next<'_>, ) -> Result { - Ok(response_with_body(StatusCode::UNAUTHORIZED, Body::empty())) + response_with_body(StatusCode::UNAUTHORIZED, Body::empty()) } } @@ -163,7 +169,17 @@ mod tests { } async fn ok_handler(_ctx: RequestContext) -> Result { - Ok(response_with_body(StatusCode::OK, Body::empty())) + response_with_body(StatusCode::OK, Body::empty()) + } + + 
#[test] + fn middleware_can_short_circuit() { + let handler = ok_handler.into_handler(); + + let middlewares: Vec = vec![Arc::new(ShortCircuit)]; + let response = block_on(Next::new(&middlewares, handler.as_ref()).run(empty_context())) + .expect("response"); + assert_eq!(response.status(), StatusCode::UNAUTHORIZED); } #[test] @@ -180,31 +196,38 @@ mod tests { }; let handler = (|_ctx: RequestContext| async move { - Ok::(response_with_body(StatusCode::OK, Body::empty())) + response_with_body(StatusCode::OK, Body::empty()) }) .into_handler(); - let middlewares: Vec = vec![ - Arc::new(first) as BoxMiddleware, - Arc::new(second) as BoxMiddleware, - ]; + let middlewares: Vec = vec![Arc::new(first), Arc::new(second)]; let result = block_on(Next::new(&middlewares, handler.as_ref()).run(empty_context())) .expect("response"); assert_eq!(result.status(), StatusCode::OK); let calls = log.lock().unwrap().clone(); - assert_eq!(calls, vec!["first".to_string(), "second".to_string()]); + assert_eq!(calls, vec!["first".to_owned(), "second".to_owned()]); } #[test] - fn middleware_can_short_circuit() { - let handler = ok_handler.into_handler(); + fn middleware_fn_executes_closure() { + let called = Arc::new(AtomicBool::new(false)); + let outer_flag = Arc::clone(&called); + let middleware = middleware_fn(move |_ctx, _next| { + let inner_flag = Arc::clone(&outer_flag); + async move { + inner_flag.store(true, Ordering::SeqCst); + response_with_body(StatusCode::OK, Body::empty()) + } + }); - let middlewares: Vec = vec![Arc::new(ShortCircuit) as BoxMiddleware]; + let handler = ok_handler.into_handler(); + let middlewares: Vec = vec![Arc::new(middleware)]; let response = block_on(Next::new(&middlewares, handler.as_ref()).run(empty_context())) .expect("response"); - assert_eq!(response.status(), StatusCode::UNAUTHORIZED); + assert_eq!(response.status(), StatusCode::OK); + assert!(called.load(Ordering::SeqCst)); } #[test] @@ -234,24 +257,4 @@ mod tests { .expect_err("error"); 
assert_eq!(err.status(), StatusCode::BAD_REQUEST); } - - #[test] - fn middleware_fn_executes_closure() { - let called = Arc::new(Mutex::new(false)); - let flag = Arc::clone(&called); - let middleware = middleware_fn(move |_ctx, _next| { - let flag = Arc::clone(&flag); - async move { - *flag.lock().unwrap() = true; - Ok(response_with_body(StatusCode::OK, Body::empty())) - } - }); - - let handler = ok_handler.into_handler(); - let middlewares: Vec = vec![Arc::new(middleware) as BoxMiddleware]; - let response = block_on(Next::new(&middlewares, handler.as_ref()).run(empty_context())) - .expect("response"); - assert_eq!(response.status(), StatusCode::OK); - assert!(*called.lock().unwrap()); - } } diff --git a/crates/edgezero-core/src/params.rs b/crates/edgezero-core/src/params.rs index eb0b919..67bd177 100644 --- a/crates/edgezero-core/src/params.rs +++ b/crates/edgezero-core/src/params.rs @@ -9,14 +9,9 @@ pub struct PathParams { } impl PathParams { - pub fn new(inner: HashMap) -> Self { - Self { inner } - } - - pub fn get(&self, key: &str) -> Option<&str> { - self.inner.get(key).map(|s| s.as_str()) - } - + /// # Errors + /// Returns [`serde_json::Error`] if the path parameters cannot be deserialized into `T`. 
+ #[inline] pub fn deserialize(&self) -> Result where T: DeserializeOwned, @@ -24,6 +19,17 @@ impl PathParams { let value = serde_json::to_value(&self.inner)?; serde_json::from_value(value) } + + #[inline] + pub fn get(&self, key: &str) -> Option<&str> { + self.inner.get(key).map(String::as_str) + } + + #[must_use] + #[inline] + pub fn new(inner: HashMap) -> Self { + Self { inner } + } } #[cfg(test)] @@ -31,24 +37,17 @@ mod tests { use super::*; use serde::Deserialize; - fn params(map: &[(&str, &str)]) -> PathParams { - let inner = map - .iter() - .map(|(k, v)| ((*k).to_string(), (*v).to_string())) - .collect(); - PathParams::new(inner) - } - #[derive(Debug, Deserialize, PartialEq)] struct StringParams { id: String, } - #[test] - fn get_returns_expected_value() { - let params = params(&[("id", "7")]); - assert_eq!(params.get("id"), Some("7")); - assert_eq!(params.get("missing"), None); + fn params(map: &[(&str, &str)]) -> PathParams { + let inner = map + .iter() + .map(|(key, value)| ((*key).to_owned(), (*value).to_owned())) + .collect(); + PathParams::new(inner) } #[test] @@ -60,14 +59,22 @@ mod tests { #[test] fn deserialize_propagates_errors() { - #[allow(dead_code)] + #[expect(dead_code, reason = "field exercised only via Deserialize")] #[derive(Debug, Deserialize)] struct NumericParams { id: u32, } let params = params(&[("id", "not-a-number")]); - let result: Result = params.deserialize(); - assert!(result.is_err()); + params + .deserialize::() + .expect_err("`id` is not a number"); + } + + #[test] + fn get_returns_expected_value() { + let params = params(&[("id", "7")]); + assert_eq!(params.get("id"), Some("7")); + assert_eq!(params.get("missing"), None); } } diff --git a/crates/edgezero-core/src/proxy.rs b/crates/edgezero-core/src/proxy.rs index ec28857..60e96e1 100644 --- a/crates/edgezero-core/src/proxy.rs +++ b/crates/edgezero-core/src/proxy.rs @@ -13,69 +13,112 @@ use crate::http::{ /// forwarded the request (e.g. "fastly", "cloudflare", "spin"). 
pub const PROXY_HEADER: &str = "x-edgezero-proxy"; -/// Outbound request description for a proxy operation. -pub struct ProxyRequest { - method: Method, - uri: Uri, - headers: HeaderMap, - body: Body, - extensions: Extensions, +#[async_trait(?Send)] +pub trait ProxyClient: Send + Sync { + async fn send(&self, request: ProxyRequest) -> Result; } -impl ProxyRequest { - pub fn new(method: Method, uri: Uri) -> Self { - Self { - method, - uri, - headers: HeaderMap::new(), - body: Body::empty(), - extensions: Extensions::new(), - } - } +#[derive(Clone)] +pub struct ProxyHandle { + client: Arc, +} - pub fn from_request(request: Request, uri: Uri) -> Self { - let (parts, body) = request.into_parts(); - Self { - method: parts.method, - uri, - headers: parts.headers, - body, - extensions: parts.extensions, - } +impl ProxyHandle { + #[must_use] + #[inline] + pub fn client(&self) -> Arc { + Arc::clone(&self.client) } - pub fn method(&self) -> &Method { - &self.method + /// # Errors + /// Returns [`EdgeError`] if the underlying [`ProxyClient`] fails or the + /// response cannot be assembled. + #[inline] + pub async fn forward(&self, request: ProxyRequest) -> Result { + let response = self.client.send(request).await?; + response.into_response() } - pub fn uri(&self) -> &Uri { - &self.uri + #[inline] + pub fn new(client: Arc) -> Self { + Self { client } } - pub fn headers(&self) -> &HeaderMap { - &self.headers + #[inline] + pub fn with_client(client: C) -> Self + where + C: ProxyClient + 'static, + { + Self { + client: Arc::new(client), + } } +} - pub fn headers_mut(&mut self) -> &mut HeaderMap { - &mut self.headers +/// Outbound request description for a proxy operation. 
+pub struct ProxyRequest { + body: Body, + extensions: Extensions, + headers: HeaderMap, + method: Method, + uri: Uri, +} + +impl fmt::Debug for ProxyRequest { + #[inline] + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("ProxyRequest") + .field("method", &self.method) + .field("uri", &self.uri) + .field("headers", &self.headers) + .finish_non_exhaustive() } +} +impl ProxyRequest { + #[inline] pub fn body(&self) -> &Body { &self.body } + #[inline] pub fn body_mut(&mut self) -> &mut Body { &mut self.body } + #[inline] pub fn extensions(&self) -> &Extensions { &self.extensions } + #[inline] pub fn extensions_mut(&mut self) -> &mut Extensions { &mut self.extensions } + #[inline] + pub fn from_request(request: Request, uri: Uri) -> Self { + let (parts, body) = request.into_parts(); + Self { + body, + extensions: parts.extensions, + headers: parts.headers, + method: parts.method, + uri, + } + } + + #[inline] + pub fn headers(&self) -> &HeaderMap { + &self.headers + } + + #[inline] + pub fn headers_mut(&mut self) -> &mut HeaderMap { + &mut self.headers + } + + #[inline] pub fn into_parts(self) -> (Method, Uri, HeaderMap, Body, Extensions) { ( self.method, @@ -85,121 +128,112 @@ impl ProxyRequest { self.extensions, ) } -} -impl fmt::Debug for ProxyRequest { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("ProxyRequest") - .field("method", &self.method) - .field("uri", &self.uri) - .field("headers", &self.headers) - .finish() + #[inline] + pub fn method(&self) -> &Method { + &self.method } -} - -pub struct ProxyResponse { - status: StatusCode, - headers: HeaderMap, - body: Body, - extensions: Extensions, -} -impl ProxyResponse { - pub fn new(status: StatusCode, body: Body) -> Self { + #[inline] + pub fn new(method: Method, uri: Uri) -> Self { Self { - status, - headers: HeaderMap::new(), - body, + body: Body::empty(), extensions: Extensions::new(), + headers: HeaderMap::new(), + method, + uri, } } - pub fn 
status(&self) -> StatusCode { - self.status + #[inline] + pub fn uri(&self) -> &Uri { + &self.uri } +} - pub fn headers_mut(&mut self) -> &mut HeaderMap { - &mut self.headers - } +pub struct ProxyResponse { + body: Body, + extensions: Extensions, + headers: HeaderMap, + status: StatusCode, +} - pub fn headers(&self) -> &HeaderMap { - &self.headers +impl fmt::Debug for ProxyResponse { + #[inline] + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("ProxyResponse") + .field("status", &self.status) + .finish_non_exhaustive() } +} +impl ProxyResponse { + #[inline] pub fn body(&self) -> &Body { &self.body } + #[inline] pub fn body_mut(&mut self) -> &mut Body { &mut self.body } + #[inline] pub fn extensions(&self) -> &Extensions { &self.extensions } + #[inline] pub fn extensions_mut(&mut self) -> &mut Extensions { &mut self.extensions } - pub fn into_response(self) -> Response { - let mut builder = response_builder().status(self.status); - for (name, value) in self.headers.iter() { - builder = builder.header(name, value); - } - builder - .body(self.body) - .expect("proxy response builder should not fail") + #[inline] + pub fn headers(&self) -> &HeaderMap { + &self.headers } -} -impl fmt::Debug for ProxyResponse { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("ProxyResponse") - .field("status", &self.status) - .finish() + #[inline] + pub fn headers_mut(&mut self) -> &mut HeaderMap { + &mut self.headers } -} - -#[derive(Clone)] -pub struct ProxyHandle { - client: Arc, -} -impl ProxyHandle { - pub fn new(client: Arc) -> Self { - Self { client } + /// # Errors + /// Returns [`EdgeError::internal`] if the underlying `http::Response::builder()` + /// rejects a header — should be unreachable since we only store names/values + /// that were already validated, but propagation lets a faulty upstream stream + /// fail the request instead of crashing the worker. 
+ #[inline] + pub fn into_response(self) -> Result { + let mut builder = response_builder().status(self.status); + for (name, value) in &self.headers { + builder = builder.header(name, value); + } + builder.body(self.body).map_err(EdgeError::internal) } - pub fn with_client(client: C) -> Self - where - C: ProxyClient + 'static, - { + #[inline] + pub fn new(status: StatusCode, body: Body) -> Self { Self { - client: Arc::new(client), + body, + extensions: Extensions::new(), + headers: HeaderMap::new(), + status, } } - pub fn client(&self) -> Arc { - Arc::clone(&self.client) - } - - pub async fn forward(&self, request: ProxyRequest) -> Result { - let response = self.client.send(request).await?; - Ok(response.into_response()) + #[inline] + pub fn status(&self) -> StatusCode { + self.status } } -#[async_trait(?Send)] -pub trait ProxyClient: Send + Sync { - async fn send(&self, request: ProxyRequest) -> Result; -} - pub struct ProxyService { client: C, } impl ProxyService { + #[inline] pub fn new(client: C) -> Self { Self { client } } @@ -209,9 +243,13 @@ impl ProxyService where C: ProxyClient, { + /// # Errors + /// Returns [`EdgeError`] if the underlying [`ProxyClient`] fails or the + /// response cannot be assembled. 
+ #[inline] pub async fn forward(&self, request: ProxyRequest) -> Result { let response = self.client.send(request).await?; - Ok(response.into_response()) + response.into_response() } } @@ -219,33 +257,64 @@ where mod tests { use super::*; use crate::body::Body; + use crate::http::header::HeaderName; use crate::http::{request_builder, HeaderValue, Method, StatusCode, Uri}; use bytes::Bytes; use futures::executor::block_on; - use futures_util::{stream, StreamExt}; + use futures_util::{stream, StreamExt as _}; + + struct EchoBodyClient; + + struct EchoHeadersClient; + + struct EchoMethodClient; + + struct ErrorClient; + + struct StreamingClient; struct TestClient; #[async_trait(?Send)] - impl ProxyClient for TestClient { + impl ProxyClient for EchoBodyClient { async fn send(&self, request: ProxyRequest) -> Result { - let (method, uri, headers, _body, _) = request.into_parts(); - assert_eq!(method, Method::GET); - assert_eq!(uri, Uri::from_static("https://example.com")); - assert_eq!( - headers.get("x-demo"), - Some(&HeaderValue::from_static("true")) - ); + let (_, _, _, body, _) = request.into_parts(); + Ok(ProxyResponse::new(StatusCode::OK, body)) + } + } - let chunks = stream::iter(vec![ - Bytes::from_static(b"hello"), - Bytes::from_static(b" world"), - ]); - Ok(ProxyResponse::new(StatusCode::OK, Body::stream(chunks))) + #[async_trait(?Send)] + impl ProxyClient for EchoHeadersClient { + async fn send(&self, request: ProxyRequest) -> Result { + let mut resp = ProxyResponse::new(StatusCode::OK, Body::empty()); + // Echo back headers with x-echo- prefix + for (name, value) in request.headers() { + let echo_name = format!("x-echo-{}", name.as_str()); + if let Ok(header_name) = echo_name.parse::() { + resp.headers_mut().insert(header_name, value.clone()); + } + } + Ok(resp) } } - struct StreamingClient; + #[async_trait(?Send)] + impl ProxyClient for EchoMethodClient { + async fn send(&self, request: ProxyRequest) -> Result { + let method_str = request.method().as_str(); 
+ Ok(ProxyResponse::new( + StatusCode::OK, + Body::from(method_str.to_owned()), + )) + } + } + + #[async_trait(?Send)] + impl ProxyClient for ErrorClient { + async fn send(&self, _request: ProxyRequest) -> Result { + Err(EdgeError::bad_request("connection failed")) + } + } #[async_trait(?Send)] impl ProxyClient for StreamingClient { @@ -259,20 +328,37 @@ mod tests { } } - #[test] - fn proxy_forward_roundtrips() { - let request = request_builder() - .method(Method::GET) - .uri("/local") - .header("x-demo", "true") - .body(Body::empty()) - .expect("request"); - - let target = Uri::from_static("https://example.com"); - let proxy_request = ProxyRequest::from_request(request, target); - let service = ProxyService::new(TestClient); - let response = block_on(service.forward(proxy_request)).expect("response"); - assert_eq!(response.status(), StatusCode::OK); + #[async_trait(?Send)] + impl ProxyClient for TestClient { + async fn send(&self, request: ProxyRequest) -> Result { + let (method, uri, headers, _body, _) = request.into_parts(); + assert_eq!(method, Method::GET); + assert_eq!(uri, Uri::from_static("https://example.com")); + assert_eq!( + headers.get("x-demo"), + Some(&HeaderValue::from_static("true")) + ); + + let chunks = stream::iter(vec![ + Bytes::from_static(b"hello"), + Bytes::from_static(b" world"), + ]); + Ok(ProxyResponse::new(StatusCode::OK, Body::stream(chunks))) + } + } + + fn collect_body(body: Body) -> Vec { + match body { + Body::Once(bytes) => bytes.to_vec(), + Body::Stream(mut stream) => block_on(async { + let mut data = Vec::new(); + while let Some(result) = stream.next().await { + let chunk = result.expect("chunk"); + data.extend_from_slice(&chunk); + } + data + }), + } } #[test] @@ -294,28 +380,154 @@ mod tests { assert_eq!(collected, b"stream-onestream-two"); } - fn collect_body(body: Body) -> Vec { - match body { - Body::Once(bytes) => bytes.to_vec(), - Body::Stream(mut stream) => block_on(async { - let mut data = Vec::new(); - while let 
Some(chunk) = stream.next().await { - let chunk = chunk.expect("chunk"); - data.extend_from_slice(&chunk); - } - data - }), + #[test] + fn proxy_forward_roundtrips() { + let request = request_builder() + .method(Method::GET) + .uri("/local") + .header("x-demo", "true") + .body(Body::empty()) + .expect("request"); + + let target = Uri::from_static("https://example.com"); + let proxy_request = ProxyRequest::from_request(request, target); + let service = ProxyService::new(TestClient); + let response = block_on(service.forward(proxy_request)).expect("response"); + assert_eq!(response.status(), StatusCode::OK); + } + + #[test] + fn proxy_forwards_request_body() { + let service = ProxyService::new(EchoBodyClient); + let request = request_builder() + .method(Method::POST) + .uri("/test") + .body(Body::from("request body content")) + .expect("request"); + + let proxy_req = + ProxyRequest::from_request(request, Uri::from_static("https://example.com")); + let response = block_on(service.forward(proxy_req)).expect("response"); + + let body_bytes = collect_body(response.into_body()); + assert_eq!(body_bytes, b"request body content"); + } + + #[test] + fn proxy_forwards_request_headers() { + let service = ProxyService::new(EchoHeadersClient); + let request = request_builder() + .method(Method::GET) + .uri("/test") + .header("x-custom-header", "custom-value") + .header("authorization", "Bearer token123") + .body(Body::empty()) + .expect("request"); + + let proxy_req = + ProxyRequest::from_request(request, Uri::from_static("https://example.com")); + let response = block_on(service.forward(proxy_req)).expect("response"); + + assert_eq!( + response + .headers() + .get("x-echo-x-custom-header") + .and_then(|value| value.to_str().ok()), + Some("custom-value") + ); + assert_eq!( + response + .headers() + .get("x-echo-authorization") + .and_then(|value| value.to_str().ok()), + Some("Bearer token123") + ); + } + + #[test] + fn proxy_forwards_various_methods() { + let service = 
ProxyService::new(EchoMethodClient); + + for method in [ + Method::GET, + Method::POST, + Method::PUT, + Method::DELETE, + Method::PATCH, + Method::HEAD, + Method::OPTIONS, + ] { + let req = ProxyRequest::new(method.clone(), Uri::from_static("https://example.com")); + let response = block_on(service.forward(req)).expect("response"); + assert_eq!(response.status(), StatusCode::OK); } } - // ProxyRequest tests #[test] - fn proxy_request_new_creates_empty_request() { + fn proxy_handle_forward_returns_response() { + let handle = ProxyHandle::with_client(TestClient); + let request = request_builder() + .method(Method::GET) + .uri("/test") + .header("x-demo", "true") + .body(Body::empty()) + .expect("request"); + + let proxy_req = + ProxyRequest::from_request(request, Uri::from_static("https://example.com")); + let response = block_on(handle.forward(proxy_req)).expect("response"); + assert_eq!(response.status(), StatusCode::OK); + } + + #[test] + fn proxy_handle_new_wraps_client() { + let client = Arc::new(TestClient); + let handle = ProxyHandle::new(client); + assert!(Arc::strong_count(&handle.client()) >= 1); + } + + #[test] + fn proxy_handle_propagates_client_errors() { + let handle = ProxyHandle::with_client(ErrorClient); let req = ProxyRequest::new(Method::GET, Uri::from_static("https://example.com")); - assert_eq!(req.method(), &Method::GET); - assert_eq!(req.uri(), &Uri::from_static("https://example.com")); - assert!(req.headers().is_empty()); - assert!(matches!(req.body(), Body::Once(b) if b.is_empty())); + block_on(handle.forward(req)).expect_err("ErrorClient propagates an error"); + } + + #[test] + fn proxy_handle_with_client_creates_arc() { + let handle = ProxyHandle::with_client(TestClient); + assert!(Arc::strong_count(&handle.client()) >= 1); + } + + #[test] + fn proxy_request_body_mut_allows_modification() { + let mut req = ProxyRequest::new(Method::POST, Uri::from_static("https://example.com")); + *req.body_mut() = Body::from("new body content"); + 
assert!(matches!( + req.body(), + Body::Once(bytes) if bytes.as_ref() == b"new body content" + )); + } + + #[test] + fn proxy_request_debug_format() { + let mut req = ProxyRequest::new(Method::GET, Uri::from_static("https://example.com")); + req.headers_mut() + .insert("x-debug", HeaderValue::from_static("test")); + let debug = format!("{req:?}"); + assert!(debug.contains("ProxyRequest")); + assert!(debug.contains("GET")); + assert!(debug.contains("example.com")); + } + + #[test] + fn proxy_request_extensions_mut_allows_modification() { + let mut req = ProxyRequest::new(Method::GET, Uri::from_static("https://example.com")); + req.extensions_mut().insert("custom-data".to_owned()); + assert_eq!( + req.extensions().get::(), + Some(&"custom-data".to_owned()) + ); } #[test] @@ -336,7 +548,7 @@ mod tests { proxy_req .headers() .get("x-custom") - .and_then(|v| v.to_str().ok()), + .and_then(|value| value.to_str().ok()), Some("value") ); } @@ -349,26 +561,6 @@ mod tests { assert!(req.headers().get("authorization").is_some()); } - #[test] - fn proxy_request_body_mut_allows_modification() { - let mut req = ProxyRequest::new(Method::POST, Uri::from_static("https://example.com")); - *req.body_mut() = Body::from("new body content"); - assert!(matches!( - req.body(), - Body::Once(bytes) if bytes.as_ref() == b"new body content" - )); - } - - #[test] - fn proxy_request_extensions_mut_allows_modification() { - let mut req = ProxyRequest::new(Method::GET, Uri::from_static("https://example.com")); - req.extensions_mut().insert("custom-data".to_string()); - assert_eq!( - req.extensions().get::(), - Some(&"custom-data".to_string()) - ); - } - #[test] fn proxy_request_into_parts_destructures() { let mut req = ProxyRequest::new( @@ -384,56 +576,51 @@ mod tests { assert_eq!(uri, Uri::from_static("https://example.com/resource")); assert!(headers.get("x-test").is_some()); assert!(matches!( - body, - Body::Once(ref bytes) if bytes.as_ref() == b"body" + &body, + Body::Once(bytes) if 
bytes.as_ref() == b"body" )); } #[test] - fn proxy_request_debug_format() { - let mut req = ProxyRequest::new(Method::GET, Uri::from_static("https://example.com")); - req.headers_mut() - .insert("x-debug", HeaderValue::from_static("test")); - let debug = format!("{:?}", req); - assert!(debug.contains("ProxyRequest")); - assert!(debug.contains("GET")); - assert!(debug.contains("example.com")); + fn proxy_request_new_creates_empty_request() { + let req = ProxyRequest::new(Method::GET, Uri::from_static("https://example.com")); + assert_eq!(req.method(), &Method::GET); + assert_eq!(req.uri(), &Uri::from_static("https://example.com")); + assert!(req.headers().is_empty()); + assert!(matches!(req.body(), Body::Once(bytes) if bytes.is_empty())); } - // ProxyResponse tests #[test] - fn proxy_response_new_creates_response() { - let resp = ProxyResponse::new(StatusCode::OK, Body::from("response body")); - assert_eq!(resp.status(), StatusCode::OK); + fn proxy_response_body_mut_allows_modification() { + let mut resp = ProxyResponse::new(StatusCode::OK, Body::empty()); + *resp.body_mut() = Body::from("updated body"); assert!(matches!( resp.body(), - Body::Once(bytes) if bytes.as_ref() == b"response body" + Body::Once(bytes) if bytes.as_ref() == b"updated body" )); } #[test] - fn proxy_response_headers_mut_allows_modification() { - let mut resp = ProxyResponse::new(StatusCode::OK, Body::empty()); - resp.headers_mut() - .insert("content-type", HeaderValue::from_static("application/json")); - assert!(resp.headers().get("content-type").is_some()); + fn proxy_response_debug_format() { + let resp = ProxyResponse::new(StatusCode::NOT_FOUND, Body::empty()); + let debug = format!("{resp:?}"); + assert!(debug.contains("ProxyResponse")); + assert!(debug.contains("404")); } #[test] - fn proxy_response_body_mut_allows_modification() { + fn proxy_response_extensions_mut_allows_modification() { let mut resp = ProxyResponse::new(StatusCode::OK, Body::empty()); - *resp.body_mut() = 
Body::from("updated body"); - assert!(matches!( - resp.body(), - Body::Once(bytes) if bytes.as_ref() == b"updated body" - )); + resp.extensions_mut().insert(42_i32); + assert_eq!(resp.extensions().get::(), Some(&42_i32)); } #[test] - fn proxy_response_extensions_mut_allows_modification() { + fn proxy_response_headers_mut_allows_modification() { let mut resp = ProxyResponse::new(StatusCode::OK, Body::empty()); - resp.extensions_mut().insert(42i32); - assert_eq!(resp.extensions().get::(), Some(&42)); + resp.headers_mut() + .insert("content-type", HeaderValue::from_static("application/json")); + assert!(resp.headers().get("content-type").is_some()); } #[test] @@ -442,57 +629,19 @@ mod tests { resp.headers_mut() .insert("x-custom", HeaderValue::from_static("header")); - let http_resp = resp.into_response(); + let http_resp = resp.into_response().expect("response"); assert_eq!(http_resp.status(), StatusCode::CREATED); assert!(http_resp.headers().get("x-custom").is_some()); } #[test] - fn proxy_response_debug_format() { - let resp = ProxyResponse::new(StatusCode::NOT_FOUND, Body::empty()); - let debug = format!("{:?}", resp); - assert!(debug.contains("ProxyResponse")); - assert!(debug.contains("404")); - } - - // ProxyHandle tests - #[test] - fn proxy_handle_new_wraps_client() { - let client = Arc::new(TestClient); - let handle = ProxyHandle::new(client); - assert!(Arc::strong_count(&handle.client()) >= 1); - } - - #[test] - fn proxy_handle_with_client_creates_arc() { - let handle = ProxyHandle::with_client(TestClient); - assert!(Arc::strong_count(&handle.client()) >= 1); - } - - #[test] - fn proxy_handle_forward_returns_response() { - let handle = ProxyHandle::with_client(TestClient); - let request = request_builder() - .method(Method::GET) - .uri("/test") - .header("x-demo", "true") - .body(Body::empty()) - .expect("request"); - - let proxy_req = - ProxyRequest::from_request(request, Uri::from_static("https://example.com")); - let response = 
block_on(handle.forward(proxy_req)).expect("response"); - assert_eq!(response.status(), StatusCode::OK); - } - - // ProxyClient error handling - struct ErrorClient; - - #[async_trait(?Send)] - impl ProxyClient for ErrorClient { - async fn send(&self, _request: ProxyRequest) -> Result { - Err(EdgeError::bad_request("connection failed")) - } + fn proxy_response_new_creates_response() { + let resp = ProxyResponse::new(StatusCode::OK, Body::from("response body")); + assert_eq!(resp.status(), StatusCode::OK); + assert!(matches!( + resp.body(), + Body::Once(bytes) if bytes.as_ref() == b"response body" + )); } #[test] @@ -504,122 +653,4 @@ mod tests { let err = result.unwrap_err(); assert_eq!(err.status(), StatusCode::BAD_REQUEST); } - - #[test] - fn proxy_handle_propagates_client_errors() { - let handle = ProxyHandle::with_client(ErrorClient); - let req = ProxyRequest::new(Method::GET, Uri::from_static("https://example.com")); - let result = block_on(handle.forward(req)); - assert!(result.is_err()); - } - - // Test various HTTP methods - struct EchoMethodClient; - - #[async_trait(?Send)] - impl ProxyClient for EchoMethodClient { - async fn send(&self, request: ProxyRequest) -> Result { - let method_str = request.method().as_str(); - Ok(ProxyResponse::new( - StatusCode::OK, - Body::from(method_str.to_string()), - )) - } - } - - #[test] - fn proxy_forwards_various_methods() { - let service = ProxyService::new(EchoMethodClient); - - for method in [ - Method::GET, - Method::POST, - Method::PUT, - Method::DELETE, - Method::PATCH, - Method::HEAD, - Method::OPTIONS, - ] { - let req = ProxyRequest::new(method.clone(), Uri::from_static("https://example.com")); - let response = block_on(service.forward(req)).expect("response"); - assert_eq!(response.status(), StatusCode::OK); - } - } - - // Test body forwarding - struct EchoBodyClient; - - #[async_trait(?Send)] - impl ProxyClient for EchoBodyClient { - async fn send(&self, request: ProxyRequest) -> Result { - let (_, _, _, body, 
_) = request.into_parts(); - Ok(ProxyResponse::new(StatusCode::OK, body)) - } - } - - #[test] - fn proxy_forwards_request_body() { - let service = ProxyService::new(EchoBodyClient); - let request = request_builder() - .method(Method::POST) - .uri("/test") - .body(Body::from("request body content")) - .expect("request"); - - let proxy_req = - ProxyRequest::from_request(request, Uri::from_static("https://example.com")); - let response = block_on(service.forward(proxy_req)).expect("response"); - - let body_bytes = collect_body(response.into_body()); - assert_eq!(body_bytes, b"request body content"); - } - - // Test header forwarding - struct EchoHeadersClient; - - #[async_trait(?Send)] - impl ProxyClient for EchoHeadersClient { - async fn send(&self, request: ProxyRequest) -> Result { - let mut resp = ProxyResponse::new(StatusCode::OK, Body::empty()); - // Echo back headers with x-echo- prefix - for (name, value) in request.headers().iter() { - let echo_name = format!("x-echo-{}", name.as_str()); - if let Ok(header_name) = echo_name.parse::() { - resp.headers_mut().insert(header_name, value.clone()); - } - } - Ok(resp) - } - } - - #[test] - fn proxy_forwards_request_headers() { - let service = ProxyService::new(EchoHeadersClient); - let request = request_builder() - .method(Method::GET) - .uri("/test") - .header("x-custom-header", "custom-value") - .header("authorization", "Bearer token123") - .body(Body::empty()) - .expect("request"); - - let proxy_req = - ProxyRequest::from_request(request, Uri::from_static("https://example.com")); - let response = block_on(service.forward(proxy_req)).expect("response"); - - assert_eq!( - response - .headers() - .get("x-echo-x-custom-header") - .and_then(|v| v.to_str().ok()), - Some("custom-value") - ); - assert_eq!( - response - .headers() - .get("x-echo-authorization") - .and_then(|v| v.to_str().ok()), - Some("Bearer token123") - ); - } } diff --git a/crates/edgezero-core/src/responder.rs b/crates/edgezero-core/src/responder.rs 
index d75ecb0..745f4d5 100644 --- a/crates/edgezero-core/src/responder.rs +++ b/crates/edgezero-core/src/responder.rs @@ -3,6 +3,8 @@ use crate::http::Response; use crate::response::IntoResponse; pub trait Responder: Sized { + /// # Errors + /// Returns [`EdgeError`] if the value cannot be turned into a response (e.g., a `Result`'s `Err` variant). fn respond(self) -> Result; } @@ -10,8 +12,9 @@ impl Responder for T where T: IntoResponse, { + #[inline] fn respond(self) -> Result { - Ok(self.into_response()) + self.into_response() } } @@ -19,8 +22,9 @@ impl Responder for Result where T: IntoResponse, { + #[inline] fn respond(self) -> Result { - self.map(IntoResponse::into_response) + self.and_then(IntoResponse::into_response) } } @@ -34,7 +38,7 @@ mod tests { fn responder_for_into_response_types() { let response = "hello".respond().expect("response"); assert_eq!(response.status(), StatusCode::OK); - assert_eq!(response.body().as_bytes(), b"hello"); + assert_eq!(response.body().as_bytes().expect("buffered"), b"hello"); } #[test] diff --git a/crates/edgezero-core/src/response.rs b/crates/edgezero-core/src/response.rs index 1c1e94c..807604a 100644 --- a/crates/edgezero-core/src/response.rs +++ b/crates/edgezero-core/src/response.rs @@ -1,34 +1,48 @@ use crate::body::Body; +use crate::error::EdgeError; use crate::http::{ header::{CONTENT_LENGTH, CONTENT_TYPE}, HeaderValue, Response, StatusCode, }; /// Convert common return types into `Response`. +/// +/// **Breaking change (pre-1.0):** this trait now returns `Result`. Callers must propagate response-building failures (typically +/// invalid headers) instead of letting them panic at the `http::Builder` +/// boundary. pub trait IntoResponse { - fn into_response(self) -> Response; + /// # Errors + /// Returns [`EdgeError::internal`] if the underlying HTTP response cannot + /// be assembled — propagated so the request can fail cleanly instead of + /// crashing the worker. 
+ fn into_response(self) -> Result; } impl IntoResponse for Response { - fn into_response(self) -> Response { - self + #[inline] + fn into_response(self) -> Result { + Ok(self) } } impl IntoResponse for Body { - fn into_response(self) -> Response { + #[inline] + fn into_response(self) -> Result { response_with_body(StatusCode::OK, self) } } impl IntoResponse for &str { - fn into_response(self) -> Response { + #[inline] + fn into_response(self) -> Result { response_with_body(StatusCode::OK, Body::text(self)) } } impl IntoResponse for String { - fn into_response(self) -> Response { + #[inline] + fn into_response(self) -> Result { response_with_body(StatusCode::OK, Body::text(self)) } } @@ -36,6 +50,7 @@ impl IntoResponse for String { pub struct Text(T); impl Text { + #[inline] pub fn new(value: T) -> Self { Self(value) } @@ -45,13 +60,15 @@ impl IntoResponse for Text where T: Into, { - fn into_response(self) -> Response { + #[inline] + fn into_response(self) -> Result { response_with_body(StatusCode::OK, Body::text(self.0.into())) } } impl IntoResponse for () { - fn into_response(self) -> Response { + #[inline] + fn into_response(self) -> Result { response_with_body(StatusCode::NO_CONTENT, Body::empty()) } } @@ -60,20 +77,25 @@ impl IntoResponse for (StatusCode, T) where T: IntoResponse, { - fn into_response(self) -> Response { + #[inline] + fn into_response(self) -> Result { let (status, inner) = self; - let mut response = inner.into_response(); + let mut response = inner.into_response()?; *response.status_mut() = status; - response + Ok(response) } } -pub fn response_with_body(status: StatusCode, body: Body) -> Response { +/// # Errors +/// Returns [`EdgeError::internal`] if the underlying [`http::response::Builder`] +/// rejects the supplied status, headers, or body. 
+#[inline] +pub fn response_with_body(status: StatusCode, body: Body) -> Result { use crate::http::response_builder; let mut builder = response_builder().status(status); - if let Body::Once(ref bytes) = body { + if let Body::Once(bytes) = &body { if !bytes.is_empty() { builder = builder .header(CONTENT_LENGTH, bytes.len().to_string()) @@ -84,9 +106,7 @@ pub fn response_with_body(status: StatusCode, body: Body) -> Response { } } - builder - .body(body) - .expect("static response builder should not fail") + builder.body(body).map_err(EdgeError::internal) } #[cfg(test)] @@ -95,20 +115,20 @@ mod tests { #[test] fn response_with_body_sets_length_and_type() { - let response = response_with_body(StatusCode::OK, Body::from("hello")); + let response = response_with_body(StatusCode::OK, Body::from("hello")).expect("response"); assert_eq!(response.status(), StatusCode::OK); let headers = response.headers(); assert_eq!( headers .get(CONTENT_LENGTH) - .and_then(|v| v.to_str().ok()) + .and_then(|value| value.to_str().ok()) .unwrap(), "5" ); assert_eq!( headers .get(CONTENT_TYPE) - .and_then(|v| v.to_str().ok()) + .and_then(|value| value.to_str().ok()) .unwrap(), "text/plain; charset=utf-8" ); @@ -116,28 +136,30 @@ mod tests { #[test] fn empty_body_does_not_set_length() { - let response = response_with_body(StatusCode::OK, Body::empty()); + let response = response_with_body(StatusCode::OK, Body::empty()).expect("response"); assert!(response.headers().get(CONTENT_LENGTH).is_none()); } #[test] fn text_wrapper_builds_response() { - let response = Text::new("hello").into_response(); + let response = Text::new("hello").into_response().expect("response"); assert_eq!(response.status(), StatusCode::OK); - assert_eq!(response.body().as_bytes(), b"hello"); + assert_eq!(response.body().as_bytes().expect("buffered"), b"hello"); } #[test] fn unit_type_sets_no_content() { - let response = ().into_response(); + let response = ().into_response().expect("response"); assert_eq!(response.status(), 
StatusCode::NO_CONTENT); - assert!(response.body().as_bytes().is_empty()); + assert!(response.body().as_bytes().expect("buffered").is_empty()); } #[test] fn status_code_tuple_overrides_status() { - let response = (StatusCode::CREATED, "created").into_response(); + let response = (StatusCode::CREATED, "created") + .into_response() + .expect("response"); assert_eq!(response.status(), StatusCode::CREATED); - assert_eq!(response.body().as_bytes(), b"created"); + assert_eq!(response.body().as_bytes().expect("buffered"), b"created"); } } diff --git a/crates/edgezero-core/src/router.rs b/crates/edgezero-core/src/router.rs index e524fa8..18e242d 100644 --- a/crates/edgezero-core/src/router.rs +++ b/crates/edgezero-core/src/router.rs @@ -1,5 +1,6 @@ use std::collections::{HashMap, HashSet}; use std::sync::Arc; +use std::task::{Context, Poll}; use matchit::Router as PathRouter; use serde::Serialize; @@ -15,10 +16,26 @@ use crate::http::{ }; use crate::middleware::{BoxMiddleware, Middleware, Next}; use crate::params::PathParams; -use crate::response::IntoResponse; +use crate::response::IntoResponse as _; pub const DEFAULT_ROUTE_LISTING_PATH: &str = "/__edgezero/routes"; +struct RouteEntry { + handler: BoxHandler, +} + +impl Clone for RouteEntry { + fn clone(&self) -> Self { + Self { + handler: Arc::clone(&self.handler), + } + } + + fn clone_from(&mut self, source: &Self) { + self.handler = Arc::clone(&source.handler); + } +} + #[derive(Clone, Debug)] pub struct RouteInfo { method: Method, @@ -26,17 +43,22 @@ pub struct RouteInfo { } impl RouteInfo { - pub fn new(method: Method, path: impl Into) -> Self { + #[must_use] + #[inline] + pub fn method(&self) -> &Method { + &self.method + } + + #[inline] + pub fn new>(method: Method, path: S) -> Self { Self { method, path: path.into(), } } - pub fn method(&self) -> &Method { - &self.method - } - + #[must_use] + #[inline] pub fn path(&self) -> &str { &self.path } @@ -48,119 +70,74 @@ struct RouteListingEntry { path: String, } -fn 
build_listing_response( - payload: &T, - builder: ResponseBuilder, -) -> Result { - let body = Body::json(payload).map_err(EdgeError::internal)?; - let response = builder - .status(StatusCode::OK) - .header(CONTENT_TYPE, HeaderValue::from_static("application/json")) - .body(body) - .map_err(EdgeError::internal)?; - Ok(response) +enum RouteMatch<'route> { + Found(&'route RouteEntry, PathParams), + MethodNotAllowed(Vec), + NotFound, } #[derive(Default)] pub struct RouterBuilder { - routes: HashMap>, middlewares: Vec, route_info: Vec, route_listing_path: Option, + routes: HashMap>, } impl RouterBuilder { - pub fn new() -> Self { - Self::default() - } - - pub fn enable_route_listing(self) -> Self { - self.enable_route_listing_at(DEFAULT_ROUTE_LISTING_PATH) - } - - pub fn enable_route_listing_at(mut self, path: S) -> Self - where - S: Into, - { - let path = path.into(); - assert!(!path.is_empty(), "route listing path cannot be empty"); - assert!( - path.starts_with('/'), - "route listing path must begin with '/'" - ); - self.route_listing_path = Some(path); - self - } - - pub fn route(mut self, path: &str, method: Method, handler: H) -> Self - where - H: IntoHandler, - { - self.add_route(path, method, handler); - self - } - - pub fn get(self, path: &str, handler: H) -> Self - where - H: IntoHandler, - { - self.route(path, Method::GET, handler) - } - - pub fn post(self, path: &str, handler: H) -> Self - where - H: IntoHandler, - { - self.route(path, Method::POST, handler) - } - - pub fn put(self, path: &str, handler: H) -> Self - where - H: IntoHandler, - { - self.route(path, Method::PUT, handler) - } - - pub fn delete(self, path: &str, handler: H) -> Self + #[expect( + clippy::panic, + reason = "duplicate route is a build-time programmer error, not a runtime condition" + )] + fn add_route(&mut self, path: &str, method: Method, handler: H) where H: IntoHandler, { - self.route(path, Method::DELETE, handler) - } - - pub fn middleware(mut self, middleware: M) -> Self - 
where - M: Middleware, - { - self.middlewares.push(Arc::new(middleware)); - self - } + let router = self.routes.entry(method.clone()).or_default(); - pub fn middleware_arc(mut self, middleware: BoxMiddleware) -> Self { - self.middlewares.push(middleware); - self - } + router + .insert( + path, + RouteEntry { + handler: handler.into_handler(), + }, + ) + .unwrap_or_else(|err| panic!("duplicate route definition for {path}: {err}")); + self.route_info + .push(RouteInfo::new(method, path.to_owned())); + } + + /// # Panics + /// Panics if a route is registered for both an explicit path and the route-listing path. + /// Both paths are programmer-supplied at build time; a duplicate is a routing-config bug + /// that should fail loudly before the binary ever serves traffic. + #[expect( + clippy::panic, + reason = "duplicate route is a build-time programmer error, not a runtime condition" + )] + #[must_use] + #[inline] pub fn build(mut self) -> RouterService { let listing_path = self.route_listing_path.clone(); let mut route_info = self.route_info.clone(); - if let Some(ref path) = listing_path { + if let Some(path) = &listing_path { route_info.push(RouteInfo::new(Method::GET, path.clone())); } - let route_index = Arc::new(route_info); + let route_index: Arc<[RouteInfo]> = Arc::from(route_info); if let Some(path) = listing_path { - let index = Arc::clone(&route_index); + let outer_index = Arc::clone(&route_index); let listing_handler = move |_ctx: RequestContext| { - let index = Arc::clone(&index); + let inner_index = Arc::clone(&outer_index); async move { - let payload: Vec = index + let payload: Vec = inner_index .iter() .map(|route| RouteListingEntry { - method: route.method().as_str().to_string(), - path: route.path().to_string(), + method: route.method().as_str().to_owned(), + path: route.path().to_owned(), }) .collect(); @@ -177,85 +154,119 @@ impl RouterBuilder { handler: listing_handler.into_handler(), }, ) - .unwrap_or_else(|err| panic!("duplicate route definition 
for {}: {}", path, err)); + .unwrap_or_else(|err| panic!("duplicate route definition for {path}: {err}")); } RouterService::new(self.routes, self.middlewares, route_index) } - fn add_route(&mut self, path: &str, method: Method, handler: H) + #[must_use] + #[inline] + pub fn delete(self, path: &str, handler: H) -> Self where H: IntoHandler, { - let router = self.routes.entry(method.clone()).or_default(); + self.route(path, Method::DELETE, handler) + } - router - .insert( - path, - RouteEntry { - handler: handler.into_handler(), - }, - ) - .unwrap_or_else(|err| panic!("duplicate route definition for {}: {}", path, err)); + #[must_use] + #[inline] + pub fn enable_route_listing(self) -> Self { + self.enable_route_listing_at(DEFAULT_ROUTE_LISTING_PATH) + } - self.route_info - .push(RouteInfo::new(method, path.to_string())); + /// # Panics + /// Panics if `path` is empty or does not begin with `/`. + #[must_use] + #[inline] + pub fn enable_route_listing_at(mut self, path: S) -> Self + where + S: Into, + { + let route_listing_path = path.into(); + assert!( + !route_listing_path.is_empty(), + "route listing path cannot be empty" + ); + assert!( + route_listing_path.starts_with('/'), + "route listing path must begin with '/'" + ); + self.route_listing_path = Some(route_listing_path); + self } -} -#[derive(Clone)] -pub struct RouterService { - inner: Arc, -} + #[must_use] + #[inline] + pub fn get(self, path: &str, handler: H) -> Self + where + H: IntoHandler, + { + self.route(path, Method::GET, handler) + } -impl RouterService { - fn new( - routes: HashMap>, - middlewares: Vec, - route_index: Arc>, - ) -> Self { - Self { - inner: Arc::new(RouterInner { - routes, - middlewares, - route_index, - }), - } + #[must_use] + #[inline] + pub fn middleware(mut self, middleware: M) -> Self + where + M: Middleware, + { + self.middlewares.push(Arc::new(middleware)); + self } - pub fn builder() -> RouterBuilder { - RouterBuilder::new() + #[must_use] + #[inline] + pub fn middleware_arc(mut 
self, middleware: BoxMiddleware) -> Self { + self.middlewares.push(middleware); + self } - pub fn routes(&self) -> Vec { - (*self.inner.route_index).clone() + #[must_use] + #[inline] + pub fn new() -> Self { + Self::default() } - pub async fn oneshot(&self, request: Request) -> Response { - let mut service = self.clone(); - match service.call(request).await { - Ok(response) => response, - Err(err) => err.into_response(), - } + #[must_use] + #[inline] + pub fn post(self, path: &str, handler: H) -> Self + where + H: IntoHandler, + { + self.route(path, Method::POST, handler) + } + + #[must_use] + #[inline] + pub fn put(self, path: &str, handler: H) -> Self + where + H: IntoHandler, + { + self.route(path, Method::PUT, handler) + } + + #[must_use] + #[inline] + pub fn route(mut self, path: &str, method: Method, handler: H) -> Self + where + H: IntoHandler, + { + self.add_route(path, method, handler); + self } } struct RouterInner { - routes: HashMap>, middlewares: Vec, - route_index: Arc>, -} - -enum RouteMatch<'a> { - Found(&'a RouteEntry, PathParams), - MethodNotAllowed(Vec), - NotFound, + route_index: Arc<[RouteInfo]>, + routes: HashMap>, } impl RouterInner { async fn dispatch(&self, request: Request) -> Result { let method = request.method().clone(); - let path = request.uri().path().to_string(); + let path = request.uri().path().to_owned(); match self.find_route(&method, &path) { RouteMatch::Found(entry, params) => { @@ -264,7 +275,7 @@ impl RouterInner { next.run(ctx).await } RouteMatch::MethodNotAllowed(mut allowed) => { - allowed.sort_by(|a, b| a.as_str().cmp(b.as_str())); + allowed.sort_by(|left, right| left.as_str().cmp(right.as_str())); Err(EdgeError::method_not_allowed(&method, &allowed)) } RouteMatch::NotFound => Err(EdgeError::not_found(path)), @@ -278,19 +289,19 @@ impl RouterInner { matched .params .iter() - .map(|(k, v)| (k.to_string(), v.to_string())) + .map(|(key, value)| (key.to_owned(), value.to_owned())) .collect(), ); return 
RouteMatch::Found(matched.value, params); } } - let mut allowed = HashSet::new(); - for (candidate_method, router) in &self.routes { - if router.at(path).is_ok() { - allowed.insert(candidate_method.clone()); - } - } + let allowed: HashSet = self + .routes + .iter() + .filter(|(_, router)| router.at(path).is_ok()) + .map(|(candidate_method, _)| candidate_method.clone()) + .collect(); if allowed.is_empty() { RouteMatch::NotFound @@ -300,34 +311,79 @@ impl RouterInner { } } +#[derive(Clone)] +pub struct RouterService { + inner: Arc, +} + impl Service for RouterService { - type Response = Response; type Error = EdgeError; type Future = HandlerFuture; + type Response = Response; - fn poll_ready( - &mut self, - _cx: &mut std::task::Context<'_>, - ) -> std::task::Poll> { - std::task::Poll::Ready(Ok(())) + #[inline] + fn call(&mut self, req: Request) -> Self::Future { + let inner = Arc::clone(&self.inner); + Box::pin(async move { inner.dispatch(req).await }) } - fn call(&mut self, request: Request) -> Self::Future { - let inner = Arc::clone(&self.inner); - Box::pin(async move { inner.dispatch(request).await }) + #[inline] + fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) } } -struct RouteEntry { - handler: BoxHandler, -} +impl RouterService { + #[must_use] + #[inline] + pub fn builder() -> RouterBuilder { + RouterBuilder::new() + } -impl Clone for RouteEntry { - fn clone(&self) -> Self { + fn new( + routes: HashMap>, + middlewares: Vec, + route_index: Arc<[RouteInfo]>, + ) -> Self { Self { - handler: Arc::clone(&self.handler), + inner: Arc::new(RouterInner { + middlewares, + route_index, + routes, + }), + } + } + + /// # Errors + /// Returns [`EdgeError`] if the dispatched handler errors AND the error + /// itself fails to render as a response. 
+ #[inline] + pub async fn oneshot(&self, request: Request) -> Result { + let mut service = self.clone(); + match service.call(request).await { + Ok(response) => Ok(response), + Err(err) => err.into_response(), } } + + #[must_use] + #[inline] + pub fn routes(&self) -> Vec { + self.inner.route_index.to_vec() + } +} + +fn build_listing_response( + payload: &T, + builder: ResponseBuilder, +) -> Result { + let body = Body::json(payload).map_err(EdgeError::internal)?; + let response = builder + .status(StatusCode::OK) + .header(CONTENT_TYPE, HeaderValue::from_static("application/json")) + .body(body) + .map_err(EdgeError::internal)?; + Ok(response) } #[cfg(test)] @@ -341,137 +397,162 @@ mod tests { use crate::response::response_with_body; use futures::executor::block_on; use futures::task::noop_waker_ref; + use serde::ser::Error as _; use serde::{Deserialize, Serialize}; use serde_json::json; use std::sync::{Arc, Mutex}; use std::task::{Context, Poll}; async fn ok_handler(_ctx: RequestContext) -> Result { - Ok(response_with_body(StatusCode::OK, Body::empty())) + response_with_body(StatusCode::OK, Body::empty()) } #[test] - fn route_matches_path_params() { - #[derive(Deserialize)] - struct Params { - id: String, + fn builder_accepts_middleware_and_middleware_arc() { + struct RecordingMiddleware { + log: Arc>>, + name: &'static str, } - async fn handler(ctx: RequestContext) -> Result { - let params: Params = ctx.path()?; - Ok(format!("hello {}", params.id)) + #[async_trait::async_trait(?Send)] + impl Middleware for RecordingMiddleware { + async fn handle( + &self, + ctx: RequestContext, + next: Next<'_>, + ) -> Result { + self.log.lock().unwrap().push(self.name); + next.run(ctx).await + } } - let service = RouterService::builder().get("/hello/{id}", handler).build(); + let log = Arc::new(Mutex::new(Vec::new())); + let first = RecordingMiddleware { + log: Arc::clone(&log), + name: "first", + }; + let second = RecordingMiddleware { + log: Arc::clone(&log), + name: "second", 
+ }; + + let service = RouterService::builder() + .middleware(first) + .middleware_arc({ + let arc: BoxMiddleware = Arc::new(second); + arc + }) + .get("/test", ok_handler) + .build(); let request = request_builder() .method(Method::GET) - .uri("/hello/world") + .uri("/test") .body(Body::empty()) .expect("request"); - let response = block_on(service.clone().call(request)).expect("response"); assert_eq!(response.status(), StatusCode::OK); - assert_eq!(response.body().as_bytes(), b"hello world"); + + let entries = log.lock().unwrap().clone(); + assert_eq!(entries, vec!["first", "second"]); } #[test] - fn route_listing_outputs_all_routes() { - async fn noop(_ctx: RequestContext) -> Result<(), EdgeError> { - Ok(()) - } - + fn builder_supports_put_and_delete_routes() { let service = RouterService::builder() - .enable_route_listing() - .get("/health", noop) - .post("/items", noop) + .put("/items", ok_handler) + .delete("/items", ok_handler) .build(); - let request = request_builder() - .method(Method::GET) - .uri(DEFAULT_ROUTE_LISTING_PATH) + let put_request = request_builder() + .method(Method::PUT) + .uri("/items") .body(Body::empty()) .expect("request"); + let put_response = block_on(service.clone().call(put_request)).expect("response"); + assert_eq!(put_response.status(), StatusCode::OK); - let response = block_on(service.clone().call(request)).expect("response"); - assert_eq!(response.status(), StatusCode::OK); + let delete_request = request_builder() + .method(Method::DELETE) + .uri("/items") + .body(Body::empty()) + .expect("request"); + let delete_response = block_on(service.clone().call(delete_request)).expect("response"); + assert_eq!(delete_response.status(), StatusCode::OK); + } - let body = response.body().as_bytes(); - let payload: Vec = serde_json::from_slice(body).expect("json payload"); + #[test] + #[should_panic(expected = "duplicate route definition")] + fn duplicate_route_definition_panics() { + let _service = RouterService::builder() + .get("/dup", 
ok_handler) + .get("/dup", ok_handler) + .build(); + } - assert!(payload.contains(&json!({ - "method": "GET", - "path": DEFAULT_ROUTE_LISTING_PATH - }))); - assert!(payload.contains(&json!({ - "method": "GET", - "path": "/health" - }))); - assert!(payload.contains(&json!({ - "method": "POST", - "path": "/items" - }))); + #[test] + fn handler_returns_bad_request_for_invalid_path_params() { + #[derive(Deserialize)] + struct Params { + id: String, + } - let routes = service.routes(); - assert!(routes - .iter() - .any(|route| route.path() == "/health" && *route.method() == Method::GET)); + async fn handler(ctx: RequestContext) -> Result { + let params: Params = ctx.path()?; + let id = params + .id + .parse::() + .map_err(|_e| EdgeError::bad_request("invalid id"))?; + Ok(format!("hello {id}")) + } - let health_request = request_builder() + let service = RouterService::builder().get("/items/{id}", handler).build(); + let ok_request = request_builder() .method(Method::GET) - .uri("/health") + .uri("/items/42") .body(Body::empty()) .expect("request"); - let health_response = block_on(service.clone().call(health_request)).expect("response"); - assert_eq!(health_response.status(), StatusCode::NO_CONTENT); + let ok_response = block_on(service.clone().call(ok_request)).expect("response"); + assert_eq!(ok_response.status(), StatusCode::OK); + assert_eq!( + ok_response.body().as_bytes().expect("buffered"), + b"hello 42" + ); - let items_request = request_builder() - .method(Method::POST) - .uri("/items") + let request = request_builder() + .method(Method::GET) + .uri("/items/abc") .body(Body::empty()) .expect("request"); - let items_response = block_on(service.clone().call(items_request)).expect("response"); - assert_eq!(items_response.status(), StatusCode::NO_CONTENT); - } - #[test] - fn route_listing_response_handles_json_failure() { - struct FailingSerialize; - - impl Serialize for FailingSerialize { - fn serialize(&self, _serializer: S) -> Result - where - S: 
serde::Serializer, - { - Err(serde::ser::Error::custom("boom")) - } - } - - let err = build_listing_response(&FailingSerialize, response_builder()) - .expect_err("expected error"); - assert_eq!(err.status(), StatusCode::INTERNAL_SERVER_ERROR); + let error = block_on(service.clone().call(request)).expect_err("error"); + assert_eq!(error.status(), StatusCode::BAD_REQUEST); } #[test] - fn route_listing_response_handles_builder_failure() { - #[derive(Serialize)] - struct Payload { - ok: bool, - } + fn oneshot_returns_error_response() { + let service = RouterService::builder().build(); + let request = request_builder() + .method(Method::GET) + .uri("/missing") + .body(Body::empty()) + .expect("request"); - let builder = response_builder().header("bad\nname", "value"); - let err = - build_listing_response(&Payload { ok: true }, builder).expect_err("expected error"); - assert_eq!(err.status(), StatusCode::INTERNAL_SERVER_ERROR); + let response = block_on(service.oneshot(request)).expect("response"); + assert_eq!(response.status(), StatusCode::NOT_FOUND); } #[test] - #[should_panic(expected = "duplicate route definition")] - fn route_listing_duplicate_path_panics() { - RouterService::builder() - .enable_route_listing() - .get(DEFAULT_ROUTE_LISTING_PATH, ok_handler) - .build(); + fn oneshot_returns_success_response() { + let service = RouterService::builder().get("/ok", ok_handler).build(); + let request = request_builder() + .method(Method::GET) + .uri("/ok") + .body(Body::empty()) + .expect("request"); + + let response = block_on(service.oneshot(request)).expect("response"); + assert_eq!(response.status(), StatusCode::OK); } #[test] @@ -519,193 +600,159 @@ mod tests { } #[test] - fn handler_returns_bad_request_for_invalid_path_params() { - #[derive(Deserialize)] - struct Params { - id: String, - } - - async fn handler(ctx: RequestContext) -> Result { - let params: Params = ctx.path()?; - let id = params - .id - .parse::() - .map_err(|_| EdgeError::bad_request("invalid 
id"))?; - Ok(format!("hello {}", id)) - } - - let service = RouterService::builder().get("/items/{id}", handler).build(); - let ok_request = request_builder() - .method(Method::GET) - .uri("/items/42") - .body(Body::empty()) - .expect("request"); - let ok_response = block_on(service.clone().call(ok_request)).expect("response"); - assert_eq!(ok_response.status(), StatusCode::OK); - assert_eq!(ok_response.body().as_bytes(), b"hello 42"); + fn route_entry_clone_copies_handler() { + let entry = RouteEntry { + handler: ok_handler.into_handler(), + }; + let cloned = entry.clone(); let request = request_builder() .method(Method::GET) - .uri("/items/abc") + .uri("/test") .body(Body::empty()) .expect("request"); - - let error = block_on(service.clone().call(request)).expect_err("error"); - assert_eq!(error.status(), StatusCode::BAD_REQUEST); + let ctx = RequestContext::new(request, PathParams::default()); + let response = block_on(cloned.handler.call(ctx)).expect("response"); + assert_eq!(response.status(), StatusCode::OK); } #[test] - fn streams_body_through_router() { - use bytes::Bytes; - use futures_util::stream; - use futures_util::StreamExt; - - async fn handler(_ctx: RequestContext) -> Result { - let chunks = stream::iter(vec![ - Bytes::from_static(b"chunk-one\n"), - Bytes::from_static(b"chunk-two\n"), - ]); + #[should_panic(expected = "duplicate route definition")] + fn route_listing_duplicate_path_panics() { + let _service = RouterService::builder() + .enable_route_listing() + .get(DEFAULT_ROUTE_LISTING_PATH, ok_handler) + .build(); + } - Ok((StatusCode::OK, Body::stream(chunks)).into_response()) + #[test] + fn route_listing_outputs_all_routes() { + async fn noop(_ctx: RequestContext) -> Result<(), EdgeError> { + Ok(()) } - let service = RouterService::builder().get("/stream", handler).build(); + let service = RouterService::builder() + .enable_route_listing() + .get("/health", noop) + .post("/items", noop) + .build(); let request = request_builder() 
.method(Method::GET) - .uri("/stream") + .uri(DEFAULT_ROUTE_LISTING_PATH) .body(Body::empty()) .expect("request"); let response = block_on(service.clone().call(request)).expect("response"); - let mut stream = response.into_body().into_stream().expect("stream body"); - let collected = block_on(async { - let mut acc = Vec::new(); - while let Some(chunk) = stream.next().await { - let chunk = chunk.expect("chunk"); - acc.extend_from_slice(&chunk); - } - acc - }); - assert_eq!(collected, b"chunk-one\nchunk-two\n"); - } + assert_eq!(response.status(), StatusCode::OK); - #[test] - #[should_panic(expected = "route listing path cannot be empty")] - fn route_listing_rejects_empty_path() { - let _ = RouterService::builder().enable_route_listing_at(""); - } + let body = response.body().as_bytes().expect("buffered"); + let payload: Vec = serde_json::from_slice(body).expect("json payload"); - #[test] - #[should_panic(expected = "route listing path must begin with '/'")] - fn route_listing_rejects_missing_slash() { - let _ = RouterService::builder().enable_route_listing_at("routes"); - } + assert!(payload.contains(&json!({ + "method": "GET", + "path": DEFAULT_ROUTE_LISTING_PATH + }))); + assert!(payload.contains(&json!({ + "method": "GET", + "path": "/health" + }))); + assert!(payload.contains(&json!({ + "method": "POST", + "path": "/items" + }))); - #[test] - fn builder_supports_put_and_delete_routes() { - let service = RouterService::builder() - .put("/items", ok_handler) - .delete("/items", ok_handler) - .build(); + let routes = service.routes(); + assert!(routes + .iter() + .any(|route| route.path() == "/health" && *route.method() == Method::GET)); - let put_request = request_builder() - .method(Method::PUT) - .uri("/items") + let health_request = request_builder() + .method(Method::GET) + .uri("/health") .body(Body::empty()) .expect("request"); - let put_response = block_on(service.clone().call(put_request)).expect("response"); - assert_eq!(put_response.status(), 
StatusCode::OK); + let health_response = block_on(service.clone().call(health_request)).expect("response"); + assert_eq!(health_response.status(), StatusCode::NO_CONTENT); - let delete_request = request_builder() - .method(Method::DELETE) + let items_request = request_builder() + .method(Method::POST) .uri("/items") .body(Body::empty()) .expect("request"); - let delete_response = block_on(service.clone().call(delete_request)).expect("response"); - assert_eq!(delete_response.status(), StatusCode::OK); + let items_response = block_on(service.clone().call(items_request)).expect("response"); + assert_eq!(items_response.status(), StatusCode::NO_CONTENT); } #[test] - #[should_panic(expected = "duplicate route definition")] - fn duplicate_route_definition_panics() { - RouterService::builder() - .get("/dup", ok_handler) - .get("/dup", ok_handler) - .build(); + #[should_panic(expected = "route listing path cannot be empty")] + fn route_listing_rejects_empty_path() { + let _builder = RouterService::builder().enable_route_listing_at(""); } #[test] - fn builder_accepts_middleware_and_middleware_arc() { - struct RecordingMiddleware { - log: Arc>>, - name: &'static str, - } + #[should_panic(expected = "route listing path must begin with '/'")] + fn route_listing_rejects_missing_slash() { + let _builder = RouterService::builder().enable_route_listing_at("routes"); + } - #[async_trait::async_trait(?Send)] - impl Middleware for RecordingMiddleware { - async fn handle( - &self, - ctx: RequestContext, - next: Next<'_>, - ) -> Result { - self.log.lock().unwrap().push(self.name); - next.run(ctx).await - } + #[test] + fn route_listing_response_handles_builder_failure() { + #[derive(Serialize)] + struct Payload { + ok: bool, } - let log = Arc::new(Mutex::new(Vec::new())); - let first = RecordingMiddleware { - log: Arc::clone(&log), - name: "first", - }; - let second = RecordingMiddleware { - log: Arc::clone(&log), - name: "second", - }; + let builder = 
response_builder().header("bad\nname", "value"); + let err = + build_listing_response(&Payload { ok: true }, builder).expect_err("expected error"); + assert_eq!(err.status(), StatusCode::INTERNAL_SERVER_ERROR); + } - let service = RouterService::builder() - .middleware(first) - .middleware_arc(Arc::new(second) as BoxMiddleware) - .get("/test", ok_handler) - .build(); + #[test] + fn route_listing_response_handles_json_failure() { + struct FailingSerialize; - let request = request_builder() - .method(Method::GET) - .uri("/test") - .body(Body::empty()) - .expect("request"); - let response = block_on(service.clone().call(request)).expect("response"); - assert_eq!(response.status(), StatusCode::OK); + impl Serialize for FailingSerialize { + fn serialize(&self, _serializer: S) -> Result + where + S: serde::Serializer, + { + Err(S::Error::custom("boom")) + } + } - let entries = log.lock().unwrap().clone(); - assert_eq!(entries, vec!["first", "second"]); + let err = build_listing_response(&FailingSerialize, response_builder()) + .expect_err("expected error"); + assert_eq!(err.status(), StatusCode::INTERNAL_SERVER_ERROR); } #[test] - fn oneshot_returns_success_response() { - let service = RouterService::builder().get("/ok", ok_handler).build(); - let request = request_builder() - .method(Method::GET) - .uri("/ok") - .body(Body::empty()) - .expect("request"); + fn route_matches_path_params() { + #[derive(Deserialize)] + struct Params { + id: String, + } - let response = block_on(service.oneshot(request)); - assert_eq!(response.status(), StatusCode::OK); - } + async fn handler(ctx: RequestContext) -> Result { + let params: Params = ctx.path()?; + Ok(format!("hello {}", params.id)) + } + + let service = RouterService::builder().get("/hello/{id}", handler).build(); - #[test] - fn oneshot_returns_error_response() { - let service = RouterService::builder().build(); let request = request_builder() .method(Method::GET) - .uri("/missing") + .uri("/hello/world") .body(Body::empty()) 
.expect("request"); - let response = block_on(service.oneshot(request)); - assert_eq!(response.status(), StatusCode::NOT_FOUND); + let response = block_on(service.clone().call(request)).expect("response"); + assert_eq!(response.status(), StatusCode::OK); + assert_eq!( + response.body().as_bytes().expect("buffered"), + b"hello world" + ); } #[test] @@ -718,19 +765,38 @@ mod tests { } #[test] - fn route_entry_clone_copies_handler() { - let entry = RouteEntry { - handler: ok_handler.into_handler(), - }; - let cloned = entry.clone(); + fn streams_body_through_router() { + use bytes::Bytes; + use futures_util::stream; + use futures_util::StreamExt as _; + + async fn handler(_ctx: RequestContext) -> Result { + let chunks = stream::iter(vec![ + Bytes::from_static(b"chunk-one\n"), + Bytes::from_static(b"chunk-two\n"), + ]); + + (StatusCode::OK, Body::stream(chunks)).into_response() + } + + let service = RouterService::builder().get("/stream", handler).build(); let request = request_builder() .method(Method::GET) - .uri("/test") + .uri("/stream") .body(Body::empty()) .expect("request"); - let ctx = RequestContext::new(request, PathParams::default()); - let response = block_on(cloned.handler.call(ctx)).expect("response"); - assert_eq!(response.status(), StatusCode::OK); + + let response = block_on(service.clone().call(request)).expect("response"); + let mut stream = response.into_body().into_stream().expect("stream body"); + let collected = block_on(async { + let mut acc = Vec::new(); + while let Some(result) = stream.next().await { + let chunk = result.expect("chunk"); + acc.extend_from_slice(&chunk); + } + acc + }); + assert_eq!(collected, b"chunk-one\nchunk-two\n"); } } diff --git a/crates/edgezero-core/src/secret_store.rs b/crates/edgezero-core/src/secret_store.rs index 5ecd699..5fbec43 100644 --- a/crates/edgezero-core/src/secret_store.rs +++ b/crates/edgezero-core/src/secret_store.rs @@ -18,6 +18,8 @@ //! it never writes or deletes them. Provisioning secrets is the //! 
responsibility of each platform's deployment toolchain. +#[cfg(any(test, feature = "test-utils"))] +use std::collections::HashMap; use std::fmt; use std::sync::Arc; @@ -26,13 +28,93 @@ use bytes::Bytes; use crate::error::EdgeError; +// --------------------------------------------------------------------------- +// Contract test macro +// --------------------------------------------------------------------------- + +/// Generate a suite of contract tests for any [`SecretStore`] implementation. +/// +/// The factory expression must produce a provider pre-populated with these +/// entries in the `"mystore"` store: +/// - `"contract_key"` → `Bytes::from("contract_value")` +/// - `"contract_key_2"` → `Bytes::from("another_value")` +/// - `"missing_key"` must NOT be present. +#[macro_export] +macro_rules! secret_store_contract_tests { + ($mod_name:ident, $factory:expr) => { + mod $mod_name { + use super::*; + use bytes::Bytes; + use $crate::secret_store::SecretStore; + + fn run(future: Fut) -> Fut::Output { + futures::executor::block_on(future) + } + + #[test] + fn contract_get_existing_returns_bytes() { + let provider = $factory; + run(async { + let result = provider.get_bytes("mystore", "contract_key").await.unwrap(); + assert_eq!(result, Some(Bytes::from("contract_value"))); + }); + } + + #[test] + fn contract_get_second_key_returns_bytes() { + let provider = $factory; + run(async { + let result = provider + .get_bytes("mystore", "contract_key_2") + .await + .unwrap(); + assert_eq!(result, Some(Bytes::from("another_value"))); + }); + } + + #[test] + fn contract_get_missing_returns_none() { + let provider = $factory; + run(async { + let result = provider.get_bytes("mystore", "missing_key").await.unwrap(); + assert!(result.is_none()); + }); + } + + #[test] + fn contract_wrong_store_returns_none() { + let provider = $factory; + run(async { + let result = provider + .get_bytes("other_store", "contract_key") + .await + .unwrap(); + assert!(result.is_none()); + }); + } + } 
+ }; +} + +// --------------------------------------------------------------------------- +// Maximum name length +// --------------------------------------------------------------------------- + +/// Maximum length in bytes for any secret name or store name. +pub const MAX_NAME_LEN: usize = 512; + // --------------------------------------------------------------------------- // Error // --------------------------------------------------------------------------- /// Errors returned by secret store operations. #[derive(Debug, thiserror::Error)] +#[non_exhaustive] pub enum SecretError { + /// A general internal error. + #[error("secret store error: {0}")] + Internal(#[from] anyhow::Error), + /// The requested secret was not found. #[error("secret not found: {name}")] NotFound { name: String }, @@ -44,13 +126,10 @@ pub enum SecretError { /// A validation error (e.g., invalid secret name). #[error("validation error: {0}")] Validation(String), - - /// A general internal error. - #[error("secret store error: {0}")] - Internal(#[from] anyhow::Error), } impl From for EdgeError { + #[inline] fn from(err: SecretError) -> Self { match err { SecretError::NotFound { .. } => { @@ -67,47 +146,6 @@ impl From for EdgeError { } } -// --------------------------------------------------------------------------- -// Maximum name length -// --------------------------------------------------------------------------- - -/// Maximum length in bytes for any secret name or store name. -pub const MAX_NAME_LEN: usize = 512; - -// --------------------------------------------------------------------------- -// Multi-store provider trait -// --------------------------------------------------------------------------- - -/// Access secrets across multiple named stores. -/// -/// Platforms with a single flat namespace (env vars, in-memory test stores) -/// implement this by keying on `"{store_name}/{key}"`. 
-/// Platforms with named stores (Fastly, Spin) open a store-specific handle -/// per `store_name`. -#[async_trait(?Send)] -pub trait SecretStore: Send + Sync { - /// Retrieve a secret from a named store. Returns `Ok(None)` if not found. - async fn get_bytes(&self, store_name: &str, key: &str) -> Result, SecretError>; -} - -// --------------------------------------------------------------------------- -// No-op provider (test-utils) -// --------------------------------------------------------------------------- - -/// A no-op [`SecretStore`] for tests that don't need secrets. -/// -/// All reads return `None`. -#[cfg(any(test, feature = "test-utils"))] -pub struct NoopSecretStore; - -#[cfg(any(test, feature = "test-utils"))] -#[async_trait(?Send)] -impl SecretStore for NoopSecretStore { - async fn get_bytes(&self, _store_name: &str, _key: &str) -> Result, SecretError> { - Ok(None) - } -} - // --------------------------------------------------------------------------- // In-memory provider (test-utils) // --------------------------------------------------------------------------- @@ -118,17 +156,23 @@ impl SecretStore for NoopSecretStore { /// across multiple named stores. #[cfg(any(test, feature = "test-utils"))] pub struct InMemorySecretStore { - secrets: std::collections::HashMap, + secrets: HashMap, } #[cfg(any(test, feature = "test-utils"))] impl InMemorySecretStore { /// Build with entries of the form `("{store_name}/{key}", value)`. 
- pub fn new(entries: impl IntoIterator, impl Into)>) -> Self { + #[inline] + pub fn new(entries: I) -> Self + where + I: IntoIterator, + K: Into, + V: Into, + { Self { secrets: entries .into_iter() - .map(|(k, v)| (k.into(), v.into())) + .map(|(key, value)| (key.into(), value.into())) .collect(), } } @@ -137,12 +181,32 @@ impl InMemorySecretStore { #[cfg(any(test, feature = "test-utils"))] #[async_trait(?Send)] impl SecretStore for InMemorySecretStore { + #[inline] async fn get_bytes(&self, store_name: &str, key: &str) -> Result, SecretError> { let compound = format!("{store_name}/{key}"); Ok(self.secrets.get(&compound).cloned()) } } +// --------------------------------------------------------------------------- +// No-op provider (test-utils) +// --------------------------------------------------------------------------- + +/// A no-op [`SecretStore`] for tests that don't need secrets. +/// +/// All reads return `None`. +#[cfg(any(test, feature = "test-utils"))] +pub struct NoopSecretStore; + +#[cfg(any(test, feature = "test-utils"))] +#[async_trait(?Send)] +impl SecretStore for NoopSecretStore { + #[inline] + async fn get_bytes(&self, _store_name: &str, _key: &str) -> Result, SecretError> { + Ok(None) + } +} + // --------------------------------------------------------------------------- // Provider handle // --------------------------------------------------------------------------- @@ -156,18 +220,18 @@ pub struct SecretHandle { } impl fmt::Debug for SecretHandle { + #[inline] fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("SecretHandle").finish_non_exhaustive() } } impl SecretHandle { - /// Create a new handle wrapping a multi-store provider. - pub fn new(provider: Arc) -> Self { - Self { provider } - } - /// Retrieve a secret from a named store. Returns `Ok(None)` if not found. 
+ /// + /// # Errors + /// Returns [`SecretError::Validation`] for invalid `store_name`/`key`, [`SecretError::Unavailable`] if the backend is offline, or [`SecretError::Internal`] on backend failure. + #[inline] pub async fn get_bytes( &self, store_name: &str, @@ -178,7 +242,17 @@ impl SecretHandle { self.provider.get_bytes(store_name, key).await } + /// Create a new handle wrapping a multi-store provider. + #[inline] + pub fn new(provider: Arc) -> Self { + Self { provider } + } + /// Retrieve a secret as raw bytes. Returns `SecretError::NotFound` if absent. + /// + /// # Errors + /// Returns [`SecretError::NotFound`] if the secret is absent, plus the same errors as [`SecretHandle::get_bytes`]. + #[inline] pub async fn require_bytes(&self, store_name: &str, key: &str) -> Result { self.get_bytes(store_name, key) .await? @@ -188,21 +262,42 @@ impl SecretHandle { } /// Retrieve a secret as a UTF-8 string. Returns `SecretError::NotFound` if absent. + /// + /// # Errors + /// Returns [`SecretError::Internal`] if the secret bytes are not valid UTF-8, plus the same errors as [`SecretHandle::require_bytes`]. + #[inline] pub async fn require_str(&self, store_name: &str, key: &str) -> Result { let bytes = self.require_bytes(store_name, key).await?; - String::from_utf8(bytes.into()) - .map_err(|e| SecretError::Internal(anyhow::anyhow!("secret is not valid UTF-8: {e}"))) + String::from_utf8(bytes.into()).map_err(|err| { + SecretError::Internal(anyhow::anyhow!("secret is not valid UTF-8: {err}")) + }) } } +// --------------------------------------------------------------------------- +// Multi-store provider trait +// --------------------------------------------------------------------------- + +/// Access secrets across multiple named stores. +/// +/// Platforms with a single flat namespace (env vars, in-memory test stores) +/// implement this by keying on `"{store_name}/{key}"`. 
+/// Platforms with named stores (Fastly, Spin) open a store-specific handle +/// per `store_name`. +#[async_trait(?Send)] +pub trait SecretStore: Send + Sync { + /// Retrieve a secret from a named store. Returns `Ok(None)` if not found. + async fn get_bytes(&self, store_name: &str, key: &str) -> Result, SecretError>; +} + // --------------------------------------------------------------------------- // Shared validation // --------------------------------------------------------------------------- -pub(crate) fn validate_name(name: &str) -> Result<(), SecretError> { +fn validate_name(name: &str) -> Result<(), SecretError> { if name.is_empty() { return Err(SecretError::Validation( - "secret name cannot be empty".to_string(), + "secret name cannot be empty".to_owned(), )); } if name.len() > MAX_NAME_LEN { @@ -212,232 +307,163 @@ pub(crate) fn validate_name(name: &str) -> Result<(), SecretError> { MAX_NAME_LEN ))); } - if name.chars().any(|c| c.is_control()) { + if name.chars().any(char::is_control) { return Err(SecretError::Validation( - "secret name contains invalid control characters".to_string(), + "secret name contains invalid control characters".to_owned(), )); } Ok(()) } -// --------------------------------------------------------------------------- -// Contract test macro -// --------------------------------------------------------------------------- - -/// Generate a suite of contract tests for any [`SecretStore`] implementation. -/// -/// The factory expression must produce a provider pre-populated with these -/// entries in the `"mystore"` store: -/// - `"contract_key"` → `Bytes::from("contract_value")` -/// - `"contract_key_2"` → `Bytes::from("another_value")` -/// - `"missing_key"` must NOT be present. -#[macro_export] -macro_rules! 
secret_store_contract_tests { - ($mod_name:ident, $factory:expr) => { - mod $mod_name { - use super::*; - use bytes::Bytes; - use $crate::secret_store::SecretStore; - - fn run(f: F) -> F::Output { - futures::executor::block_on(f) - } - - #[test] - fn contract_get_existing_returns_bytes() { - let provider = $factory; - run(async { - let result = provider.get_bytes("mystore", "contract_key").await.unwrap(); - assert_eq!(result, Some(Bytes::from("contract_value"))); - }); - } - - #[test] - fn contract_get_second_key_returns_bytes() { - let provider = $factory; - run(async { - let result = provider - .get_bytes("mystore", "contract_key_2") - .await - .unwrap(); - assert_eq!(result, Some(Bytes::from("another_value"))); - }); - } - - #[test] - fn contract_get_missing_returns_none() { - let provider = $factory; - run(async { - let result = provider.get_bytes("mystore", "missing_key").await.unwrap(); - assert!(result.is_none()); - }); - } - - #[test] - fn contract_wrong_store_returns_none() { - let provider = $factory; - run(async { - let result = provider - .get_bytes("other_store", "contract_key") - .await - .unwrap(); - assert!(result.is_none()); - }); - } - } - }; -} - // --------------------------------------------------------------------------- // Tests // --------------------------------------------------------------------------- #[cfg(test)] mod tests { + secret_store_contract_tests!(in_memory_provider_contract, { + InMemorySecretStore::new([ + ("mystore/contract_key", Bytes::from("contract_value")), + ("mystore/contract_key_2", Bytes::from("another_value")), + ]) + }); + use super::*; use crate::http::StatusCode; use bytes::Bytes; use futures::executor::block_on; - // ----------------------------------------------------------------------- - // SecretStoreProvider tests - // ----------------------------------------------------------------------- + fn provider_handle_with(entries: &[(&str, &str)]) -> SecretHandle { + let provider = InMemorySecretStore::new( + 
entries + .iter() + .map(|(key, value)| ((*key).to_owned(), Bytes::from((*value).to_owned()))), + ); + SecretHandle::new(Arc::new(provider)) + } #[test] - fn provider_in_memory_returns_value_for_existing_key() { - let provider = InMemorySecretStore::new([("store/key", Bytes::from("hello"))]); + fn noop_provider_always_returns_none() { + let provider = NoopSecretStore; block_on(async { - let result = provider.get_bytes("store", "key").await.unwrap(); - assert_eq!(result, Some(Bytes::from("hello"))); + let result = provider.get_bytes("any_store", "any_key").await.unwrap(); + assert!(result.is_none()); }); } #[test] - fn provider_in_memory_returns_none_for_missing_key() { - let provider = InMemorySecretStore::new([("store/key", Bytes::from("hello"))]); + fn provider_handle_get_bytes_returns_none_for_missing() { + let handle = provider_handle_with(&[]); block_on(async { - let result = provider.get_bytes("store", "missing").await.unwrap(); + let result = handle.get_bytes("store", "missing").await.unwrap(); assert!(result.is_none()); }); } #[test] - fn provider_in_memory_returns_none_for_wrong_store() { - let provider = InMemorySecretStore::new([("store/key", Bytes::from("hello"))]); + fn provider_handle_get_bytes_returns_value() { + let handle = provider_handle_with(&[("signing-keys/current", "abc123")]); block_on(async { - let result = provider.get_bytes("other", "key").await.unwrap(); - assert!(result.is_none()); + let result = handle.get_bytes("signing-keys", "current").await.unwrap(); + assert_eq!(result, Some(Bytes::from("abc123"))); }); } #[test] - fn noop_provider_always_returns_none() { - let provider = NoopSecretStore; + fn provider_handle_require_bytes_errors_for_missing() { + let handle = provider_handle_with(&[]); block_on(async { - let result = provider.get_bytes("any_store", "any_key").await.unwrap(); - assert!(result.is_none()); + let err = handle.require_bytes("store", "missing").await.unwrap_err(); + assert!(matches!(err, SecretError::NotFound { .. 
})); }); } - // ----------------------------------------------------------------------- - // SecretProviderHandle tests - // ----------------------------------------------------------------------- - - fn provider_handle_with(entries: &[(&str, &str)]) -> SecretHandle { - let provider = InMemorySecretStore::new( - entries - .iter() - .map(|(k, v)| (k.to_string(), Bytes::from(v.to_string()))), - ); - SecretHandle::new(std::sync::Arc::new(provider)) - } - #[test] - fn provider_handle_get_bytes_returns_value() { - let h = provider_handle_with(&[("signing-keys/current", "abc123")]); + fn provider_handle_require_str_returns_value() { + let handle = provider_handle_with(&[("api-keys/prod", "secret_val")]); block_on(async { - let result = h.get_bytes("signing-keys", "current").await.unwrap(); - assert_eq!(result, Some(Bytes::from("abc123"))); + let val = handle.require_str("api-keys", "prod").await.unwrap(); + assert_eq!(val, "secret_val"); }); } #[test] - fn provider_handle_get_bytes_returns_none_for_missing() { - let h = provider_handle_with(&[]); + fn provider_handle_validates_control_chars_in_key() { + let handle = provider_handle_with(&[]); block_on(async { - let result = h.get_bytes("store", "missing").await.unwrap(); - assert!(result.is_none()); + let err = handle.get_bytes("store", "bad\x00key").await.unwrap_err(); + assert!(matches!(err, SecretError::Validation(_))); }); } #[test] - fn provider_handle_require_bytes_errors_for_missing() { - let h = provider_handle_with(&[]); + fn provider_handle_validates_control_chars_in_store_name() { + let handle = provider_handle_with(&[]); block_on(async { - let err = h.require_bytes("store", "missing").await.unwrap_err(); - assert!(matches!(err, SecretError::NotFound { .. 
})); + let err = handle.get_bytes("bad\x00store", "key").await.unwrap_err(); + assert!(matches!(err, SecretError::Validation(_))); }); } #[test] - fn provider_handle_require_str_returns_value() { - let h = provider_handle_with(&[("api-keys/prod", "secret_val")]); + fn provider_handle_validates_empty_key() { + let handle = provider_handle_with(&[]); block_on(async { - let val = h.require_str("api-keys", "prod").await.unwrap(); - assert_eq!(val, "secret_val"); + let err = handle.get_bytes("store", "").await.unwrap_err(); + assert!(matches!(err, SecretError::Validation(_))); }); } #[test] fn provider_handle_validates_empty_store_name() { - let h = provider_handle_with(&[]); + let handle = provider_handle_with(&[]); block_on(async { - let err = h.get_bytes("", "key").await.unwrap_err(); + let err = handle.get_bytes("", "key").await.unwrap_err(); assert!(matches!(err, SecretError::Validation(_))); }); } #[test] - fn provider_handle_validates_empty_key() { - let h = provider_handle_with(&[]); + fn provider_handle_validates_oversized_name() { + let handle = provider_handle_with(&[]); block_on(async { - let err = h.get_bytes("store", "").await.unwrap_err(); + let name = "x".repeat(MAX_NAME_LEN + 1); + let err = handle.get_bytes(&name, "key").await.unwrap_err(); assert!(matches!(err, SecretError::Validation(_))); }); } #[test] - fn provider_handle_validates_control_chars_in_store_name() { - let h = provider_handle_with(&[]); + fn provider_in_memory_returns_none_for_missing_key() { + let provider = InMemorySecretStore::new([("store/key", Bytes::from("hello"))]); block_on(async { - let err = h.get_bytes("bad\x00store", "key").await.unwrap_err(); - assert!(matches!(err, SecretError::Validation(_))); + let result = provider.get_bytes("store", "missing").await.unwrap(); + assert!(result.is_none()); }); } #[test] - fn provider_handle_validates_control_chars_in_key() { - let h = provider_handle_with(&[]); + fn provider_in_memory_returns_none_for_wrong_store() { + let provider = 
InMemorySecretStore::new([("store/key", Bytes::from("hello"))]); block_on(async { - let err = h.get_bytes("store", "bad\x00key").await.unwrap_err(); - assert!(matches!(err, SecretError::Validation(_))); + let result = provider.get_bytes("other", "key").await.unwrap(); + assert!(result.is_none()); }); } #[test] - fn provider_handle_validates_oversized_name() { - let h = provider_handle_with(&[]); + fn provider_in_memory_returns_value_for_existing_key() { + let provider = InMemorySecretStore::new([("store/key", Bytes::from("hello"))]); block_on(async { - let name = "x".repeat(MAX_NAME_LEN + 1); - let err = h.get_bytes(&name, "key").await.unwrap_err(); - assert!(matches!(err, SecretError::Validation(_))); + let result = provider.get_bytes("store", "key").await.unwrap(); + assert_eq!(result, Some(Bytes::from("hello"))); }); } #[test] fn secret_error_not_found_does_not_leak_secret_name() { let err: EdgeError = SecretError::NotFound { - name: "API_KEY".to_string(), + name: "API_KEY".to_owned(), } .into(); assert_eq!(err.status(), StatusCode::INTERNAL_SERVER_ERROR); @@ -446,15 +472,8 @@ mod tests { #[test] fn secret_error_validation_does_not_leak_details() { - let err: EdgeError = SecretError::Validation("bad\x00name".to_string()).into(); + let err: EdgeError = SecretError::Validation("bad\x00name".to_owned()).into(); assert_eq!(err.status(), StatusCode::INTERNAL_SERVER_ERROR); assert!(!err.message().contains("bad")); } - - secret_store_contract_tests!(in_memory_provider_contract, { - InMemorySecretStore::new([ - ("mystore/contract_key", Bytes::from("contract_value")), - ("mystore/contract_key_2", Bytes::from("another_value")), - ]) - }); } diff --git a/crates/edgezero-macros/Cargo.toml b/crates/edgezero-macros/Cargo.toml index d050dc3..63c3b58 100644 --- a/crates/edgezero-macros/Cargo.toml +++ b/crates/edgezero-macros/Cargo.toml @@ -5,6 +5,9 @@ version = { workspace = true } authors = { workspace = true } license = { workspace = true } +[lints] +workspace = true + [lib] 
proc-macro = true diff --git a/crates/edgezero-macros/src/action.rs b/crates/edgezero-macros/src/action.rs index e905d22..92a03c6 100644 --- a/crates/edgezero-macros/src/action.rs +++ b/crates/edgezero-macros/src/action.rs @@ -1,13 +1,13 @@ use proc_macro::TokenStream; use quote::{format_ident, quote}; -use syn::{spanned::Spanned, Error, FnArg, ItemFn, Pat, PathArguments, Type}; +use syn::{spanned::Spanned as _, Error, FnArg, ItemFn, Pat, PathArguments, Type}; pub fn expand_action(attr: TokenStream, item: TokenStream) -> TokenStream { - expand_action_impl(attr.into(), item.into()).into() + expand_action_impl(&attr.into(), item.into()).into() } -pub(crate) fn expand_action_impl( - attr: proc_macro2::TokenStream, +fn expand_action_impl( + attr: &proc_macro2::TokenStream, item: proc_macro2::TokenStream, ) -> proc_macro2::TokenStream { if !attr.is_empty() { @@ -41,6 +41,12 @@ pub(crate) fn expand_action_impl( inner_fn.sig.ident = inner_ident.clone(); inner_fn.vis = syn::Visibility::Inherited; inner_fn.attrs.clear(); + // `#[action]` requires the user fn to be `async` so we can `.await` it + // from the generated outer fn. Some handler bodies have no awaits of + // their own — silence `clippy::unused_async` for those. 
+ inner_fn + .attrs + .push(syn::parse_quote!(#[allow(clippy::unused_async)])); if let Err(err) = normalize_request_context_patterns(&mut inner_fn) { return err.to_compile_error(); @@ -53,7 +59,13 @@ pub(crate) fn expand_action_impl( for (index, arg) in func.sig.inputs.iter().enumerate() { let pat_type = match arg { FnArg::Typed(pat_type) => pat_type, - FnArg::Receiver(_) => unreachable!(), + FnArg::Receiver(receiver) => { + return syn::Error::new( + receiver.span(), + "#[action] functions cannot have a `self` receiver", + ) + .to_compile_error(); + } }; let ty = &pat_type.ty; @@ -128,17 +140,14 @@ fn extract_request_context_binding(pat: &Pat) -> syn::Result> { } fn path_is_request_context(path: &syn::Path) -> bool { - path.segments - .last() - .map(|segment| { - segment.ident == "RequestContext" && matches!(segment.arguments, PathArguments::None) - }) - .unwrap_or(false) + path.segments.last().is_some_and(|segment| { + segment.ident == "RequestContext" && matches!(segment.arguments, PathArguments::None) + }) } fn normalize_request_context_patterns(func: &mut ItemFn) -> Result<(), Error> { let mut error: Option = None; - for arg in func.sig.inputs.iter_mut() { + for arg in &mut func.sig.inputs { if let FnArg::Typed(pat_type) = arg { if is_request_context_type(&pat_type.ty) { if let Err(err) = normalize_request_context_pat(&mut pat_type.pat) { @@ -164,7 +173,7 @@ mod tests { use proc_macro2::TokenStream; use quote::quote; - fn render(tokens: TokenStream) -> String { + fn render(tokens: &TokenStream) -> String { tokens.to_string() } @@ -182,8 +191,8 @@ mod tests { .unwrap() } }; - let output = expand_action_impl(TokenStream::new(), input); - let rendered = render(output); + let output = expand_action_impl(&TokenStream::new(), input); + let rendered = render(&output); assert!(rendered.contains("__demo_inner")); assert!(rendered.contains("fn demo")); assert!(rendered.contains("responder :: Responder :: respond")); @@ -194,8 +203,8 @@ mod tests { let input = quote! 
{ fn invalid() {} }; - let output = expand_action_impl(TokenStream::new(), input); - let rendered = render(output); + let output = expand_action_impl(&TokenStream::new(), input); + let rendered = render(&output); assert!(rendered.contains("must be async")); } @@ -206,8 +215,8 @@ mod tests { unimplemented!() } }; - let output = expand_action_impl(quote!(path = "/demo"), input); - let rendered = render(output); + let output = expand_action_impl(&quote!(path = "/demo"), input); + let rendered = render(&output); assert!(rendered.contains("does not accept arguments")); } @@ -218,8 +227,8 @@ mod tests { unimplemented!() } }; - let output = expand_action_impl(TokenStream::new(), input); - let rendered = render(output); + let output = expand_action_impl(&TokenStream::new(), input); + let rendered = render(&output); assert!(rendered.contains("does not support self receivers")); } @@ -239,8 +248,8 @@ mod tests { .unwrap()) } }; - let output = expand_action_impl(TokenStream::new(), input); - let rendered = render(output); + let output = expand_action_impl(&TokenStream::new(), input); + let rendered = render(&output); let collapsed = collapse_whitespace(&rendered); assert!(collapsed.contains("__with_ctx_inner(__ctx)")); } @@ -261,8 +270,8 @@ mod tests { .unwrap()) } }; - let output = expand_action_impl(TokenStream::new(), input); - let rendered = render(output); + let output = expand_action_impl(&TokenStream::new(), input); + let rendered = render(&output); let collapsed = collapse_whitespace(&rendered); assert!(collapsed.contains("__tuple_ctx_inner(__ctx)")); } @@ -275,8 +284,8 @@ mod tests { second: ::edgezero_core::context::RequestContext, ) {} }; - let output = expand_action_impl(TokenStream::new(), input); - let rendered = render(output); + let output = expand_action_impl(&TokenStream::new(), input); + let rendered = render(&output); assert!(rendered.contains("support at most one RequestContext argument")); } @@ -289,8 +298,8 @@ mod tests { unimplemented!() } }; - let output 
= expand_action_impl(TokenStream::new(), input); - let rendered = render(output); + let output = expand_action_impl(&TokenStream::new(), input); + let rendered = render(&output); assert!(rendered.contains("expects exactly one binding")); } @@ -307,8 +316,8 @@ mod tests { .unwrap() } }; - let output = expand_action_impl(TokenStream::new(), input); - let rendered = render(output); + let output = expand_action_impl(&TokenStream::new(), input); + let rendered = render(&output); let collapsed = collapse_whitespace(&rendered); assert!( collapsed.contains("FromRequest>::from_request"), diff --git a/crates/edgezero-macros/src/app.rs b/crates/edgezero-macros/src/app.rs index 7196d99..ab481af 100644 --- a/crates/edgezero-macros/src/app.rs +++ b/crates/edgezero-macros/src/app.rs @@ -1,3 +1,4 @@ +use crate::manifest_definitions::{Manifest, DEFAULT_CONFIG_STORE_NAME}; use proc_macro::TokenStream; use proc_macro2::{Span, TokenStream as TokenStream2}; use quote::quote; @@ -6,29 +7,126 @@ use std::fs; use std::path::PathBuf; use syn::parse::{Parse, ParseStream}; use syn::{parse_macro_input, Ident, LitStr, Token}; -use validator::Validate; - -#[allow(dead_code)] -mod manifest_definitions { - include!(concat!( - env!("CARGO_MANIFEST_DIR"), - "/../edgezero-core/src/manifest.rs" - )); +use validator::Validate as _; + +struct AppArgs { + app_ident: Option, + path: LitStr, +} + +impl Parse for AppArgs { + fn parse(input: ParseStream) -> syn::Result { + let path: LitStr = input.parse()?; + let app_ident = if input.peek(Token![,]) { + input.parse::()?; + Some(input.parse::()?) + } else { + None + }; + if !input.is_empty() { + return Err(input.error("unexpected tokens after app! macro arguments")); + } + Ok(Self { app_ident, path }) + } +} + +fn build_config_store_tokens(manifest: &Manifest) -> TokenStream2 { + let Some(config) = manifest.stores.config.as_ref() else { + return quote! 
{ + fn config_store() -> Option<&'static edgezero_core::app::ConfigStoreMetadata> { + None + } + }; + }; + + let fallback_name = config.name.as_deref().unwrap_or(DEFAULT_CONFIG_STORE_NAME); + let fallback_name_lit = LitStr::new(fallback_name, Span::call_site()); + let override_entries: Vec<_> = config + .adapters + .iter() + .map(|(adapter, cfg)| { + let adapter_lit = LitStr::new(adapter, Span::call_site()); + let name_lit = LitStr::new(&cfg.name, Span::call_site()); + quote! { + edgezero_core::app::ConfigStoreAdapterMetadata::new(#adapter_lit, #name_lit), + } + }) + .collect(); + + quote! { + fn config_store() -> Option<&'static edgezero_core::app::ConfigStoreMetadata> { + static CONFIG_STORE: edgezero_core::app::ConfigStoreMetadata = + edgezero_core::app::ConfigStoreMetadata::new( + #fallback_name_lit, + &[ + #(#override_entries)* + ], + ); + Some(&CONFIG_STORE) + } + } +} + +fn build_middleware_tokens(manifest: &Manifest) -> Vec { + manifest + .app + .middleware + .iter() + .map(|middleware| { + let path = parse_handler_path(middleware); + quote! 
{ + builder = builder.middleware(#path); + } + }) + .collect() +} + +fn build_route_tokens(manifest: &Manifest) -> Vec { + manifest + .triggers + .http + .iter() + .filter_map(|trigger| { + let handler = trigger.handler.as_deref()?; + let handler_path = parse_handler_path(handler); + let path_lit = LitStr::new(&trigger.path, Span::call_site()); + + let methods = trigger.methods(); + + let mut tokens = Vec::new(); + for method in methods { + let route_tokens = route_for_method(method, &path_lit, &handler_path); + tokens.push(route_tokens); + } + Some(tokens) + }) + .flatten() + .collect() } -use manifest_definitions::{Manifest, DEFAULT_CONFIG_STORE_NAME}; pub fn expand_app(input: TokenStream) -> TokenStream { let args = parse_macro_input!(input as AppArgs); let manifest_path = resolve_manifest_path(args.path.value()); - let manifest_source = fs::read_to_string(&manifest_path) - .unwrap_or_else(|err| panic!("failed to read {}: {err}", manifest_path.display())); + let manifest_source = match fs::read_to_string(&manifest_path) { + Ok(source) => source, + Err(err) => { + let msg = format!("failed to read {}: {err}", manifest_path.display()); + return quote!(compile_error!(#msg);).into(); + } + }; - let mut manifest: Manifest = toml::from_str(&manifest_source) - .unwrap_or_else(|err| panic!("failed to parse {}: {err}", manifest_path.display())); - manifest - .validate() - .unwrap_or_else(|err| panic!("failed to validate {}: {err}", manifest_path.display())); + let mut manifest: Manifest = match toml::from_str(&manifest_source) { + Ok(parsed) => parsed, + Err(err) => { + let msg = format!("failed to parse {}: {err}", manifest_path.display()); + return quote!(compile_error!(#msg);).into(); + } + }; + if let Err(err) = manifest.validate() { + let msg = format!("failed to validate {}: {err}", manifest_path.display()); + return quote!(compile_error!(#msg);).into(); + } manifest.finalize(); let app_ident = args @@ -38,7 +136,7 @@ pub fn expand_app(input: TokenStream) -> 
TokenStream { .app .name .clone() - .unwrap_or_else(|| "EdgeZero App".to_string()); + .unwrap_or_else(|| "EdgeZero App".to_owned()); let app_name_lit = LitStr::new(&app_name, Span::call_site()); let middleware_tokens = build_middleware_tokens(&manifest); @@ -53,11 +151,19 @@ pub fn expand_app(input: TokenStream) -> TokenStream { build_router() } + fn configure(_app: &mut edgezero_core::app::App) {} + fn name() -> &'static str { #app_name_lit } #config_store_tokens + + fn build_app() -> edgezero_core::app::App { + let mut app = edgezero_core::app::App::with_name(Self::routes(), Self::name()); + Self::configure(&mut app); + app + } } pub fn build_router() -> edgezero_core::router::RouterService { @@ -71,85 +177,20 @@ pub fn expand_app(input: TokenStream) -> TokenStream { output.into() } -fn resolve_manifest_path(relative: String) -> PathBuf { - let manifest_dir = env::var("CARGO_MANIFEST_DIR").expect("CARGO_MANIFEST_DIR env var"); - let mut path = PathBuf::from(manifest_dir); - path.push(relative); - path -} - -fn build_route_tokens(manifest: &Manifest) -> Vec { - manifest - .triggers - .http - .iter() - .filter_map(|trigger| { - let handler = trigger.handler.as_deref()?; - let handler_path = parse_handler_path(handler); - let path_lit = LitStr::new(&trigger.path, Span::call_site()); - - let methods = trigger.methods(); - - let mut tokens = Vec::new(); - for method in methods { - let route_tokens = route_for_method(method, &path_lit, &handler_path); - tokens.push(route_tokens); - } - Some(tokens) - }) - .flatten() - .collect() -} - -fn build_middleware_tokens(manifest: &Manifest) -> Vec { - manifest - .app - .middleware - .iter() - .map(|middleware| { - let path = parse_handler_path(middleware); - quote! { - builder = builder.middleware(#path); - } - }) - .collect() -} - -fn build_config_store_tokens(manifest: &Manifest) -> TokenStream2 { - let Some(config) = manifest.stores.config.as_ref() else { - return quote! 
{}; - }; - - let fallback_name = config.name.as_deref().unwrap_or(DEFAULT_CONFIG_STORE_NAME); - let fallback_name_lit = LitStr::new(fallback_name, Span::call_site()); - let override_entries: Vec<_> = config - .adapters - .iter() - .map(|(adapter, cfg)| { - let adapter_lit = LitStr::new(adapter, Span::call_site()); - let name_lit = LitStr::new(&cfg.name, Span::call_site()); - quote! { - edgezero_core::app::ConfigStoreAdapterMetadata::new(#adapter_lit, #name_lit), - } - }) - .collect(); - - quote! { - fn config_store() -> Option<&'static edgezero_core::app::ConfigStoreMetadata> { - static CONFIG_STORE: edgezero_core::app::ConfigStoreMetadata = - edgezero_core::app::ConfigStoreMetadata::new( - #fallback_name_lit, - &[ - #(#override_entries)* - ], - ); - Some(&CONFIG_STORE) - } - } -} - +/// Parses a handler reference like `crate::handlers::root` from `edgezero.toml` +/// into the `syn::ExprPath` that the generated router code references. +/// +/// Called at proc-macro expansion time. If the user's manifest contains a +/// syntactically-invalid handler path, the only useful recovery is to halt +/// macro expansion with a clear message — there is no runtime to propagate +/// the error to. The panic is caught by `rustc` and surfaces as a normal +/// build failure with the file/line of the call site. 
+#[expect( + clippy::panic, + reason = "macro-expansion-time error: rustc surfaces the panic as a build failure" +)] fn parse_handler_path(handler: &str) -> syn::ExprPath { - let mut handler_str = handler.trim().to_string(); + let mut handler_str = handler.trim().to_owned(); if handler_str.starts_with("crate::") || handler_str.starts_with("self::") || handler_str.starts_with("super::") @@ -159,13 +200,36 @@ fn parse_handler_path(handler: &str) -> syn::ExprPath { let crate_name = env::var("CARGO_PKG_NAME") .map(|name| name.replace('-', "_")) .unwrap_or_default(); - if !crate_name.is_empty() && handler_str.starts_with(&(crate_name.clone() + "::")) { - handler_str = format!("crate::{}", &handler_str[crate_name.len() + 2..]); + if !crate_name.is_empty() && handler_str.starts_with(&format!("{crate_name}::")) { + handler_str = format!( + "crate::{}", + handler_str + .get(crate_name.len().saturating_add(2)..) + .unwrap_or_default(), + ); } } syn::parse_str::<syn::ExprPath>(&handler_str) - .unwrap_or_else(|err| panic!("invalid handler path `{}`: {err}", handler)) + .unwrap_or_else(|err| panic!("invalid handler path `{handler}`: {err}")) +} + +/// Resolves the manifest path passed to `app!(...)` against the +/// invoking crate's `CARGO_MANIFEST_DIR`. +/// +/// `CARGO_MANIFEST_DIR` is unconditionally set by Cargo whenever a +/// proc-macro runs against a normal crate, so the lookup cannot fail in +/// practice. Treating it as fallible would require every caller of +/// `app!(...)` to handle an outcome that has never been observed and +/// cannot be triggered without bypassing Cargo entirely. 
+#[expect( + clippy::expect_used, + reason = "CARGO_MANIFEST_DIR is a Cargo invariant during macro expansion; \ + there is no realistic failure mode to propagate" +)] +fn resolve_manifest_path(relative: String) -> PathBuf { + let manifest_dir = env::var("CARGO_MANIFEST_DIR").expect("CARGO_MANIFEST_DIR env var"); + PathBuf::from(manifest_dir).join(relative) } fn route_for_method(method: &str, path: &LitStr, handler: &syn::ExprPath) -> TokenStream2 { @@ -187,24 +251,3 @@ fn route_for_method(method: &str, path: &LitStr, handler: &syn::ExprPath) -> Tok } } } - -struct AppArgs { - path: LitStr, - app_ident: Option, -} - -impl Parse for AppArgs { - fn parse(input: ParseStream) -> syn::Result { - let path: LitStr = input.parse()?; - let app_ident = if input.peek(Token![,]) { - input.parse::()?; - Some(input.parse::()?) - } else { - None - }; - if !input.is_empty() { - return Err(input.error("unexpected tokens after app! macro arguments")); - } - Ok(Self { path, app_ident }) - } -} diff --git a/crates/edgezero-macros/src/lib.rs b/crates/edgezero-macros/src/lib.rs index 4e85147..259b116 100644 --- a/crates/edgezero-macros/src/lib.rs +++ b/crates/edgezero-macros/src/lib.rs @@ -1,5 +1,6 @@ mod action; mod app; +mod manifest_definitions; use proc_macro::TokenStream; diff --git a/crates/edgezero-macros/src/manifest_definitions.rs b/crates/edgezero-macros/src/manifest_definitions.rs new file mode 100644 index 0000000..4687b78 --- /dev/null +++ b/crates/edgezero-macros/src/manifest_definitions.rs @@ -0,0 +1,13 @@ +// Many manifest fields exist for downstream consumers (CLI, runtime +// adapters, etc.) but are unused inside the proc-macro itself, which only +// reads enough of the structure to generate routing. Allow `dead_code` so +// those fields don't trip warnings just because the macro doesn't touch them. 
+#![allow( + dead_code, + reason = "macro-side reads only the routing-relevant fields" +)] + +include!(concat!( + env!("CARGO_MANIFEST_DIR"), + "/../edgezero-core/src/manifest.rs" +)); diff --git a/examples/app-demo/Cargo.lock b/examples/app-demo/Cargo.lock index fc3583b..846359f 100644 --- a/examples/app-demo/Cargo.lock +++ b/examples/app-demo/Cargo.lock @@ -566,6 +566,7 @@ dependencies = [ "futures-util", "log", "log-fastly", + "thiserror 2.0.18", ] [[package]] diff --git a/examples/app-demo/Cargo.toml b/examples/app-demo/Cargo.toml index f702329..ba14fbd 100644 --- a/examples/app-demo/Cargo.toml +++ b/examples/app-demo/Cargo.toml @@ -38,3 +38,33 @@ worker = { version = "0.8", default-features = false, features = ["http"] } debug = 1 codegen-units = 1 lto = "fat" + +[workspace.lints.clippy] +# Same strict gate as the main workspace. Allow-list mirrors the parent +# `Cargo.toml` only where the demo legitimately needs the same exemption — +# new entries should be added lazily when a real failure surfaces. +pedantic = { level = "warn", priority = -1 } +restriction = { level = "deny", priority = -1 } + +# Meta — required when enabling `restriction` as a group. +blanket_clippy_restriction_lints = "allow" + +# Documentation — demo is illustrative; private items don't need full docs. +missing_docs_in_private_items = "allow" + +# Style / formatting — match the main workspace's idiomatic-Rust stance. +implicit_return = "allow" +question_mark_used = "allow" +single_call_fn = "allow" +separated_literal_suffix = "allow" + +# API design — `exhaustive_structs` fires once on the unit struct generated +# by the `app!` macro. +exhaustive_structs = "allow" + +# Imports / paths — demo binaries are std applications, not no_std libraries. 
+std_instead_of_alloc = "allow" +std_instead_of_core = "allow" + +[workspace.lints.rust] +unsafe_code = "deny" diff --git a/examples/app-demo/clippy.toml b/examples/app-demo/clippy.toml new file mode 100644 index 0000000..99dd0fd --- /dev/null +++ b/examples/app-demo/clippy.toml @@ -0,0 +1,9 @@ +# Clippy configuration. See https://doc.rust-lang.org/clippy/lint_configuration.html +# +# Test code uses `.unwrap()`, `.expect()`, `panic!`, `assert!`, indexing, and +# other "if-this-fails-the-test-fails" idioms by convention. Mirror the main +# workspace and exempt tests from the corresponding restriction lints. +allow-expect-in-tests = true +allow-indexing-slicing-in-tests = true +allow-panic-in-tests = true +allow-unwrap-in-tests = true diff --git a/examples/app-demo/crates/app-demo-adapter-axum/Cargo.toml b/examples/app-demo/crates/app-demo-adapter-axum/Cargo.toml index 5645499..3f0621d 100644 --- a/examples/app-demo/crates/app-demo-adapter-axum/Cargo.toml +++ b/examples/app-demo/crates/app-demo-adapter-axum/Cargo.toml @@ -5,6 +5,9 @@ edition = "2021" license.workspace = true publish = false +[lints] +workspace = true + [[bin]] name = "app-demo-adapter-axum" path = "src/main.rs" diff --git a/examples/app-demo/crates/app-demo-adapter-axum/src/main.rs b/examples/app-demo/crates/app-demo-adapter-axum/src/main.rs index da4b61b..de27e4e 100644 --- a/examples/app-demo/crates/app-demo-adapter-axum/src/main.rs +++ b/examples/app-demo/crates/app-demo-adapter-axum/src/main.rs @@ -1,9 +1,5 @@ use app_demo_core::App; -fn main() { - if let Err(err) = edgezero_adapter_axum::run_app::(include_str!("../../../edgezero.toml")) - { - eprintln!("axum adapter failed: {err}"); - std::process::exit(1); - } +fn main() -> anyhow::Result<()> { + edgezero_adapter_axum::dev_server::run_app::(include_str!("../../../edgezero.toml")) } diff --git a/examples/app-demo/crates/app-demo-adapter-cloudflare/Cargo.toml b/examples/app-demo/crates/app-demo-adapter-cloudflare/Cargo.toml index fd040e1..9bba19d 
100644 --- a/examples/app-demo/crates/app-demo-adapter-cloudflare/Cargo.toml +++ b/examples/app-demo/crates/app-demo-adapter-cloudflare/Cargo.toml @@ -5,6 +5,9 @@ edition = "2021" license.workspace = true publish = false +[lints] +workspace = true + [[bin]] name = "app-demo-adapter-cloudflare" path = "src/main.rs" diff --git a/examples/app-demo/crates/app-demo-adapter-cloudflare/src/main.rs b/examples/app-demo/crates/app-demo-adapter-cloudflare/src/main.rs index 910a2cb..96d0dbf 100644 --- a/examples/app-demo/crates/app-demo-adapter-cloudflare/src/main.rs +++ b/examples/app-demo/crates/app-demo-adapter-cloudflare/src/main.rs @@ -1,3 +1,7 @@ +#[expect( + clippy::print_stderr, + reason = "host stub; the real binary only runs on wasm32-unknown-unknown" +)] fn main() { eprintln!( "Run `wrangler dev` or target wasm32-unknown-unknown to execute app-demo-adapter-cloudflare." diff --git a/examples/app-demo/crates/app-demo-adapter-fastly/Cargo.toml b/examples/app-demo/crates/app-demo-adapter-fastly/Cargo.toml index 4f365ec..e4a259a 100644 --- a/examples/app-demo/crates/app-demo-adapter-fastly/Cargo.toml +++ b/examples/app-demo/crates/app-demo-adapter-fastly/Cargo.toml @@ -5,6 +5,9 @@ edition = "2021" license.workspace = true publish = false +[lints] +workspace = true + [[bin]] name = "app-demo-adapter-fastly" path = "src/main.rs" diff --git a/examples/app-demo/crates/app-demo-adapter-fastly/src/main.rs b/examples/app-demo/crates/app-demo-adapter-fastly/src/main.rs index f81b984..8f6ad39 100644 --- a/examples/app-demo/crates/app-demo-adapter-fastly/src/main.rs +++ b/examples/app-demo/crates/app-demo-adapter-fastly/src/main.rs @@ -1,4 +1,7 @@ -#![cfg_attr(not(target_arch = "wasm32"), allow(dead_code))] +#![cfg_attr( + not(target_arch = "wasm32"), + allow(dead_code, reason = "Fastly entrypoint is wasm32-only") +)] #[cfg(target_arch = "wasm32")] use app_demo_core::App; @@ -11,6 +14,10 @@ pub fn main(req: Request) -> Result { } #[cfg(not(target_arch = "wasm32"))] +#[expect( + 
clippy::print_stderr, + reason = "host stub; the real binary only runs on wasm32-wasip1" +)] fn main() { eprintln!("app-demo-adapter-fastly: target wasm32-wasip1 to run on Fastly."); } diff --git a/examples/app-demo/crates/app-demo-adapter-spin/Cargo.toml b/examples/app-demo/crates/app-demo-adapter-spin/Cargo.toml index b18a924..c5df0d0 100644 --- a/examples/app-demo/crates/app-demo-adapter-spin/Cargo.toml +++ b/examples/app-demo/crates/app-demo-adapter-spin/Cargo.toml @@ -5,6 +5,9 @@ edition = "2021" license.workspace = true publish = false +[lints] +workspace = true + [lib] crate-type = ["cdylib"] path = "src/lib.rs" diff --git a/examples/app-demo/crates/app-demo-core/Cargo.toml b/examples/app-demo/crates/app-demo-core/Cargo.toml index 91c2281..3c96c8a 100644 --- a/examples/app-demo/crates/app-demo-core/Cargo.toml +++ b/examples/app-demo/crates/app-demo-core/Cargo.toml @@ -5,6 +5,9 @@ edition = "2021" license.workspace = true publish = false +[lints] +workspace = true + [dependencies] bytes = { workspace = true } edgezero-core = { workspace = true } diff --git a/examples/app-demo/crates/app-demo-core/src/handlers.rs b/examples/app-demo/crates/app-demo-core/src/handlers.rs index fb65b39..061b52c 100644 --- a/examples/app-demo/crates/app-demo-core/src/handlers.rs +++ b/examples/app-demo/crates/app-demo-core/src/handlers.rs @@ -1,3 +1,5 @@ +use std::env; + use bytes::Bytes; use edgezero_core::action; use edgezero_core::body::Body; @@ -7,18 +9,17 @@ use edgezero_core::extractor::{Headers, Json, Kv, Path, Query, Secrets, Validate use edgezero_core::http::{self, Response, StatusCode, Uri}; use edgezero_core::proxy::ProxyRequest; use edgezero_core::response::Text; -use futures::{stream, StreamExt}; +use futures::{stream, StreamExt as _}; -const DEFAULT_PROXY_BASE: &str = "https://httpbin.org"; const ALLOWED_CONFIG_KEYS: &[&str] = &["greeting", "feature.new_checkout", "service.timeout_ms"]; -const SMOKE_SECRET_NAME: &str = "SMOKE_SECRET"; -const 
SMOKE_SECRET_MISSING_NAME: &str = "SMOKE_SECRET_MISSING"; +const DEFAULT_PROXY_BASE: &str = "https://httpbin.org"; +/// Maximum request body size (25 MB, matches KV value limit). +const MAX_BODY_SIZE: usize = 25 * 1024 * 1024; +// 512 (KV key limit) - 5 (len of "note:") = 507 +const MAX_NOTE_ID_LEN: u64 = 507; const SECRET_STORE_NAME: &str = "EDGEZERO_SECRETS"; - -#[derive(serde::Deserialize)] -pub(crate) struct EchoParams { - pub(crate) name: String, -} +const SMOKE_SECRET_MISSING_NAME: &str = "SMOKE_SECRET_MISSING"; +const SMOKE_SECRET_NAME: &str = "SMOKE_SECRET"; #[derive(serde::Deserialize)] struct ConfigParams { @@ -26,74 +27,74 @@ struct ConfigParams { } #[derive(serde::Deserialize)] -pub(crate) struct EchoBody { - pub(crate) name: String, +pub struct EchoBody { + pub name: String, } #[derive(serde::Deserialize)] -struct ProxyPath { - #[serde(default)] - rest: String, +pub struct EchoParams { + pub name: String, } -// 512 (KV key limit) - 5 (len of "note:") = 507 -const MAX_NOTE_ID_LEN: u64 = 507; - #[derive(serde::Deserialize, validator::Validate)] -pub(crate) struct NoteIdPath { +pub struct NoteIdPath { #[validate(length( - min = 1, + min = 1_u64, max = "MAX_NOTE_ID_LEN", message = "note id must be 1–507 bytes" ))] - pub(crate) id: String, + pub id: String, } -/// Maximum request body size (25 MB, matches KV value limit). 
-const MAX_BODY_SIZE: usize = 25 * 1024 * 1024; +#[derive(serde::Deserialize)] +struct ProxyPath { + #[serde(default)] + rest: String, +} #[action] -pub(crate) async fn root() -> Text<&'static str> { +pub async fn root() -> Text<&'static str> { Text::new("app-demo app") } #[action] -pub(crate) async fn echo(Path(params): Path) -> Text { +pub async fn echo(Path(params): Path) -> Text { Text::new(format!("Hello, {}!", params.name)) } #[action] -pub(crate) async fn headers(Headers(headers): Headers) -> Text { +pub async fn headers(Headers(headers): Headers) -> Text { let ua = headers .get("user-agent") .and_then(|value| value.to_str().ok()) .unwrap_or("(unknown)"); - Text::new(format!("ua={}", ua)) + Text::new(format!("ua={ua}")) } #[action] -pub(crate) async fn stream() -> Response { - let body = - Body::stream(stream::iter(0..3).map(|index| Bytes::from(format!("chunk {}\n", index)))); +pub async fn stream() -> Result { + let body = Body::stream( + stream::iter(0_i32..3_i32).map(|index| Bytes::from(format!("chunk {index}\n"))), + ); http::response_builder() .status(StatusCode::OK) .header("content-type", "text/plain; charset=utf-8") .body(body) - .expect("static stream response") + .map_err(EdgeError::internal) } #[action] -pub(crate) async fn echo_json(Json(body): Json) -> Text { +pub async fn echo_json(Json(body): Json) -> Text { Text::new(format!("Hello, {}!", body.name)) } #[action] -pub(crate) async fn proxy_demo(RequestContext(ctx): RequestContext) -> Result { +pub async fn proxy_demo(RequestContext(ctx): RequestContext) -> Result { let params: ProxyPath = ctx.path()?; let proxy_handle = ctx.proxy_handle(); let request = ctx.into_request(); - let base = std::env::var("API_BASE_URL").unwrap_or_else(|_| DEFAULT_PROXY_BASE.to_string()); + let base = env::var("API_BASE_URL").unwrap_or_else(|_| DEFAULT_PROXY_BASE.to_owned()); let target = build_proxy_target(&base, ¶ms.rest, request.uri())?; let proxy_request = ProxyRequest::from_request(request, target); @@ -105,7 
+106,7 @@ pub(crate) async fn proxy_demo(RequestContext(ctx): RequestContext) -> Result Result { - let mut target = base.trim_end_matches('/').to_string(); + let mut target = base.trim_end_matches('/').to_owned(); let trimmed_rest = rest.trim_start_matches('/'); if !trimmed_rest.is_empty() { target.push('/'); @@ -144,7 +145,7 @@ fn text_response(status: StatusCode, message: impl Into) -> Result Result { +pub async fn config_get(RequestContext(ctx): RequestContext) -> Result { let params: ConfigParams = ctx.path()?; if !ALLOWED_CONFIG_KEYS.contains(¶ms.name.as_str()) { return text_response( @@ -171,9 +172,9 @@ pub(crate) async fn config_get(RequestContext(ctx): RequestContext) -> Result Result { +pub async fn kv_counter(Kv(store): Kv) -> Result { let count: i64 = store - .read_modify_write("demo:counter", 0i64, |n| n + 1) + .read_modify_write("demo:counter", 0_i64, |n| n.wrapping_add(1)) .await?; let body = serde_json::json!({ "count": count }).to_string(); http::response_builder() @@ -185,7 +186,7 @@ pub(crate) async fn kv_counter(Kv(store): Kv) -> Result { /// Store a note by id (body = note text). #[action] -pub(crate) async fn kv_note_put( +pub async fn kv_note_put( Kv(store): Kv, ValidatedPath(path): ValidatedPath, RequestContext(ctx): RequestContext, @@ -203,7 +204,7 @@ pub(crate) async fn kv_note_put( /// Read a note by id. #[action] -pub(crate) async fn kv_note_get( +pub async fn kv_note_get( Kv(store): Kv, ValidatedPath(path): ValidatedPath, ) -> Result { @@ -219,7 +220,7 @@ pub(crate) async fn kv_note_get( /// Delete a note by id. #[action] -pub(crate) async fn kv_note_delete( +pub async fn kv_note_delete( Kv(store): Kv, ValidatedPath(path): ValidatedPath, ) -> Result { @@ -239,9 +240,9 @@ pub(crate) async fn kv_note_delete( /// Echo the value of an allowlisted smoke-test secret from the configured store. 
/// -/// Usage: GET /secrets/echo?name=SMOKE_SECRET +/// Usage: `GET /secrets/echo?name=SMOKE_SECRET` #[action] -pub(crate) async fn secrets_echo( +pub async fn secrets_echo( Secrets(store): Secrets, Query(params): Query, ) -> Result, EdgeError> { @@ -273,69 +274,104 @@ mod tests { use edgezero_core::key_value_store::{KvError, KvHandle, KvPage, KvStore}; use edgezero_core::params::PathParams; use edgezero_core::proxy::{ProxyClient, ProxyHandle, ProxyResponse}; - use edgezero_core::response::IntoResponse; - use futures::{executor::block_on, StreamExt}; + use edgezero_core::response::IntoResponse as _; + use edgezero_core::secret_store::{InMemorySecretStore, SecretHandle}; + use futures::executor::block_on; use std::collections::{BTreeMap, HashMap}; use std::sync::{Arc, Mutex}; + use std::time::Duration; - #[test] - fn root_returns_static_body() { - let ctx = empty_context("/"); - let response = block_on(root(ctx)).expect("handler ok").into_response(); - let bytes = response.into_body().into_bytes(); - assert_eq!(bytes.as_ref(), b"app-demo app"); - } + struct MapConfigStore(HashMap); - #[test] - fn echo_formats_name_from_path() { - let ctx = context_with_params("/echo/alice", &[("name", "alice")]); - let response = block_on(echo(ctx)).expect("handler ok").into_response(); - let bytes = response.into_body().into_bytes(); - assert_eq!(bytes.as_ref(), b"Hello, alice!"); + struct MockKv { + data: Mutex>, } - #[test] - fn headers_reports_user_agent() { - let ctx = context_with_header( - "/headers", - HeaderName::from_static("user-agent"), - HeaderValue::from_static("DemoAgent"), - ); + struct TestProxyClient; - let response = block_on(headers(ctx)).expect("handler ok").into_response(); - let bytes = response.into_body().into_bytes(); - assert_eq!(bytes.as_ref(), b"ua=DemoAgent"); - } + struct UnavailableConfigStore; - #[test] - fn stream_emits_expected_chunks() { - let ctx = empty_context("/stream"); - let response = block_on(stream(ctx)).expect("handler ok"); - 
assert_eq!(response.status(), StatusCode::OK); + impl ConfigStore for MapConfigStore { + fn get(&self, key: &str) -> Result, ConfigStoreError> { + Ok(self.0.get(key).cloned()) + } + } - let mut chunks = response.into_body().into_stream().expect("stream body"); - let collected = block_on(async { - let mut buf = Vec::new(); - while let Some(chunk) = chunks.next().await { - let chunk = chunk.expect("chunk"); - buf.extend_from_slice(&chunk); + impl MockKv { + fn new() -> Self { + Self { + data: Mutex::new(BTreeMap::new()), } - buf - }); - assert_eq!( - String::from_utf8(collected).expect("utf8"), - "chunk 0\nchunk 1\nchunk 2\n" - ); + } } - #[test] - fn echo_json_formats_payload() { - let ctx = context_with_json("/echo", r#"{"name":"Edge"}"#); - let response = block_on(echo_json(ctx)) - .expect("handler ok") - .into_response(); - let bytes = response.into_body().into_bytes(); - assert_eq!(bytes.as_ref(), b"Hello, Edge!"); + #[async_trait(?Send)] + impl KvStore for MockKv { + async fn delete(&self, key: &str) -> Result<(), KvError> { + self.data.lock().unwrap().remove(key); + Ok(()) + } + + async fn exists(&self, key: &str) -> Result { + Ok(self.data.lock().unwrap().contains_key(key)) + } + + async fn get_bytes(&self, key: &str) -> Result, KvError> { + Ok(self.data.lock().unwrap().get(key).cloned()) + } + + async fn list_keys_page( + &self, + prefix: &str, + cursor: Option<&str>, + limit: usize, + ) -> Result { + let data = self.data.lock().unwrap(); + let mut keys = data + .keys() + .filter(|key| { + key.starts_with(prefix) && cursor.is_none_or(|cur| key.as_str() > cur) + }) + .cloned() + .collect::>(); + let has_more = keys.len() > limit; + keys.truncate(limit); + + Ok(KvPage { + cursor: has_more.then(|| keys.last().cloned()).flatten(), + keys, + }) + } + + async fn put_bytes(&self, key: &str, value: Bytes) -> Result<(), KvError> { + self.data.lock().unwrap().insert(key.to_owned(), value); + Ok(()) + } + + async fn put_bytes_with_ttl( + &self, + key: &str, + value: 
Bytes, + _ttl: Duration, + ) -> Result<(), KvError> { + self.data.lock().unwrap().insert(key.to_owned(), value); + Ok(()) + } + } + + #[async_trait(?Send)] + impl ProxyClient for TestProxyClient { + async fn send(&self, request: ProxyRequest) -> Result { + let (_method, uri, _headers, _body, _) = request.into_parts(); + assert!(uri.to_string().contains("status/201")); + Ok(ProxyResponse::new(StatusCode::CREATED, Body::empty())) + } + } + + impl ConfigStore for UnavailableConfigStore { + fn get(&self, _key: &str) -> Result, ConfigStoreError> { + Err(ConfigStoreError::unavailable("backend offline")) + } } #[test] @@ -350,51 +386,115 @@ mod tests { } #[test] - fn proxy_demo_without_handle_returns_placeholder() { - let ctx = context_with_params("/proxy/status/200", &[("rest", "status/200")]); - let response = block_on(proxy_demo(ctx)).expect("response"); - assert_eq!(response.status(), StatusCode::NOT_IMPLEMENTED); + fn config_get_returns_404_for_keys_outside_demo_allowlist() { + let ctx = context_with_config_key("missing.key", &[("missing.key", "value")]); + let response = block_on(config_get(ctx)).expect("handler ok"); + assert_eq!(response.status(), StatusCode::NOT_FOUND); } - struct TestProxyClient; + #[test] + fn config_get_returns_404_when_key_not_in_allowlist() { + let ctx = context_with_config_key("missing.key", &[("other.key", "value")]); + let response = block_on(config_get(ctx)).expect("handler ok"); + assert_eq!(response.status(), StatusCode::NOT_FOUND); + } - #[async_trait(?Send)] - impl ProxyClient for TestProxyClient { - async fn send(&self, request: ProxyRequest) -> Result { - let (_method, uri, _headers, _body, _) = request.into_parts(); - assert!(uri.to_string().contains("status/201")); - Ok(ProxyResponse::new(StatusCode::CREATED, Body::empty())) - } + #[test] + fn config_get_returns_404_when_key_not_in_store() { + let ctx = context_with_config_key("greeting", &[("other_key", "value")]); + let response = block_on(config_get(ctx)).expect("handler ok"); 
+ assert_eq!(response.status(), StatusCode::NOT_FOUND); } #[test] - fn proxy_demo_uses_injected_handle() { + fn config_get_returns_503_when_no_store_injected() { + let ctx = context_with_params("/config/greeting", &[("name", "greeting")]); + let response = block_on(config_get(ctx)).expect("handler ok"); + assert_eq!(response.status(), StatusCode::SERVICE_UNAVAILABLE); + } + + #[test] + fn config_get_returns_503_when_store_lookup_fails() { + let ctx = context_with_unavailable_config_store("greeting"); + let err = block_on(config_get(ctx)).expect_err("expected store error"); + assert_eq!(err.status(), StatusCode::SERVICE_UNAVAILABLE); + } + + #[test] + fn config_get_returns_value_when_key_exists() { + let ctx = context_with_config_key("greeting", &[("greeting", "hello from config store")]); + let response = block_on(config_get(ctx)).expect("handler ok"); + assert_eq!(response.status(), StatusCode::OK); + assert_eq!( + response + .into_body() + .into_bytes() + .expect("buffered") + .as_ref(), + b"hello from config store" + ); + } + + fn context_with_config_key(key: &str, entries: &[(&str, &str)]) -> RequestContext { let mut request = request_builder() .method(Method::GET) - .uri("/proxy/status/201") + .uri(format!("/config/{key}")) .body(Body::empty()) .expect("request"); + let store = MapConfigStore( + entries + .iter() + .map(|&(name, value)| (name.to_owned(), value.to_owned())) + .collect(), + ); request .extensions_mut() - .insert(ProxyHandle::with_client(TestProxyClient)); - + .insert(ConfigStoreHandle::new(Arc::new(store))); let mut params = HashMap::new(); - params.insert("rest".to_string(), "status/201".to_string()); - let ctx = RequestContext::new(request, PathParams::new(params)); - - let response = block_on(proxy_demo(ctx)).expect("response"); - assert_eq!(response.status(), StatusCode::CREATED); + params.insert("name".to_owned(), key.to_owned()); + RequestContext::new(request, PathParams::new(params)) } - fn empty_context(path: &str) -> RequestContext { - 
let request = request_builder() + fn context_with_header(path: &str, header: HeaderName, value: HeaderValue) -> RequestContext { + let mut request = request_builder() .method(Method::GET) .uri(path) .body(Body::empty()) .expect("request"); + request.headers_mut().insert(header, value); + RequestContext::new(request, PathParams::default()) + } + + fn context_with_json(path: &str, json: &str) -> RequestContext { + let request = request_builder() + .method(Method::POST) + .uri(path) + .body(Body::from(json)) + .expect("request"); RequestContext::new(request, PathParams::default()) } + fn context_with_kv( + path: &str, + method: Method, + body: Body, + params: &[(&str, &str)], + ) -> (RequestContext, KvHandle) { + let kv = Arc::new(MockKv::new()); + let handle = KvHandle::new(kv); + let mut request = request_builder() + .method(method) + .uri(path) + .body(body) + .expect("request"); + request.extensions_mut().insert(handle.clone()); + let map = params + .iter() + .map(|&(key, value)| (key.to_owned(), value.to_owned())) + .collect::>(); + (RequestContext::new(request, PathParams::new(map)), handle) + } + fn context_with_params(path: &str, params: &[(&str, &str)]) -> RequestContext { let request = request_builder() .method(Method::GET) @@ -403,66 +503,29 @@ mod tests { .expect("request"); let map = params .iter() - .map(|(k, v)| ((*k).to_string(), (*v).to_string())) + .map(|&(key, value)| (key.to_owned(), value.to_owned())) .collect::>(); RequestContext::new(request, PathParams::new(map)) } - fn context_with_header(path: &str, header: HeaderName, value: HeaderValue) -> RequestContext { + fn context_with_secrets(path: &str, query: &str, entries: &[(&str, &str)]) -> RequestContext { + let provider = InMemorySecretStore::new(entries.iter().map(|&(name, value)| { + ( + format!("{SECRET_STORE_NAME}/{name}"), + bytes::Bytes::from(value.to_owned()), + ) + })); + let handle = SecretHandle::new(Arc::new(provider)); + let uri = format!("{path}?{query}"); let mut request = 
request_builder() .method(Method::GET) - .uri(path) + .uri(uri.as_str()) .body(Body::empty()) .expect("request"); - request.headers_mut().insert(header, value); + request.extensions_mut().insert(handle); RequestContext::new(request, PathParams::default()) } - fn context_with_json(path: &str, json: &str) -> RequestContext { - let request = request_builder() - .method(Method::POST) - .uri(path) - .body(Body::from(json)) - .expect("request"); - RequestContext::new(request, PathParams::default()) - } - - struct MapConfigStore(HashMap); - - impl ConfigStore for MapConfigStore { - fn get(&self, key: &str) -> Result, ConfigStoreError> { - Ok(self.0.get(key).cloned()) - } - } - - struct UnavailableConfigStore; - - impl ConfigStore for UnavailableConfigStore { - fn get(&self, _key: &str) -> Result, ConfigStoreError> { - Err(ConfigStoreError::unavailable("backend offline")) - } - } - - fn context_with_config_key(key: &str, entries: &[(&str, &str)]) -> RequestContext { - let mut request = request_builder() - .method(Method::GET) - .uri(format!("/config/{key}")) - .body(Body::empty()) - .expect("request"); - let store = MapConfigStore( - entries - .iter() - .map(|(k, v)| (k.to_string(), v.to_string())) - .collect(), - ); - request - .extensions_mut() - .insert(ConfigStoreHandle::new(Arc::new(store))); - let mut params = HashMap::new(); - params.insert("name".to_string(), key.to_string()); - RequestContext::new(request, PathParams::new(params)) - } - fn context_with_unavailable_config_store(key: &str) -> RequestContext { let mut request = request_builder() .method(Method::GET) @@ -473,137 +536,55 @@ mod tests { .extensions_mut() .insert(ConfigStoreHandle::new(Arc::new(UnavailableConfigStore))); let mut params = HashMap::new(); - params.insert("name".to_string(), key.to_string()); + params.insert("name".to_owned(), key.to_owned()); RequestContext::new(request, PathParams::new(params)) } #[test] - fn config_get_returns_value_when_key_exists() { - let ctx = 
context_with_config_key("greeting", &[("greeting", "hello from config store")]); - let response = block_on(config_get(ctx)).expect("handler ok"); - assert_eq!(response.status(), StatusCode::OK); - assert_eq!( - response.into_body().into_bytes().as_ref(), - b"hello from config store" - ); - } - - #[test] - fn config_get_returns_404_when_key_not_in_allowlist() { - let ctx = context_with_config_key("missing.key", &[("other.key", "value")]); - let response = block_on(config_get(ctx)).expect("handler ok"); - assert_eq!(response.status(), StatusCode::NOT_FOUND); - } - - #[test] - fn config_get_returns_404_when_key_not_in_store() { - let ctx = context_with_config_key("greeting", &[("other_key", "value")]); - let response = block_on(config_get(ctx)).expect("handler ok"); - assert_eq!(response.status(), StatusCode::NOT_FOUND); + fn echo_formats_name_from_path() { + let ctx = context_with_params("/echo/alice", &[("name", "alice")]); + let response = block_on(echo(ctx)) + .expect("handler ok") + .into_response() + .expect("response"); + let bytes = response.into_body().into_bytes().expect("buffered"); + assert_eq!(bytes.as_ref(), b"Hello, alice!"); } #[test] - fn config_get_returns_404_for_keys_outside_demo_allowlist() { - let ctx = context_with_config_key("missing.key", &[("missing.key", "value")]); - let response = block_on(config_get(ctx)).expect("handler ok"); - assert_eq!(response.status(), StatusCode::NOT_FOUND); + fn echo_json_formats_payload() { + let ctx = context_with_json("/echo", r#"{"name":"Edge"}"#); + let response = block_on(echo_json(ctx)) + .expect("handler ok") + .into_response() + .expect("response"); + let bytes = response.into_body().into_bytes().expect("buffered"); + assert_eq!(bytes.as_ref(), b"Hello, Edge!"); } - #[test] - fn config_get_returns_503_when_no_store_injected() { - let ctx = context_with_params("/config/greeting", &[("name", "greeting")]); - let response = block_on(config_get(ctx)).expect("handler ok"); - assert_eq!(response.status(), 
StatusCode::SERVICE_UNAVAILABLE); + fn empty_context(path: &str) -> RequestContext { + let request = request_builder() + .method(Method::GET) + .uri(path) + .body(Body::empty()) + .expect("request"); + RequestContext::new(request, PathParams::default()) } #[test] - fn config_get_returns_503_when_store_lookup_fails() { - let ctx = context_with_unavailable_config_store("greeting"); - let err = block_on(config_get(ctx)).expect_err("expected store error"); - assert_eq!(err.status(), StatusCode::SERVICE_UNAVAILABLE); - } - - struct MockKv { - data: Mutex>, - } - - impl MockKv { - fn new() -> Self { - Self { - data: Mutex::new(BTreeMap::new()), - } - } - } - - #[async_trait(?Send)] - impl KvStore for MockKv { - async fn get_bytes(&self, key: &str) -> Result, KvError> { - Ok(self.data.lock().unwrap().get(key).cloned()) - } - - async fn put_bytes(&self, key: &str, value: Bytes) -> Result<(), KvError> { - self.data.lock().unwrap().insert(key.to_string(), value); - Ok(()) - } - - async fn put_bytes_with_ttl( - &self, - key: &str, - value: Bytes, - _ttl: std::time::Duration, - ) -> Result<(), KvError> { - self.data.lock().unwrap().insert(key.to_string(), value); - Ok(()) - } - - async fn delete(&self, key: &str) -> Result<(), KvError> { - self.data.lock().unwrap().remove(key); - Ok(()) - } - - async fn list_keys_page( - &self, - prefix: &str, - cursor: Option<&str>, - limit: usize, - ) -> Result { - let data = self.data.lock().unwrap(); - let mut keys = data - .keys() - .filter(|key| { - key.starts_with(prefix) && cursor.is_none_or(|cursor| key.as_str() > cursor) - }) - .cloned() - .collect::>(); - let has_more = keys.len() > limit; - keys.truncate(limit); - - Ok(KvPage { - cursor: has_more.then(|| keys.last().cloned()).flatten(), - keys, - }) - } - } + fn headers_reports_user_agent() { + let ctx = context_with_header( + "/headers", + HeaderName::from_static("user-agent"), + HeaderValue::from_static("DemoAgent"), + ); - fn context_with_kv( - path: &str, - method: Method, - 
body: Body, - params: &[(&str, &str)], - ) -> (RequestContext, KvHandle) { - let kv = Arc::new(MockKv::new()); - let handle = KvHandle::new(kv); - let mut request = request_builder() - .method(method) - .uri(path) - .body(body) - .expect("request"); - request.extensions_mut().insert(handle.clone()); - let map = params - .iter() - .map(|(k, v)| ((*k).to_string(), (*v).to_string())) - .collect::>(); - (RequestContext::new(request, PathParams::new(map)), handle) + let response = block_on(headers(ctx)) + .expect("handler ok") + .into_response() + .expect("response"); + let bytes = response.into_body().into_bytes().expect("buffered"); + assert_eq!(bytes.as_ref(), b"ua=DemoAgent"); } #[test] @@ -611,39 +592,34 @@ mod tests { let (ctx, _) = context_with_kv("/kv/counter", Method::POST, Body::empty(), &[]); let resp = block_on(kv_counter(ctx)).expect("response"); assert_eq!(resp.status(), StatusCode::OK); - let body = resp.into_body().into_bytes(); + let body = resp.into_body().into_bytes().expect("buffered"); let json: serde_json::Value = serde_json::from_slice(&body).unwrap(); - assert_eq!(json["count"], 1); + assert_eq!(json["count"], 1_i64); } #[test] - fn kv_note_put_and_get() { + fn kv_note_delete_returns_no_content() { let (ctx, handle) = context_with_kv( - "/kv/notes/abc", + "/kv/notes/del", Method::POST, - Body::from("hello world"), - &[("id", "abc")], + Body::from("to-delete"), + &[("id", "del")], ); - let resp = block_on(kv_note_put(ctx)).expect("response"); - assert_eq!(resp.status(), StatusCode::CREATED); + block_on(kv_note_put(ctx)).unwrap(); let (ctx2, _) = { let mut request = request_builder() - .method(Method::GET) - .uri("/kv/notes/abc") + .method(Method::DELETE) + .uri("/kv/notes/del") .body(Body::empty()) .expect("request"); request.extensions_mut().insert(handle.clone()); let mut map = HashMap::new(); - map.insert("id".to_string(), "abc".to_string()); - ( - RequestContext::new(request, PathParams::new(map)), - handle.clone(), - ) + 
map.insert("id".to_owned(), "del".to_owned()); + (RequestContext::new(request, PathParams::new(map)), handle) }; - let resp = block_on(kv_note_get(ctx2)).expect("response"); - assert_eq!(resp.status(), StatusCode::OK); - assert_eq!(resp.into_body().into_bytes().as_ref(), b"hello world"); + let resp = block_on(kv_note_delete(ctx2)).expect("response"); + assert_eq!(resp.status(), StatusCode::NO_CONTENT); } #[test] @@ -659,64 +635,100 @@ mod tests { } #[test] - fn kv_note_delete_returns_no_content() { + fn kv_note_put_and_get() { let (ctx, handle) = context_with_kv( - "/kv/notes/del", + "/kv/notes/abc", Method::POST, - Body::from("to-delete"), - &[("id", "del")], + Body::from("hello world"), + &[("id", "abc")], ); - block_on(kv_note_put(ctx)).unwrap(); + let put_resp = block_on(kv_note_put(ctx)).expect("response"); + assert_eq!(put_resp.status(), StatusCode::CREATED); let (ctx2, _) = { let mut request = request_builder() - .method(Method::DELETE) - .uri("/kv/notes/del") + .method(Method::GET) + .uri("/kv/notes/abc") .body(Body::empty()) .expect("request"); request.extensions_mut().insert(handle.clone()); let mut map = HashMap::new(); - map.insert("id".to_string(), "del".to_string()); - (RequestContext::new(request, PathParams::new(map)), handle) + map.insert("id".to_owned(), "abc".to_owned()); + ( + RequestContext::new(request, PathParams::new(map)), + handle.clone(), + ) }; - let resp = block_on(kv_note_delete(ctx2)).expect("response"); - assert_eq!(resp.status(), StatusCode::NO_CONTENT); + let get_resp = block_on(kv_note_get(ctx2)).expect("response"); + assert_eq!(get_resp.status(), StatusCode::OK); + assert_eq!( + get_resp + .into_body() + .into_bytes() + .expect("buffered") + .as_ref(), + b"hello world" + ); } - // -- Secrets handler tests ---------------------------------------------- - - use edgezero_core::secret_store::{InMemorySecretStore, SecretHandle}; - - fn context_with_secrets(path: &str, query: &str, entries: &[(&str, &str)]) -> RequestContext { - let 
provider = InMemorySecretStore::new(entries.iter().map(|(k, v)| { - ( - format!("{SECRET_STORE_NAME}/{k}"), - bytes::Bytes::from(v.to_string()), - ) - })); - let handle = SecretHandle::new(std::sync::Arc::new(provider)); - let uri = format!("{}?{}", path, query); + #[test] + fn proxy_demo_uses_injected_handle() { let mut request = request_builder() .method(Method::GET) - .uri(uri.as_str()) + .uri("/proxy/status/201") .body(Body::empty()) .expect("request"); - request.extensions_mut().insert(handle); - RequestContext::new(request, PathParams::default()) + request + .extensions_mut() + .insert(ProxyHandle::with_client(TestProxyClient)); + + let mut params = HashMap::new(); + params.insert("rest".to_owned(), "status/201".to_owned()); + let ctx = RequestContext::new(request, PathParams::new(params)); + + let response = block_on(proxy_demo(ctx)).expect("response"); + assert_eq!(response.status(), StatusCode::CREATED); } #[test] - fn secrets_echo_returns_secret_value() { - let ctx = context_with_secrets( - "/secrets/echo", - "name=SMOKE_SECRET", - &[("SMOKE_SECRET", "my-secret-value")], - ); - let response = block_on(secrets_echo(ctx)) + fn proxy_demo_without_handle_returns_placeholder() { + let ctx = context_with_params("/proxy/status/200", &[("rest", "status/200")]); + let response = block_on(proxy_demo(ctx)).expect("response"); + assert_eq!(response.status(), StatusCode::NOT_IMPLEMENTED); + } + + #[test] + fn root_returns_static_body() { + let ctx = empty_context("/"); + let response = block_on(root(ctx)) .expect("handler ok") - .into_response(); - let bytes = response.into_body().into_bytes(); - assert_eq!(bytes.as_ref(), b"my-secret-value"); + .into_response() + .expect("response"); + let bytes = response.into_body().into_bytes().expect("buffered"); + assert_eq!(bytes.as_ref(), b"app-demo app"); + } + + #[test] + fn secrets_echo_rejects_non_smoke_secret_names() { + use edgezero_core::http::StatusCode; + + let ctx = context_with_secrets("/secrets/echo", 
"name=API_KEY", &[("API_KEY", "secret")]); + let response = block_on(secrets_echo(ctx)) + .expect_err("should reject arbitrary secret names") + .into_response() + .expect("response"); + + assert_eq!(response.status(), StatusCode::BAD_REQUEST); + let body = String::from_utf8( + response + .into_body() + .into_bytes() + .expect("buffered") + .to_vec(), + ) + .expect("utf8"); + assert!(body.contains("only smoke-test secret names are allowed")); + assert!(!body.contains("API_KEY")); } #[test] @@ -726,26 +738,55 @@ mod tests { let ctx = context_with_secrets("/secrets/echo", "name=SMOKE_SECRET_MISSING", &[]); let response = block_on(secrets_echo(ctx)) .expect_err("should fail") - .into_response(); + .into_response() + .expect("response"); assert_eq!(response.status(), StatusCode::INTERNAL_SERVER_ERROR); - let body = String::from_utf8(response.into_body().into_bytes().to_vec()).expect("utf8"); + let body = String::from_utf8( + response + .into_body() + .into_bytes() + .expect("buffered") + .to_vec(), + ) + .expect("utf8"); assert!(body.contains("required secret is not configured")); assert!(!body.contains("SMOKE_SECRET_MISSING")); } #[test] - fn secrets_echo_rejects_non_smoke_secret_names() { - use edgezero_core::http::StatusCode; - - let ctx = context_with_secrets("/secrets/echo", "name=API_KEY", &[("API_KEY", "secret")]); + fn secrets_echo_returns_secret_value() { + let ctx = context_with_secrets( + "/secrets/echo", + "name=SMOKE_SECRET", + &[("SMOKE_SECRET", "my-secret-value")], + ); let response = block_on(secrets_echo(ctx)) - .expect_err("should reject arbitrary secret names") - .into_response(); + .expect("handler ok") + .into_response() + .expect("response"); + let bytes = response.into_body().into_bytes().expect("buffered"); + assert_eq!(bytes.as_ref(), b"my-secret-value"); + } - assert_eq!(response.status(), StatusCode::BAD_REQUEST); - let body = String::from_utf8(response.into_body().into_bytes().to_vec()).expect("utf8"); - assert!(body.contains("only smoke-test 
secret names are allowed")); - assert!(!body.contains("API_KEY")); + #[test] + fn stream_emits_expected_chunks() { + let ctx = empty_context("/stream"); + let response = block_on(stream(ctx)).expect("handler ok"); + assert_eq!(response.status(), StatusCode::OK); + + let mut chunks = response.into_body().into_stream().expect("stream body"); + let collected = block_on(async { + let mut buf = Vec::new(); + while let Some(item) = chunks.next().await { + let chunk = item.expect("chunk"); + buf.extend_from_slice(&chunk); + } + buf + }); + assert_eq!( + String::from_utf8(collected).expect("utf8"), + "chunk 0\nchunk 1\nchunk 2\n" + ); } }