diff --git a/CLI.md b/CLI.md index 87d8218..87fb9cf 100644 --- a/CLI.md +++ b/CLI.md @@ -5,7 +5,7 @@ This document describes all available command-line options for Dispenser. ## Usage ```sh -dispenser [OPTIONS] +dispenser [OPTIONS] [COMMAND] ``` ## Options @@ -48,6 +48,8 @@ No referenced variables ------------------------------------------------------------------------------- ``` + + ### `-p, --pid-file ` Specify the path to the PID file. This file is used to track the running Dispenser process and is required for sending signals with the `--signal` flag. @@ -94,6 +96,29 @@ Display the current version of Dispenser. dispenser --version ``` +## Subcommands + +### `dev` + +Starts Dispenser in local development mode. + +**Key features:** +- **Implicit Simulation**: Automatically enables SSL simulation (self-signed certificates) for all proxy hosts. +- **Selective Loading**: Only loads and renders services specified with the `--service` flag. +- **Dependency Pruning**: Automatically removes dependencies on services that are not being loaded, allowing selected services to start immediately without waiting for missing dependencies. + +**Options:** +- `-s, --service `: The name or path of the service(s) to run. Can be specified multiple times (e.g., `-s api -s db` or `-s api db`). + +**Example:** +```sh +# Only run the 'api' service +dispenser dev --service api + +# Run specific services and bypass others +dispenser dev -s web -s db +``` + ## Common Usage Patterns ### Running in Foreground (for testing) diff --git a/Cargo.lock b/Cargo.lock index 636b58c..8a7d011 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2,6 +2,34 @@ # It is not intended for manual editing. 
version = 4 +[[package]] +name = "addr2line" +version = "0.25.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b5d307320b3181d6d7954e663bd7c774a838b8220fe0593c86d9fb09f498b4b" +dependencies = [ + "gimli", +] + +[[package]] +name = "adler2" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa" + +[[package]] +name = "ahash" +version = "0.8.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a15f179cd60c4584b8a8c596927aadc462e27f2ca70c04e0071964a73ba7a75" +dependencies = [ + "cfg-if", + "getrandom 0.3.4", + "once_cell", + "version_check", + "zerocopy", +] + [[package]] name = "aho-corasick" version = "1.1.3" @@ -11,6 +39,27 @@ dependencies = [ "memchr", ] +[[package]] +name = "alloc-no-stdlib" +version = "2.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc7bb162ec39d46ab1ca8c77bf72e890535becd1751bb45f64c597edb4c8c6b3" + +[[package]] +name = "alloc-stdlib" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94fb8275041c72129eb51b7d0322c29b8387a0386127718b096429201a5d6ece" +dependencies = [ + "alloc-no-stdlib", +] + +[[package]] +name = "allocator-api2" +version = "0.2.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" + [[package]] name = "android_system_properties" version = "0.1.5" @@ -69,6 +118,82 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "arc-swap" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51d03449bb8ca2cc2ef70869af31463d1ae5ccc8fa3e334b307203fbf815207e" +dependencies = [ + "rustversion", +] + +[[package]] +name = "arrayvec" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" + +[[package]] +name = "asn1-rs" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56624a96882bb8c26d61312ae18cb45868e5a9992ea73c58e45c3101e56a1e60" +dependencies = [ + "asn1-rs-derive", + "asn1-rs-impl", + "displaydoc", + "nom", + "num-traits", + "rusticata-macros", + "thiserror 2.0.17", + "time", +] + +[[package]] +name = "asn1-rs-derive" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3109e49b1e4909e9db6515a30c633684d68cdeaa252f215214cb4fa1a5bfee2c" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.87", + "synstructure", +] + +[[package]] +name = "asn1-rs-impl" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.87", +] + +[[package]] +name = "async-stream" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476" +dependencies = [ + "async-stream-impl", + "futures-core", + "pin-project-lite", +] + +[[package]] +name = "async-stream-impl" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.87", +] + [[package]] name = "async-trait" version = "0.1.89" @@ -77,7 +202,7 @@ checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.87", ] [[package]] @@ -86,24 +211,142 @@ version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" +[[package]] +name = "atty" +version = "0.2.14" +source 
= "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" +dependencies = [ + "hermit-abi", + "libc", + "winapi", +] + [[package]] name = "autocfg" version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" +[[package]] +name = "aws-lc-rs" +version = "1.15.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a88aab2464f1f25453baa7a07c84c5b7684e274054ba06817f382357f77a288" +dependencies = [ + "aws-lc-sys", + "untrusted 0.7.1", + "zeroize", +] + +[[package]] +name = "aws-lc-sys" +version = "0.35.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b45afffdee1e7c9126814751f88dddc747f41d91da16c9551a0f1e8a11e788a1" +dependencies = [ + "cc", + "cmake", + "dunce", + "fs_extra", +] + +[[package]] +name = "backtrace" +version = "0.3.76" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb531853791a215d7c62a30daf0dde835f381ab5de4589cfe7c649d2cbe92bd6" +dependencies = [ + "addr2line", + "cfg-if", + "libc", + "miniz_oxide", + "object", + "rustc-demangle", + "windows-link", +] + [[package]] name = "base64" version = "0.22.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" +[[package]] +name = "bitflags" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" + [[package]] name = "bitflags" version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "812e12b5285cc515a9c72a5c1d3b6d46a19dac5acfef5265968c166106e31dd3" +[[package]] +name = "blake2" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"46502ad458c9a52b69d4d4d32775c788b7a1b85e8bc9d482d92250fc0e3f8efe" +dependencies = [ + "digest", +] + +[[package]] +name = "block-buffer" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" +dependencies = [ + "generic-array", +] + +[[package]] +name = "bollard" +version = "0.19.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87a52479c9237eb04047ddb94788c41ca0d26eaff8b697ecfbb4c32f7fdc3b1b" +dependencies = [ + "base64", + "bollard-stubs", + "bytes", + "futures-core", + "futures-util", + "hex", + "http", + "http-body-util", + "hyper", + "hyper-named-pipe", + "hyper-util", + "hyperlocal", + "log", + "pin-project-lite", + "serde", + "serde_derive", + "serde_json", + "serde_repr", + "serde_urlencoded", + "thiserror 2.0.17", + "tokio", + "tokio-util", + "tower-service", + "url", + "winapi", +] + +[[package]] +name = "bollard-stubs" +version = "1.49.1-rc.28.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5731fe885755e92beff1950774068e0cae67ea6ec7587381536fca84f1779623" +dependencies = [ + "serde", + "serde_json", + "serde_repr", + "serde_with", +] + [[package]] name = "bon" version = "3.8.1" @@ -126,7 +369,28 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn", + "syn 2.0.87", +] + +[[package]] +name = "brotli" +version = "3.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d640d25bc63c50fb1f0b545ffd80207d2e10a4c965530809b40ba3386825c391" +dependencies = [ + "alloc-no-stdlib", + "alloc-stdlib", + "brotli-decompressor", +] + +[[package]] +name = "brotli-decompressor" +version = "2.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4e2e4afe60d7dd600fdd3de8d0f08c2b7ec039712e3b6137ff98b7004e82de4f" +dependencies = [ + "alloc-no-stdlib", + "alloc-stdlib", ] [[package]] @@ -135,6 +399,12 @@ version = "3.19.0" source 
= "registry+https://github.com/rust-lang/crates.io-index" checksum = "46c5e41b57b8bba42a04676d81cb89e9ee8e859a1a66f80a5a72e1cb76b34d43" +[[package]] +name = "byteorder" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" + [[package]] name = "bytes" version = "1.11.0" @@ -146,14 +416,50 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.39" +version = "1.2.51" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1354349954c6fc9cb0deab020f27f783cf0b604e8bb754dc4658ecf0d29c35f" +checksum = "7a0aeaff4ff1a90589618835a598e545176939b97874f7abc7851caa0618f203" dependencies = [ "find-msvc-tools", + "jobserver", + "libc", "shlex", ] +[[package]] +name = "cesu8" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d43a04d8753f35258c91f8ec639f792891f748a1edbd759cf1dcea3382ad83c" + +[[package]] +name = "cf-rustracing" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93f85c3824e4191621dec0551e3cef3d511f329da9a8990bf3e450a85651d97e" +dependencies = [ + "backtrace", + "rand 0.8.5", + "tokio", + "trackable", +] + +[[package]] +name = "cf-rustracing-jaeger" +version = "1.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a6a5f80d44c257c3300a7f45ada676c211e64bbbac591bbec19344a8f61fbcab" +dependencies = [ + "cf-rustracing", + "hostname", + "local-ip-address", + "percent-encoding", + "rand 0.9.2", + "thrift_codec", + "tokio", + "trackable", +] + [[package]] name = "cfg-if" version = "1.0.3" @@ -180,6 +486,23 @@ dependencies = [ "windows-link", ] +[[package]] +name = "clap" +version = "3.2.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ea181bf566f71cb9a5d17a59e1871af638180a18fb0035c92ae62b705207123" +dependencies = [ + "atty", + "bitflags 1.3.2", + "clap_derive 3.2.25", + 
"clap_lex 0.2.4", + "indexmap 1.9.3", + "once_cell", + "strsim 0.10.0", + "termcolor", + "textwrap", +] + [[package]] name = "clap" version = "4.5.18" @@ -187,7 +510,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b0956a43b323ac1afaffc053ed5c4b7c1f1800bacd1683c353aabbb752515dd3" dependencies = [ "clap_builder", - "clap_derive", + "clap_derive 4.5.18", ] [[package]] @@ -198,8 +521,21 @@ checksum = "4d72166dd41634086d5803a47eb71ae740e61d84709c36f3c34110173db3961b" dependencies = [ "anstream", "anstyle", - "clap_lex", - "strsim", + "clap_lex 0.7.2", + "strsim 0.11.1", +] + +[[package]] +name = "clap_derive" +version = "3.2.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae6371b8bdc8b7d3959e9cf7b22d4435ef3e79e138688421ec654acf8c81b008" +dependencies = [ + "heck 0.4.1", + "proc-macro-error", + "proc-macro2", + "quote", + "syn 1.0.109", ] [[package]] @@ -208,10 +544,19 @@ version = "4.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ac6a0c7b1a9e9a5186361f67dfa1b88213572f427fb9ab038efb2bd8c582dab" dependencies = [ - "heck", + "heck 0.5.0", "proc-macro2", "quote", - "syn", + "syn 2.0.87", +] + +[[package]] +name = "clap_lex" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2850f2f5a82cbf437dd5af4d49848fbdfc27c157c3d010345776f952765261c5" +dependencies = [ + "os_str_bytes", ] [[package]] @@ -220,18 +565,56 @@ version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97" +[[package]] +name = "cmake" +version = "0.1.57" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75443c44cd6b379beb8c5b45d85d0773baf31cce901fe7bb252f4eff3008ef7d" +dependencies = [ + "cc", +] + [[package]] name = "colorchoice" version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"d3fd119d74b830634cea2a0f58bbd0d54540518a14397557951e79340abc28c0" +[[package]] +name = "combine" +version = "4.6.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba5a308b75df32fe02788e748662718f03fde005016435c444eea572398219fd" +dependencies = [ + "bytes", + "memchr", +] + +[[package]] +name = "core-foundation" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2a6cd9ae233e7f62ba4e9353e81a88df7fc8a5987b8d445b4d90c879bd156f6" +dependencies = [ + "core-foundation-sys", + "libc", +] + [[package]] name = "core-foundation-sys" version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" +[[package]] +name = "crc32fast" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9481c1c90cbf2ac953f07c8d4a58aa3945c425b7185c9154d67a65e4230da511" +dependencies = [ + "cfg-if", +] + [[package]] name = "cron" version = "0.15.0" @@ -244,6 +627,40 @@ dependencies = [ "winnow", ] +[[package]] +name = "crossbeam-queue" +version = "0.3.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f58bbc28f91df819d0aa2a2c00cd19754769c2fad90579b3592b1c9ba7a3115" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-utils" +version = "0.8.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" + +[[package]] +name = "crypto-common" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78c8292055d1c1df0cce5d180393dc8cce0abec0a7102adb6c7b1eef6016d60a" +dependencies = [ + "generic-array", + "typenum", +] + +[[package]] +name = "daemonize" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"ab8bfdaacb3c887a54d41bdf48d3af8873b3f5566469f8ba21b92057509f116e" +dependencies = [ + "libc", +] + [[package]] name = "darling" version = "0.21.3" @@ -264,8 +681,8 @@ dependencies = [ "ident_case", "proc-macro2", "quote", - "strsim", - "syn", + "strsim 0.11.1", + "syn 2.0.87", ] [[package]] @@ -276,43 +693,98 @@ checksum = "d38308df82d1080de0afee5d069fa14b0326a88c14f15c5ccda35b4a6c414c81" dependencies = [ "darling_core", "quote", - "syn", + "syn 2.0.87", ] [[package]] -name = "deranged" -version = "0.5.5" +name = "data-encoding" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ececcb659e7ba858fb4f10388c250a7252eb0a27373f1a72b8748afdd248e587" +checksum = "2a2330da5de22e8a3cb63252ce2abb30116bf5265e89c0e01bc17015ce30a476" + +[[package]] +name = "der-parser" +version = "10.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07da5016415d5a3c4dd39b11ed26f915f52fc4e0dc197d87908bc916e51bc1a6" +dependencies = [ + "asn1-rs", + "displaydoc", + "nom", + "num-bigint", + "num-traits", + "rusticata-macros", +] + +[[package]] +name = "deranged" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ececcb659e7ba858fb4f10388c250a7252eb0a27373f1a72b8748afdd248e587" dependencies = [ "powerfmt", "serde_core", ] +[[package]] +name = "derivative" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "digest" +version = "0.10.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" +dependencies = [ + "block-buffer", + "crypto-common", + "subtle", +] + [[package]] name = "dispenser" -version = "0.8.0" +version = "0.10.0" dependencies = [ + "async-trait", "base64", + "bollard", 
"chrono", - "clap", + "clap 4.5.18", "cron", "env_logger", "futures", "futures-util", "google-cloud-gax", "google-cloud-secretmanager-v1", + "http", + "instant-acme", "log", "minijinja", - "nix", + "nix 0.29.0", + "openssl", + "pingora", + "pingora-core", + "pingora-error", + "pingora-http", + "pingora-load-balancing", + "pingora-proxy", + "rcgen", "sd-notify", "serde", "serde_json", "signal-hook", - "thiserror", + "thiserror 2.0.17", "tokio", "toml", "urlencoding", + "x509-parser", ] [[package]] @@ -323,15 +795,27 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.87", ] +[[package]] +name = "dunce" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813" + [[package]] name = "dyn-clone" version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d0881ea181b1df73ff77ffaaf9c7544ecc11e82fba9b5f27b262a3c73a332555" +[[package]] +name = "either" +version = "1.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" + [[package]] name = "env_filter" version = "0.1.2" @@ -363,9 +847,20 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "find-msvc-tools" -version = "0.1.2" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ced73b1dacfc750a6db6c0a0c3a3853c8b41997e2e2c563dc90804ae6867959" +checksum = "645cbb3a84e60b7531617d5ae4e57f7e27308f6445f5abf653209ea76dec8dff" + +[[package]] +name = "flate2" +version = "1.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfe33edd8e85a12a67454e37f8c75e730830d83e313556ab9ebf9ee7fbeb3bfb" +dependencies = [ + "crc32fast", + "libz-ng-sys", + "miniz_oxide", +] [[package]] name = "fnv" @@ 
-373,6 +868,27 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" +[[package]] +name = "foldhash" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2" + +[[package]] +name = "foreign-types" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" +dependencies = [ + "foreign-types-shared", +] + +[[package]] +name = "foreign-types-shared" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" + [[package]] name = "form_urlencoded" version = "1.2.2" @@ -382,6 +898,12 @@ dependencies = [ "percent-encoding", ] +[[package]] +name = "fs_extra" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c" + [[package]] name = "futures" version = "0.3.31" @@ -438,7 +960,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.87", ] [[package]] @@ -471,6 +993,16 @@ dependencies = [ "slab", ] +[[package]] +name = "generic-array" +version = "0.14.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" +dependencies = [ + "typenum", + "version_check", +] + [[package]] name = "getrandom" version = "0.2.16" @@ -498,6 +1030,12 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "gimli" +version = "0.32.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e629b9b98ef3dd8afe6ca2bd0f89306cec16d43d907889945bc5d6687f2f13c7" + [[package]] 
name = "google-cloud-auth" version = "1.3.0" @@ -516,7 +1054,7 @@ dependencies = [ "rustls-pemfile", "serde", "serde_json", - "thiserror", + "thiserror 2.0.17", "time", "tokio", ] @@ -534,10 +1072,10 @@ dependencies = [ "google-cloud-wkt", "http", "pin-project", - "rand", + "rand 0.9.2", "serde", "serde_json", - "thiserror", + "thiserror 2.0.17", "tokio", ] @@ -561,7 +1099,7 @@ dependencies = [ "rustc_version", "serde", "serde_json", - "thiserror", + "thiserror 2.0.17", "tokio", "tracing", ] @@ -663,11 +1201,30 @@ dependencies = [ "serde", "serde_json", "serde_with", - "thiserror", + "thiserror 2.0.17", "time", "url", ] +[[package]] +name = "h2" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3c0b69cfcb4e1b9f1bf2f53f95f766e4661169728ec61cd3fe5a0166f2d1386" +dependencies = [ + "atomic-waker", + "bytes", + "fnv", + "futures-core", + "futures-sink", + "http", + "indexmap 2.5.0", + "slab", + "tokio", + "tokio-util", + "tracing", +] + [[package]] name = "hashbrown" version = "0.12.3" @@ -680,18 +1237,55 @@ version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" +[[package]] +name = "hashbrown" +version = "0.15.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9229cfe53dfd69f0609a49f65461bd93001ea1ef889cd5529dd176593f5338a1" +dependencies = [ + "allocator-api2", + "equivalent", + "foldhash", +] + +[[package]] +name = "heck" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" + [[package]] name = "heck" version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" +[[package]] +name = "hermit-abi" +version = "0.1.19" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" +dependencies = [ + "libc", +] + [[package]] name = "hex" version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" +[[package]] +name = "hostname" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "617aaa3557aef3810a6369d0a99fac8a080891b68bd9f9812a1eeda0c0730cbd" +dependencies = [ + "cfg-if", + "libc", + "windows-link", +] + [[package]] name = "http" version = "1.4.0" @@ -731,6 +1325,12 @@ version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6dbf3de79e51f3d586ab4cb9d5c3e2c14aa28ed23d180cf89b4df0454a69cc87" +[[package]] +name = "httpdate" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" + [[package]] name = "humantime" version = "2.1.0" @@ -747,9 +1347,11 @@ dependencies = [ "bytes", "futures-channel", "futures-core", + "h2", "http", "http-body", "httparse", + "httpdate", "itoa", "pin-project-lite", "pin-utils", @@ -758,6 +1360,21 @@ dependencies = [ "want", ] +[[package]] +name = "hyper-named-pipe" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73b7d8abf35697b81a825e386fc151e0d503e8cb5fcb93cc8669c376dfd6f278" +dependencies = [ + "hex", + "hyper", + "hyper-util", + "pin-project-lite", + "tokio", + "tower-service", + "winapi", +] + [[package]] name = "hyper-rustls" version = "0.27.7" @@ -768,7 +1385,9 @@ dependencies = [ "hyper", "hyper-util", "rustls", + "rustls-native-certs", "rustls-pki-types", + "rustls-platform-verifier", "tokio", "tokio-rustls", "tower-service", @@ -799,6 +1418,21 @@ dependencies = [ "tracing", ] +[[package]] +name = "hyperlocal" +version = "0.9.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "986c5ce3b994526b3cd75578e62554abd09f0899d6206de48b3e96ab34ccc8c7" +dependencies = [ + "hex", + "http-body-util", + "hyper", + "hyper-util", + "pin-project-lite", + "tokio", + "tower-service", +] + [[package]] name = "iana-time-zone" version = "0.1.64" @@ -953,6 +1587,32 @@ dependencies = [ "serde", ] +[[package]] +name = "instant-acme" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e9e04488259349908dd13fadaf3a97523b61da296a468c490e112ecc73d28b47" +dependencies = [ + "async-trait", + "aws-lc-rs", + "base64", + "bytes", + "http", + "http-body", + "http-body-util", + "httpdate", + "hyper", + "hyper-rustls", + "hyper-util", + "rcgen", + "rustls", + "rustls-pki-types", + "serde", + "serde_json", + "thiserror 2.0.17", + "tokio", +] + [[package]] name = "ipnet" version = "2.11.0" @@ -981,6 +1641,38 @@ version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" +[[package]] +name = "jni" +version = "0.21.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a87aa2bb7d2af34197c04845522473242e1aa17c12f4935d5856491a7fb8c97" +dependencies = [ + "cesu8", + "cfg-if", + "combine", + "jni-sys", + "log", + "thiserror 1.0.69", + "walkdir", + "windows-sys 0.45.0", +] + +[[package]] +name = "jni-sys" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8eaf4bc02d17cbdd7ff4c7438cafcdf7fb9a4613313ad11b4f8fefe7d3fa0130" + +[[package]] +name = "jobserver" +version = "0.1.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9afb3de4395d6b3e67a780b6de64b51c978ecf11cb9a462c66be7d4ca9039d33" +dependencies = [ + "getrandom 0.3.4", + "libc", +] + [[package]] name = "js-sys" version = "0.3.81" @@ -1003,12 +1695,40 @@ version = "0.2.178" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "37c93d8daa9d8a012fd8ab92f088405fb202ea0b6ab73ee2482ae66af4f42091" +[[package]] +name = "libz-ng-sys" +version = "1.1.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7bf914b7dd154ca9193afec311d8e39345c1bd93b48b3faa77329f0db8f553c0" +dependencies = [ + "cmake", + "libc", +] + +[[package]] +name = "linked-hash-map" +version = "0.5.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" + [[package]] name = "litemap" version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6373607a59f0be73a39b6fe456b8192fcc3585f602af20751600e974dd455e77" +[[package]] +name = "local-ip-address" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "656b3b27f8893f7bbf9485148ff9a65f019e3f33bd5cdc87c83cab16b3fd9ec8" +dependencies = [ + "libc", + "neli", + "thiserror 2.0.17", + "windows-sys 0.59.0", +] + [[package]] name = "lock_api" version = "0.4.14" @@ -1024,6 +1744,15 @@ version = "0.4.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" +[[package]] +name = "lru" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f8cc7106155f10bdf99a6f379688f543ad6596a415375b36a59a054ceda1198" +dependencies = [ + "hashbrown 0.15.5", +] + [[package]] name = "lru-slab" version = "0.1.2" @@ -1036,6 +1765,15 @@ version = "2.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" +[[package]] +name = "memoffset" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce" +dependencies = [ + "autocfg", +] 
+ [[package]] name = "minijinja" version = "2.12.0" @@ -1047,115 +1785,548 @@ dependencies = [ ] [[package]] -name = "mio" -version = "1.1.1" +name = "minimal-lexical" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" + +[[package]] +name = "miniz_oxide" +version = "0.8.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316" +dependencies = [ + "adler2", + "simd-adler32", +] + +[[package]] +name = "mio" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a69bcab0ad47271a0234d9422b131806bf3968021e5dc9328caf2d4cd58557fc" +dependencies = [ + "libc", + "wasi", + "windows-sys 0.61.1", +] + +[[package]] +name = "neli" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93062a0dce6da2517ea35f301dfc88184ce18d3601ec786a727a87bf535deca9" +dependencies = [ + "byteorder", + "libc", + "log", + "neli-proc-macros", +] + +[[package]] +name = "neli-proc-macros" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c8034b7fbb6f9455b2a96c19e6edf8dc9fc34c70449938d8ee3b4df363f61fe" +dependencies = [ + "either", + "proc-macro2", + "quote", + "serde", + "syn 1.0.109", +] + +[[package]] +name = "nix" +version = "0.24.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa52e972a9a719cecb6864fb88568781eb706bac2cd1d4f04a648542dbf78069" +dependencies = [ + "bitflags 1.3.2", + "cfg-if", + "libc", + "memoffset", +] + +[[package]] +name = "nix" +version = "0.29.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46" +dependencies = [ + "bitflags 2.10.0", + "cfg-if", + "cfg_aliases", + "libc", +] + +[[package]] +name = "nom" +version = 
"7.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" +dependencies = [ + "memchr", + "minimal-lexical", +] + +[[package]] +name = "num-bigint" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" +dependencies = [ + "num-integer", + "num-traits", +] + +[[package]] +name = "num-conv" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" + +[[package]] +name = "num-integer" +version = "0.1.46" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" +dependencies = [ + "num-traits", +] + +[[package]] +name = "num-traits" +version = "0.2.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" +dependencies = [ + "autocfg", +] + +[[package]] +name = "object" +version = "0.37.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff76201f031d8863c38aa7f905eca4f53abbfa15f609db4277d44cd8938f33fe" +dependencies = [ + "memchr", +] + +[[package]] +name = "oid-registry" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "12f40cff3dde1b6087cc5d5f5d4d65712f34016a03ed60e9c08dcc392736b5b7" +dependencies = [ + "asn1-rs", +] + +[[package]] +name = "once_cell" +version = "1.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" + +[[package]] +name = "openssl" +version = "0.10.75" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08838db121398ad17ab8531ce9de97b244589089e290a384c900cb9ff7434328" 
+dependencies = [ + "bitflags 2.10.0", + "cfg-if", + "foreign-types", + "libc", + "once_cell", + "openssl-macros", + "openssl-sys", +] + +[[package]] +name = "openssl-macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.87", +] + +[[package]] +name = "openssl-probe" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" + +[[package]] +name = "openssl-probe" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f50d9b3dabb09ecd771ad0aa242ca6894994c130308ca3d7684634df8037391" + +[[package]] +name = "openssl-src" +version = "300.5.4+3.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a507b3792995dae9b0df8a1c1e3771e8418b7c2d9f0baeba32e6fe8b06c7cb72" +dependencies = [ + "cc", +] + +[[package]] +name = "openssl-sys" +version = "0.9.111" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "82cab2d520aa75e3c58898289429321eb788c3106963d0dc886ec7a5f4adc321" +dependencies = [ + "cc", + "libc", + "openssl-src", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "opentelemetry-semantic-conventions" +version = "0.31.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e62e29dfe041afb8ed2a6c9737ab57db4907285d999ef8ad3a59092a36bdc846" + +[[package]] +name = "os_str_bytes" +version = "6.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2355d85b9a3786f481747ced0e0ff2ba35213a1f9bd406ed906554d7af805a1" + +[[package]] +name = "parking_lot" +version = "0.12.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93857453250e3077bd71ff98b6a65ea6621a19bb0f559a85248955ac12c45a1a" +dependencies = [ + "lock_api", + 
"parking_lot_core", +] + +[[package]] +name = "parking_lot_core" +version = "0.9.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2621685985a2ebf1c516881c026032ac7deafcda1a2c9b7850dc81e3dfcb64c1" +dependencies = [ + "cfg-if", + "libc", + "redox_syscall", + "smallvec", + "windows-link", +] + +[[package]] +name = "pem" +version = "3.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d30c53c26bc5b31a98cd02d20f25a7c8567146caf63ed593a9d87b2775291be" +dependencies = [ + "base64", + "serde_core", +] + +[[package]] +name = "percent-encoding" +version = "2.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220" + +[[package]] +name = "pin-project" +version = "1.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677f1add503faace112b9f1373e43e9e054bfdd22ff1a63c1bc485eaec6a6a8a" +dependencies = [ + "pin-project-internal", +] + +[[package]] +name = "pin-project-internal" +version = "1.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.87", +] + +[[package]] +name = "pin-project-lite" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" + +[[package]] +name = "pin-utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" + +[[package]] +name = "pingora" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a1f02a6347e81953ab831fdcf090a028db12d67ec3badf47831d1299dac6e20" +dependencies = [ + "pingora-cache", + "pingora-core", + "pingora-http", + 
"pingora-load-balancing", + "pingora-proxy", + "pingora-timeout", +] + +[[package]] +name = "pingora-cache" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ef622051fbb2cb98a524df3a8112f02d0919ccda600a44d705ec550f1a28fe2" +dependencies = [ + "ahash", + "async-trait", + "blake2", + "bytes", + "cf-rustracing", + "cf-rustracing-jaeger", + "hex", + "http", + "httparse", + "httpdate", + "indexmap 1.9.3", + "log", + "lru", + "once_cell", + "parking_lot", + "pingora-core", + "pingora-error", + "pingora-header-serde", + "pingora-http", + "pingora-lru", + "pingora-timeout", + "rand 0.8.5", + "regex", + "rmp", + "rmp-serde", + "serde", + "strum", + "tokio", +] + +[[package]] +name = "pingora-core" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a69bcab0ad47271a0234d9422b131806bf3968021e5dc9328caf2d4cd58557fc" +checksum = "76f63d3f67d99c95a1f85623fc43242fd644dd12ccbaa18c38a54e1580c6846a" dependencies = [ + "ahash", + "async-trait", + "brotli", + "bytes", + "chrono", + "clap 3.2.25", + "daemonize", + "derivative", + "flate2", + "futures", + "h2", + "http", + "httparse", + "httpdate", "libc", - "wasi", - "windows-sys 0.61.1", + "log", + "nix 0.24.3", + "once_cell", + "openssl-probe 0.1.6", + "parking_lot", + "percent-encoding", + "pingora-error", + "pingora-http", + "pingora-openssl", + "pingora-pool", + "pingora-runtime", + "pingora-timeout", + "prometheus", + "rand 0.8.5", + "regex", + "serde", + "serde_yaml", + "sfv", + "socket2", + "strum", + "strum_macros", + "tokio", + "tokio-test", + "unicase", + "windows-sys 0.59.0", + "zstd", ] [[package]] -name = "nix" -version = "0.29.0" +name = "pingora-error" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46" -dependencies = [ - "bitflags", - "cfg-if", - "cfg_aliases", - "libc", -] +checksum = 
"52119570d3f4644e09654ad24df2b7d851bf12eaa8c4148b4674c7f90916598e" [[package]] -name = "num-conv" -version = "0.1.0" +name = "pingora-header-serde" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" +checksum = "252a16def05c7adbbdda776e87b2be36e9481c8a77249207a2f3b563e8933b35" +dependencies = [ + "bytes", + "http", + "httparse", + "pingora-error", + "pingora-http", + "thread_local", + "zstd", + "zstd-safe", +] [[package]] -name = "num-traits" -version = "0.2.19" +name = "pingora-http" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" +checksum = "5a3542fd0fd0a83212882c5066ae739ba51804f20d624ff7e12ec85113c5c89a" dependencies = [ - "autocfg", + "bytes", + "http", + "pingora-error", ] [[package]] -name = "once_cell" -version = "1.21.3" +name = "pingora-ketama" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" +checksum = "3f5dd8546b1874d5cfca594375c1cfb852c3dffd4f060428fa031a6e790dea18" +dependencies = [ + "crc32fast", +] [[package]] -name = "opentelemetry-semantic-conventions" -version = "0.31.0" +name = "pingora-load-balancing" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e62e29dfe041afb8ed2a6c9737ab57db4907285d999ef8ad3a59092a36bdc846" +checksum = "4b5bb0314830a64b73b50b3782f3089f87947b61b4324c804d6f8d4ff9ce1c70" +dependencies = [ + "arc-swap", + "async-trait", + "derivative", + "fnv", + "futures", + "http", + "log", + "pingora-core", + "pingora-error", + "pingora-http", + "pingora-ketama", + "pingora-runtime", + "rand 0.8.5", + "tokio", +] [[package]] -name = "parking_lot" -version = "0.12.5" +name = "pingora-lru" +version = "0.6.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "93857453250e3077bd71ff98b6a65ea6621a19bb0f559a85248955ac12c45a1a" +checksum = "ba89e4400cb978f0d7be1c14bd7ab4168c8e2c00d97ff19f964fc0048780237c" dependencies = [ - "lock_api", - "parking_lot_core", + "arrayvec", + "hashbrown 0.15.5", + "parking_lot", + "rand 0.8.5", ] [[package]] -name = "parking_lot_core" -version = "0.9.12" +name = "pingora-openssl" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2621685985a2ebf1c516881c026032ac7deafcda1a2c9b7850dc81e3dfcb64c1" +checksum = "d390f21024c6c8e171fd9ee82aee0cacdd324d079535d1b47011acabed87673b" dependencies = [ - "cfg-if", + "foreign-types", "libc", - "redox_syscall", - "smallvec", - "windows-link", + "openssl", + "openssl-sys", + "tokio-openssl", ] [[package]] -name = "percent-encoding" -version = "2.3.2" +name = "pingora-pool" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220" +checksum = "996c574f30a6e1ad10b47ac1626a86e0e47d5075953dd049d60df16ba5f7076e" +dependencies = [ + "crossbeam-queue", + "log", + "lru", + "parking_lot", + "pingora-timeout", + "thread_local", + "tokio", +] [[package]] -name = "pin-project" -version = "1.1.10" +name = "pingora-proxy" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "677f1add503faace112b9f1373e43e9e054bfdd22ff1a63c1bc485eaec6a6a8a" +checksum = "6c4097fd2639905bf5b81f3618551cd826d5e03aac063e17fd7a4137f19c1a5b" dependencies = [ - "pin-project-internal", + "async-trait", + "bytes", + "clap 3.2.25", + "futures", + "h2", + "http", + "log", + "once_cell", + "pingora-cache", + "pingora-core", + "pingora-error", + "pingora-http", + "rand 0.8.5", + "regex", + "tokio", ] [[package]] -name = "pin-project-internal" -version = "1.1.10" +name = "pingora-runtime" +version = "0.6.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" +checksum = "8ccc165021cf55a39b9e760121b22c4260b17a0b2c530d5b93092fc5bc765b94" dependencies = [ - "proc-macro2", - "quote", - "syn", + "once_cell", + "rand 0.8.5", + "thread_local", + "tokio", ] [[package]] -name = "pin-project-lite" -version = "0.2.16" +name = "pingora-timeout" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" +checksum = "548cd21d41611c725827677937e68f2cd008bbfa09f3416d3fbad07e1e42f6d7" +dependencies = [ + "once_cell", + "parking_lot", + "pin-project-lite", + "thread_local", + "tokio", +] [[package]] -name = "pin-utils" -version = "0.1.0" +name = "pkg-config" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" +checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" [[package]] name = "potential_utf" @@ -1188,7 +2359,31 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "64d1ec885c64d0457d564db4ec299b2dae3f9c02808b8ad9c3a089c591b18033" dependencies = [ "proc-macro2", - "syn", + "syn 2.0.87", +] + +[[package]] +name = "proc-macro-error" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" +dependencies = [ + "proc-macro-error-attr", + "proc-macro2", + "quote", + "syn 1.0.109", + "version_check", +] + +[[package]] +name = "proc-macro-error-attr" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" +dependencies = [ + "proc-macro2", + "quote", + "version_check", ] [[package]] @@ -1200,6 +2395,27 @@ dependencies = [ 
"unicode-ident", ] +[[package]] +name = "prometheus" +version = "0.13.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d33c28a30771f7f96db69893f78b857f7450d7e0237e9c8fc6427a81bae7ed1" +dependencies = [ + "cfg-if", + "fnv", + "lazy_static", + "memchr", + "parking_lot", + "protobuf", + "thiserror 1.0.69", +] + +[[package]] +name = "protobuf" +version = "2.28.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "106dd99e98437432fed6519dedecfade6a06a73bb7b2a1e019fdd2bee5778d94" + [[package]] name = "quinn" version = "0.11.9" @@ -1214,7 +2430,7 @@ dependencies = [ "rustc-hash", "rustls", "socket2", - "thiserror", + "thiserror 2.0.17", "tokio", "tracing", "web-time", @@ -1229,13 +2445,13 @@ dependencies = [ "bytes", "getrandom 0.3.4", "lru-slab", - "rand", + "rand 0.9.2", "ring", "rustc-hash", "rustls", "rustls-pki-types", "slab", - "thiserror", + "thiserror 2.0.17", "tinyvec", "tracing", "web-time", @@ -1270,14 +2486,35 @@ version = "5.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" +[[package]] +name = "rand" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +dependencies = [ + "libc", + "rand_chacha 0.3.1", + "rand_core 0.6.4", +] + [[package]] name = "rand" version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" dependencies = [ - "rand_chacha", - "rand_core", + "rand_chacha 0.9.0", + "rand_core 0.9.3", +] + +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core 0.6.4", ] [[package]] @@ -1287,7 +2524,16 @@ 
source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" dependencies = [ "ppv-lite86", - "rand_core", + "rand_core 0.9.3", +] + +[[package]] +name = "rand_core" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +dependencies = [ + "getrandom 0.2.16", ] [[package]] @@ -1299,13 +2545,28 @@ dependencies = [ "getrandom 0.3.4", ] +[[package]] +name = "rcgen" +version = "0.14.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ec0a99f2de91c3cddc84b37e7db80e4d96b743e05607f647eb236fc0455907f" +dependencies = [ + "aws-lc-rs", + "pem", + "ring", + "rustls-pki-types", + "time", + "x509-parser", + "yasna", +] + [[package]] name = "redox_syscall" version = "0.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed2bf2547551a7053d6fdfafda3f938979645c44812fbfcda098faae3f1a362d" dependencies = [ - "bitflags", + "bitflags 2.10.0", ] [[package]] @@ -1325,7 +2586,7 @@ checksum = "b7186006dcb21920990093f30e3dea63b7d6e977bf1256be20c3563a5db070da" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.87", ] [[package]] @@ -1405,10 +2666,45 @@ dependencies = [ "cfg-if", "getrandom 0.2.16", "libc", - "untrusted", + "untrusted 0.9.0", "windows-sys 0.52.0", ] +[[package]] +name = "rmp" +version = "0.8.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ba8be72d372b2c9b35542551678538b562e7cf86c3315773cae48dfbfe7790c" +dependencies = [ + "num-traits", +] + +[[package]] +name = "rmp-serde" +version = "1.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72f81bee8c8ef9b577d1681a70ebbc962c232461e397b22c208c43c04b67a155" +dependencies = [ + "rmp", + "serde", +] + +[[package]] +name = "rust_decimal" +version = "1.39.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "35affe401787a9bd846712274d97654355d21b2a2c092a3139aabe31e9022282" +dependencies = [ + "arrayvec", + "num-traits", +] + +[[package]] +name = "rustc-demangle" +version = "0.1.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56f7d92ca342cea22a06f2121d944b4fd82af56988c270852495420f961d4ace" + [[package]] name = "rustc-hash" version = "2.1.1" @@ -1424,12 +2720,22 @@ dependencies = [ "semver", ] +[[package]] +name = "rusticata-macros" +version = "4.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "faf0c4a6ece9950b9abdb62b1cfcf2a68b3b67a10ba445b3bb85be2a293d0632" +dependencies = [ + "nom", +] + [[package]] name = "rustls" version = "0.23.35" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "533f54bc6a7d4f647e46ad909549eda97bf5afc1585190ef692b4286b198bd8f" dependencies = [ + "aws-lc-rs", "log", "once_cell", "ring", @@ -1439,6 +2745,18 @@ dependencies = [ "zeroize", ] +[[package]] +name = "rustls-native-certs" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "612460d5f7bea540c490b2b6395d8e34a953e52b491accd6c86c8164c5932a63" +dependencies = [ + "openssl-probe 0.2.0", + "rustls-pki-types", + "schannel", + "security-framework", +] + [[package]] name = "rustls-pemfile" version = "2.2.0" @@ -1458,15 +2776,43 @@ dependencies = [ "zeroize", ] +[[package]] +name = "rustls-platform-verifier" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d99feebc72bae7ab76ba994bb5e121b8d83d910ca40b36e0921f53becc41784" +dependencies = [ + "core-foundation", + "core-foundation-sys", + "jni", + "log", + "once_cell", + "rustls", + "rustls-native-certs", + "rustls-platform-verifier-android", + "rustls-webpki", + "security-framework", + "security-framework-sys", + "webpki-root-certs", + "windows-sys 0.61.1", +] + +[[package]] +name = 
"rustls-platform-verifier-android" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f87165f0995f63a9fbeea62b64d10b4d9d8e78ec6d7d51fb2125fda7bb36788f" + [[package]] name = "rustls-webpki" version = "0.103.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2ffdfa2f5286e2247234e03f680868ac2815974dc39e00ea15adc445d0aafe52" dependencies = [ + "aws-lc-rs", "ring", "rustls-pki-types", - "untrusted", + "untrusted 0.9.0", ] [[package]] @@ -1481,6 +2827,24 @@ version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" +[[package]] +name = "same-file" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "schannel" +version = "0.1.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "891d81b926048e76efe18581bf793546b4c0eaf8448d72be8de2bbee5fd166e1" +dependencies = [ + "windows-sys 0.61.1", +] + [[package]] name = "schemars" version = "0.9.0" @@ -1517,6 +2881,29 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4646d6f919800cd25c50edb49438a1381e2cd4833c027e75e8897981c50b8b5e" +[[package]] +name = "security-framework" +version = "3.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b3297343eaf830f66ede390ea39da1d462b6b0c1b000f420d0a83f898bbbe6ef" +dependencies = [ + "bitflags 2.10.0", + "core-foundation", + "core-foundation-sys", + "libc", + "security-framework-sys", +] + +[[package]] +name = "security-framework-sys" +version = "2.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc1f0cbffaac4852523ce30d8bd3c5cdc873501d96ff467ca09b6767bb8cd5c0" +dependencies = [ + "core-foundation-sys", + 
"libc", +] + [[package]] name = "semver" version = "1.0.27" @@ -1550,7 +2937,7 @@ checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.87", ] [[package]] @@ -1566,6 +2953,17 @@ dependencies = [ "serde_core", ] +[[package]] +name = "serde_repr" +version = "0.1.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "175ee3e80ae9982737ca543e96133087cbd9a485eecc3bc4de9c1a37b47ea59c" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.87", +] + [[package]] name = "serde_spanned" version = "0.6.7" @@ -1615,7 +3013,30 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn", + "syn 2.0.87", +] + +[[package]] +name = "serde_yaml" +version = "0.8.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "578a7433b776b56a35785ed5ce9a7e777ac0598aac5a6dd1b4b18a307c7fc71b" +dependencies = [ + "indexmap 1.9.3", + "ryu", + "serde", + "yaml-rust", +] + +[[package]] +name = "sfv" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3fa1f336066b758b7c9df34ed049c0e693a426afe2b27ff7d5b14f410ab1a132" +dependencies = [ + "base64", + "indexmap 2.5.0", + "rust_decimal", ] [[package]] @@ -1643,6 +3064,12 @@ dependencies = [ "libc", ] +[[package]] +name = "simd-adler32" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e320a6c5ad31d271ad523dcf3ad13e2767ad8b1cb8f047f75a8aeaf8da139da2" + [[package]] name = "slab" version = "0.4.11" @@ -1671,18 +3098,57 @@ version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6ce2be8dc25455e1f91df71bfa12ad37d7af1092ae736f3a6cd0e37bc7810596" +[[package]] +name = "strsim" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" + [[package]] name = "strsim" version = "0.11.1" 
source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" +[[package]] +name = "strum" +version = "0.26.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8fec0f0aef304996cf250b31b5a10dee7980c85da9d759361292b8bca5a18f06" +dependencies = [ + "strum_macros", +] + +[[package]] +name = "strum_macros" +version = "0.26.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c6bee85a5a24955dc440386795aa378cd9cf82acd5f764469152d2270e581be" +dependencies = [ + "heck 0.5.0", + "proc-macro2", + "quote", + "rustversion", + "syn 2.0.87", +] + [[package]] name = "subtle" version = "2.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" +[[package]] +name = "syn" +version = "1.0.109" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + [[package]] name = "syn" version = "2.0.87" @@ -1711,7 +3177,31 @@ checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.87", +] + +[[package]] +name = "termcolor" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "textwrap" +version = "0.16.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c13547615a44dc9c452a8a534638acdf07120d4b6847c8178705da06306a3057" + +[[package]] +name = "thiserror" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" +dependencies = [ 
+ "thiserror-impl 1.0.69", ] [[package]] @@ -1720,7 +3210,18 @@ version = "2.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f63587ca0f12b72a0600bcba1d40081f830876000bb46dd2337a3051618f4fc8" dependencies = [ - "thiserror-impl", + "thiserror-impl 2.0.17", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.87", ] [[package]] @@ -1731,7 +3232,26 @@ checksum = "3ff15c8ecd7de3849db632e14d18d2571fa09dfc5ed93479bc4485c7a517c913" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.87", +] + +[[package]] +name = "thread_local" +version = "1.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f60246a4944f24f6e018aa17cdeffb7818b76356965d03b07d6a9886e8962185" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "thrift_codec" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83d957f535b242b91aa9f47bde08080f9a6fef276477e55b0079979d002759d5" +dependencies = [ + "byteorder", + "trackable", ] [[package]] @@ -1815,7 +3335,18 @@ checksum = "af407857209536a95c8e56f8231ef2c2e2aff839b22e07a1ffcbc617e9db9fa5" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.87", +] + +[[package]] +name = "tokio-openssl" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59df6849caa43bb7567f9a36f863c447d95a11d5903c9cc334ba32576a27eadd" +dependencies = [ + "openssl", + "openssl-sys", + "tokio", ] [[package]] @@ -1828,6 +3359,43 @@ dependencies = [ "tokio", ] +[[package]] +name = "tokio-stream" +version = "0.1.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047" +dependencies = [ + "futures-core", + "pin-project-lite", 
+ "tokio", +] + +[[package]] +name = "tokio-test" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2468baabc3311435b55dd935f702f42cd1b8abb7e754fb7dfb16bd36aa88f9f7" +dependencies = [ + "async-stream", + "bytes", + "futures-core", + "tokio", + "tokio-stream", +] + +[[package]] +name = "tokio-util" +version = "0.7.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2efa149fe76073d6e8fd97ef4f4eca7b67f599660115591483572e406e165594" +dependencies = [ + "bytes", + "futures-core", + "futures-sink", + "pin-project-lite", + "tokio", +] + [[package]] name = "toml" version = "0.8.19" @@ -1883,7 +3451,7 @@ version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d4e6559d53cc268e5031cd8429d05415bc4cb4aefc4aa5d6cc35fbf5b924a1f8" dependencies = [ - "bitflags", + "bitflags 2.10.0", "bytes", "futures-util", "http", @@ -1926,7 +3494,7 @@ checksum = "7490cfa5ec963746568740651ac6781f701c9c5ea257c58e057f3ba8cf69e8da" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.87", ] [[package]] @@ -1938,18 +3506,55 @@ dependencies = [ "once_cell", ] +[[package]] +name = "trackable" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b15bd114abb99ef8cee977e517c8f37aee63f184f2d08e3e6ceca092373369ae" +dependencies = [ + "trackable_derive", +] + +[[package]] +name = "trackable_derive" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebeb235c5847e2f82cfe0f07eb971d1e5f6804b18dac2ae16349cc604380f82f" +dependencies = [ + "quote", + "syn 1.0.109", +] + [[package]] name = "try-lock" version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" +[[package]] +name = "typenum" +version = "1.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"562d481066bde0658276a35467c4af00bdc6ee726305698a55b86e61d7ad82bb" + +[[package]] +name = "unicase" +version = "2.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75b844d17643ee918803943289730bec8aac480150456169e647ed0b576ba539" + [[package]] name = "unicode-ident" version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" +[[package]] +name = "untrusted" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" + [[package]] name = "untrusted" version = "0.9.0" @@ -1986,6 +3591,28 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" +[[package]] +name = "vcpkg" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" + +[[package]] +name = "version_check" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" + +[[package]] +name = "walkdir" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" +dependencies = [ + "same-file", + "winapi-util", +] + [[package]] name = "want" version = "0.3.1" @@ -2033,7 +3660,7 @@ dependencies = [ "log", "proc-macro2", "quote", - "syn", + "syn 2.0.87", "wasm-bindgen-shared", ] @@ -2068,7 +3695,7 @@ checksum = "9f07d2f20d4da7b26400c9f4a0511e6e0345b040694e8a75bd41d578fa4421d7" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.87", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -2102,6 +3729,15 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name 
= "webpki-root-certs" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "36a29fc0408b113f68cf32637857ab740edfafdf460c326cd2afaa2d84cc05dc" +dependencies = [ + "rustls-pki-types", +] + [[package]] name = "webpki-roots" version = "1.0.4" @@ -2111,6 +3747,37 @@ dependencies = [ "rustls-pki-types", ] +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-util" +version = "0.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" +dependencies = [ + "windows-sys 0.61.1", +] + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + [[package]] name = "windows-core" version = "0.62.1" @@ -2132,7 +3799,7 @@ checksum = "edb307e42a74fb6de9bf3a02d9712678b22399c87e6fa869d6dfcd8c1b7754e0" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.87", ] [[package]] @@ -2143,7 +3810,7 @@ checksum = "c0abd1ddbc6964ac14db11c7213d6532ef34bd9aa042c2e5935f59d7908b46a5" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.87", ] [[package]] @@ -2170,6 +3837,15 @@ dependencies = [ "windows-link", ] +[[package]] +name = "windows-sys" +version = "0.45.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" +dependencies = [ + 
"windows-targets 0.42.2", +] + [[package]] name = "windows-sys" version = "0.52.0" @@ -2179,6 +3855,15 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "windows-sys" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +dependencies = [ + "windows-targets 0.52.6", +] + [[package]] name = "windows-sys" version = "0.60.2" @@ -2197,6 +3882,21 @@ dependencies = [ "windows-link", ] +[[package]] +name = "windows-targets" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" +dependencies = [ + "windows_aarch64_gnullvm 0.42.2", + "windows_aarch64_msvc 0.42.2", + "windows_i686_gnu 0.42.2", + "windows_i686_msvc 0.42.2", + "windows_x86_64_gnu 0.42.2", + "windows_x86_64_gnullvm 0.42.2", + "windows_x86_64_msvc 0.42.2", +] + [[package]] name = "windows-targets" version = "0.52.6" @@ -2230,6 +3930,12 @@ dependencies = [ "windows_x86_64_msvc 0.53.1", ] +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" + [[package]] name = "windows_aarch64_gnullvm" version = "0.52.6" @@ -2242,6 +3948,12 @@ version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a9d8416fa8b42f5c947f8482c43e7d89e73a173cead56d044f6a56104a6d1b53" +[[package]] +name = "windows_aarch64_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" + [[package]] name = "windows_aarch64_msvc" version = "0.52.6" @@ -2254,6 +3966,12 @@ version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"b9d782e804c2f632e395708e99a94275910eb9100b2114651e04744e9b125006" +[[package]] +name = "windows_i686_gnu" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" + [[package]] name = "windows_i686_gnu" version = "0.52.6" @@ -2278,6 +3996,12 @@ version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fa7359d10048f68ab8b09fa71c3daccfb0e9b559aed648a8f95469c27057180c" +[[package]] +name = "windows_i686_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" + [[package]] name = "windows_i686_msvc" version = "0.52.6" @@ -2290,6 +4014,12 @@ version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e7ac75179f18232fe9c285163565a57ef8d3c89254a30685b57d83a38d326c2" +[[package]] +name = "windows_x86_64_gnu" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" + [[package]] name = "windows_x86_64_gnu" version = "0.52.6" @@ -2302,6 +4032,12 @@ version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c3842cdd74a865a8066ab39c8a7a473c0778a3f29370b5fd6b4b9aa7df4a499" +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" + [[package]] name = "windows_x86_64_gnullvm" version = "0.52.6" @@ -2314,6 +4050,12 @@ version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0ffa179e2d07eee8ad8f57493436566c7cc30ac536a3379fdf008f47f6bb7ae1" +[[package]] +name = "windows_x86_64_msvc" +version = "0.42.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" + [[package]] name = "windows_x86_64_msvc" version = "0.52.6" @@ -2347,6 +4089,43 @@ version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9edde0db4769d2dc68579893f2306b26c6ecfbe0ef499b013d731b7b9247e0b9" +[[package]] +name = "x509-parser" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb3e137310115a65136898d2079f003ce33331a6c4b0d51f1531d1be082b6425" +dependencies = [ + "asn1-rs", + "aws-lc-rs", + "data-encoding", + "der-parser", + "lazy_static", + "nom", + "oid-registry", + "ring", + "rusticata-macros", + "thiserror 2.0.17", + "time", +] + +[[package]] +name = "yaml-rust" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56c1936c4cc7a1c9ab21a1ebb602eb942ba868cbd44a99cb7cdc5892335e1c85" +dependencies = [ + "linked-hash-map", +] + +[[package]] +name = "yasna" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e17bb3549cc1321ae1296b9cdc2698e2b6cb1992adfa19a8c72e5b7a738f44cd" +dependencies = [ + "time", +] + [[package]] name = "yoke" version = "0.8.1" @@ -2366,7 +4145,7 @@ checksum = "b659052874eb698efe5b9e8cf382204678a0086ebf46982b79d6ca3182927e5d" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.87", "synstructure", ] @@ -2387,7 +4166,7 @@ checksum = "88d2b8d9c68ad2b9e4340d7832716a4d21a22a1154777ad56ea55c51a9cf3831" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.87", ] [[package]] @@ -2407,7 +4186,7 @@ checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.87", "synstructure", ] @@ -2447,5 +4226,33 @@ checksum = "eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 
2.0.87", +] + +[[package]] +name = "zstd" +version = "0.13.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e91ee311a569c327171651566e07972200e76fcfe2242a4fa446149a3881c08a" +dependencies = [ + "zstd-safe", +] + +[[package]] +name = "zstd-safe" +version = "7.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f49c4d5f0abb602a93fb8736af2a4f4dd9512e36f7f570d66e65ff867ed3b9d" +dependencies = [ + "zstd-sys", +] + +[[package]] +name = "zstd-sys" +version = "2.0.16+zstd.1.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91e19ebc2adc8f83e43039e79776e3fda8ca919132d68a1fed6a5faca2683748" +dependencies = [ + "cc", + "pkg-config", ] diff --git a/Cargo.toml b/Cargo.toml index 72c63a9..a04fe3b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,11 +1,12 @@ [package] name = "dispenser" -version = "0.8.0" +version = "0.10.0" edition = "2021" license = "MIT" [dependencies] base64 = "0.22.1" +bollard = "0.19.4" chrono = "0.4.42" clap = { version = "4.5.18", features = ["derive"] } cron = { version = "0.15.0", features = ["serde"] } @@ -25,3 +26,17 @@ thiserror = "2.0.17" tokio = { version = "1.48.0", features = ["full"] } toml = "0.8.19" urlencoding = "2.1.3" + + +async-trait = "0.1.89" +pingora = { version = "0.6.0", features = ["lb", "openssl"] } +pingora-core = "0.6.0" +pingora-error = "0.6.0" +pingora-http = "0.6.0" +pingora-load-balancing = "0.6.0" +pingora-proxy = "0.6.0" +openssl = "0.10" +rcgen = "0.14.6" +instant-acme = "0.8.4" +x509-parser = "0.18.0" +http = "1.4.0" diff --git a/INSTALL.deb.md b/INSTALL.deb.md index 8f26bfa..363b11c 100644 --- a/INSTALL.deb.md +++ b/INSTALL.deb.md @@ -2,7 +2,7 @@ ## Requirements -Dispenser requieres Docker and Docker Compose to be installed in the system +Dispenser requires Docker to be installed in the system as well as [pass](https://www.passwordstore.org/). ## Install Docker @@ -21,7 +21,7 @@ wget ... 
```sh -sudo apt install ./dispenser-0.8.0-0.x86_64.deb +sudo apt install ./dispenser-0.10.0-0.x86_64.deb ``` You can validate that it was successfully installed by switching to the diff --git a/INSTALL.redhat.md b/INSTALL.redhat.md index ebb9192..5ca7aeb 100644 --- a/INSTALL.redhat.md +++ b/INSTALL.redhat.md @@ -2,7 +2,7 @@ ## Requirements -Dispenser requieres Docker and Docker Compose to be installed in the system +Dispenser requires Docker to be installed in the system as well as [pass](https://www.redhat.com/en/blog/management-password-store). `pass` is only available on [EPEL](https://www.redhat.com/en/blog/whats-epel-and-how-do-i-use-it) @@ -23,7 +23,7 @@ wget ... ```sh -sudo dnf install ./dispenser-0.8.0-0.x86_64.rpm +sudo dnf install ./dispenser-0.10.0-0.x86_64.rpm ``` You can validate that it was successfully installed by switching to the diff --git a/MIGRATION_GUIDE.md b/MIGRATION_GUIDE.md deleted file mode 100644 index 31abb14..0000000 --- a/MIGRATION_GUIDE.md +++ /dev/null @@ -1,628 +0,0 @@ -# Migration Guide: Docker Compose to service.toml - -This guide helps you migrate from the older Dispenser repository structure using `docker-compose.yaml` files to the new structure using `service.toml` files. - -## Overview - -The new structure replaces Docker Compose YAML files with TOML-based service configuration files. The key changes are: - -1. **Per-service configuration**: Each service now has its own `service.toml` file instead of `docker-compose.yaml` -2. **Network declarations**: Networks are now declared in `dispenser.toml` instead of in each `docker-compose.yaml` -3. **Simplified dispenser.toml**: The main configuration file is simplified - it only lists services by path and defines shared networks -4. **Service-level settings**: Image tracking, cron schedules, and initialization behavior are now defined in each `service.toml` -5. 
**Same interpolation syntax**: Variable interpolation using `${variable_name}` works exactly the same way - -## File Structure Comparison - -### Old Structure -``` -project/ -├── dispenser.toml # Contains service paths, images, cron, initialize settings -├── dispenser.vars # Variable definitions -└── service-name/ - └── docker-compose.yaml # Docker Compose service definition (with networks defined here) -``` - -### New Structure -``` -project/ -├── dispenser.toml # Contains service paths, polling delay, and network declarations -├── dispenser.vars # Variable definitions (unchanged) -└── service-name/ - └── service.toml # Complete service configuration (references networks) -``` - -## Main Configuration File Migration - -### dispenser.toml - -**Old format:** -```toml -delay = 60 - -[[instance]] -path = "nginx" -images = [{ registry = "${docker_io}", name = "nginx", tag = "latest" }] - -[[instance]] -path = "hello-world" -cron = "*/10 * * * * *" -initialize = "on-trigger" -``` - -**New format:** -```toml -# Delay in seconds between polling for new images (default: 60) -delay = 60 - -# Network declarations (optional) -[[network]] -name = "dispenser-net" -driver = "bridge" - -[[service]] -path = "nginx" - -[[service]] -path = "hello-world" -``` - -**Key changes:** -- `[[instance]]` → `[[service]]` -- Remove `images`, `cron`, and `initialize` fields (they move to `service.toml`) -- Keep only `path` to indicate service location -- Add `[[network]]` declarations at the top level (moved from docker-compose.yaml) - -### dispenser.vars - -**No changes required** - the variable file format remains the same: - -```toml -docker_io="docker.io" -nginx_port="8080" -``` - -Variable interpolation using `${variable_name}` syntax works identically in both formats. - -## Service Configuration Migration - -Each service directory needs a `service.toml` file to replace its `docker-compose.yaml`. 
- -### Example 1: Basic Web Service (nginx) - -**Old (docker-compose.yaml):** -```yaml -version: "3.8" -services: - nginx: - image: ${docker_io}/nginx:latest - ports: - - "8080:80" -``` - -**New (service.toml):** -```toml -# Service metadata (required) -[service] -name = "nginx-service" -image = "${docker_io}/nginx:latest" - -# Port mappings (optional) -[[port]] -host = 8080 -container = 80 - -# Network references (optional) -[[network]] -name = "dispenser-net" - -# Restart policy (optional, defaults to "no") -restart = "always" - -# Dispenser-specific configuration (required) -[dispenser] -# Watch for image updates -watch = true - -# Initialize immediately on startup (default behavior) -initialize = "immediately" -``` - -### Example 2: Scheduled Job (hello-world) - -**Old (docker-compose.yaml):** -```yaml -version: "3.8" -services: - hello-world: - image: hello-world - restart: no -``` - -**Old (dispenser.toml entry):** -```toml -[[instance]] -path = "hello-world" -cron = "*/10 * * * * *" -initialize = "on-trigger" -``` - -**New (service.toml):** -```toml -# Service metadata (required) -[service] -name = "hello-world-job" -image = "hello-world" - -# Network references (optional) -[[network]] -name = "dispenser-net" - -# Restart policy (optional, defaults to "no") -restart = "no" - -# Dispenser-specific configuration (required) -[dispenser] -# Don't watch for image updates -watch = false - -# Initialize only when triggered (by cron in this case) -initialize = "on-trigger" - -# Run every 10 seconds -cron = "*/10 * * * * *" -``` - -## Field Mapping Reference - -### Service-Level Fields - -| Docker Compose | service.toml | Notes | -|----------------|--------------|-------| -| `services..image` | `[service] image` | Same interpolation syntax | -| `services..ports` | `[[port]]` sections | One `[[port]]` per mapping | -| `services..volumes` | `[[volume]]` sections | One `[[volume]]` per mount | -| `services..environment` | `[env]` map | Key-value pairs in `[env]` section 
| -| `services..restart` | `restart` | Values: "no", "always", "on-failure", "unless-stopped" | -| `services..command` | `command` | String or array of strings | -| `services..entrypoint` | `entrypoint` | String or array of strings | -| `services..working_dir` | `working_dir` | String path | -| `services..user` | `user` | String (UID or UID:GID) | -| `services..hostname` | `hostname` | String | -| `services..networks` | `[[network]]` sections | One `[[network]]` per network reference | -| N/A | `memory` | New: Resource limits (e.g., "256m", "1g") | -| N/A | `cpus` | New: CPU limits (e.g., "0.5", "1.0") | - -### Dispenser-Specific Fields - -| Old Location | New Location | Notes | -|--------------|--------------|-------| -| `dispenser.toml: [[instance]].images` | `service.toml: [dispenser].watch` | `images` list → `watch = true/false` | -| `dispenser.toml: [[instance]].cron` | `service.toml: [dispenser].cron` | Same cron syntax | -| `dispenser.toml: [[instance]].initialize` | `service.toml: [dispenser].initialize` | Values: "immediately" or "on-trigger" | - -### Network Configuration - -| Old Location | New Location | Notes | -|--------------|--------------|-------| -| `docker-compose.yaml: networks` (top-level) | `dispenser.toml: [[network]]` | Networks declared centrally in main config | -| `docker-compose.yaml: services..networks` | `service.toml: [[network]]` sections | Services reference networks by name | - -## Complete Migration Examples - -### Example 3: Service with Volumes and Environment Variables - -**Old (docker-compose.yaml):** -```yaml -version: "3.8" -services: - webapp: - image: ${registry}/myapp:${version} - ports: - - "${app_port}:3000" - environment: - - NODE_ENV=production - - API_KEY=${api_key} - volumes: - - ./data:/app/data - - ./config:/app/config:ro - restart: unless-stopped -``` - -**New (service.toml):** -```toml -[service] -name = "webapp" -image = "${registry}/myapp:${version}" -memory = "512m" -cpus = "1.0" - -[[port]] -host = 
"${app_port}" -container = 3000 - -[env] -NODE_ENV = "production" -API_KEY = "${api_key}" - -[[volume]] -source = "./data" -target = "/app/data" - -[[volume]] -source = "./config" -target = "/app/config" -readonly = true - -[[network]] -name = "app-network" - -restart = "unless-stopped" - -[dispenser] -watch = true -initialize = "immediately" -``` - -**dispenser.toml entry:** -```toml -# Network declaration (moved from docker-compose.yaml) -[[network]] -name = "backend" -driver = "bridge" - -[[service]] -path = "postgres" -``` - -**dispenser.toml entry:** -```toml -[[network]] -name = "app-network" -driver = "bridge" - -[[service]] -path = "webapp" -``` - -### Example 4: Database Service with Networks - -**Old (docker-compose.yaml):** -```yaml -version: "3.8" -services: - postgres: - image: postgres:15 - ports: - - "5432:5432" - environment: - - POSTGRES_PASSWORD=${db_password} - - POSTGRES_USER=${db_user} - - POSTGRES_DB=${db_name} - volumes: - - pgdata:/var/lib/postgresql/data - networks: - - backend - restart: always - -volumes: - pgdata: - -networks: - backend: - driver: bridge -``` - -**New (service.toml):** -```toml -[service] -name = "postgres-db" -image = "postgres:15" -memory = "1g" -cpus = "2.0" - -[[port]] -host = 5432 -container = 5432 - -[env] -POSTGRES_PASSWORD = "${db_password}" -POSTGRES_USER = "${db_user}" -POSTGRES_DB = "${db_name}" - -[[volume]] -source = "pgdata" -target = "/var/lib/postgresql/data" - -[[network]] -name = "backend" - -restart = "always" - -[dispenser] -watch = true -initialize = "immediately" -``` - -### Example 5: Custom Command and Entrypoint - -**Old (docker-compose.yaml):** -```yaml -version: "3.8" -services: - worker: - image: ${docker_io}/python:3.11 - command: ["python", "worker.py", "--verbose"] - working_dir: /app - volumes: - - ./src:/app - restart: on-failure -``` - -**New (service.toml):** -```toml -[service] -name = "worker" -image = "${docker_io}/python:3.11" -command = ["python", "worker.py", "--verbose"] 
-working_dir = "/app" -memory = "256m" -cpus = "0.5" - -[[volume]] -source = "./src" -target = "/app" - -restart = "on-failure" - -[dispenser] -watch = true -initialize = "immediately" -``` - -### Example 6: One-Shot Task with Cron - -**Old (docker-compose.yaml):** -```yaml -version: "3.8" -services: - backup: - image: backup-tool:latest - volumes: - - ./backups:/backups - - ./data:/data:ro - restart: no -``` - -**Old (dispenser.toml entry):** -```toml -[[instance]] -path = "backup" -cron = "0 0 2 * * *" # Daily at 2 AM -initialize = "on-trigger" -images = [{ registry = "docker.io", name = "backup-tool", tag = "latest" }] -``` - -**New (service.toml):** -```toml -[service] -name = "backup-job" -image = "backup-tool:latest" -memory = "128m" -cpus = "0.5" - -[[volume]] -source = "./backups" -target = "/backups" - -[[volume]] -source = "./data" -target = "/data" -readonly = true - -restart = "no" - -[dispenser] -watch = true -initialize = "on-trigger" -cron = "0 0 2 * * *" # Daily at 2 AM -``` - -## Network Migration - -Networks are handled differently in the new structure. Instead of defining networks in each `docker-compose.yaml` file, they are now declared centrally in `dispenser.toml` and referenced by services. 
- -### Network Declaration Migration - -**Old approach** - Networks defined in docker-compose.yaml: -```yaml -version: "3.8" -services: - web: - image: nginx - networks: - - frontend - - backend - - db: - image: postgres - networks: - - backend - -networks: - frontend: - driver: bridge - backend: - driver: bridge -``` - -**New approach** - Networks declared in dispenser.toml: - -```toml -# dispenser.toml -delay = 60 - -# Declare all networks used by services -[[network]] -name = "frontend" -driver = "bridge" - -[[network]] -name = "backend" -driver = "bridge" - -[[service]] -path = "web" - -[[service]] -path = "db" -``` - -Then services reference these networks in their `service.toml`: - -```toml -# web/service.toml -[service] -name = "web" -image = "nginx" - -[[network]] -name = "frontend" - -[[network]] -name = "backend" - -[dispenser] -watch = true -initialize = "immediately" -``` - -```toml -# db/service.toml -[service] -name = "db" -image = "postgres" - -[[network]] -name = "backend" - -[dispenser] -watch = true -initialize = "immediately" -``` - -### Key Points - -1. **Central declaration**: All networks must be declared in `dispenser.toml` using `[[network]]` sections -2. **Service references**: Services reference networks using `[[network]]` sections (not an array) -3. **Multiple networks**: A service can reference multiple networks by having multiple `[[network]]` sections -4. **Network attributes**: Currently supported attributes in `dispenser.toml`: - - `name` (required): The network name - - `driver` (optional): Network driver (e.g., "bridge", "host", "overlay") -5. 
**Default network**: If no networks are specified, Docker uses a default network - -### Network Array Syntax vs Section Syntax - -Note the syntax difference for network references in service configuration: - -**Old docker-compose.yaml (array syntax):** -```yaml -services: - app: - networks: - - backend - - frontend -``` - -**New service.toml (section syntax):** -```toml -[[network]] -name = "backend" - -[[network]] -name = "frontend" -``` - -Each network reference requires its own `[[network]]` section with a `name` field. - -## Migration Checklist - -For each service in your project: - -- [ ] Create a new `service.toml` file in the service directory -- [ ] Copy the `[service]` section fields from `docker-compose.yaml`: - - [ ] `image` (with interpolation if used) - - [ ] `ports` → `[[port]]` sections - - [ ] `volumes` → `[[volume]]` sections - - [ ] `environment` → `[env]` map - - [ ] `restart` policy - - [ ] Other fields (`command`, `entrypoint`, `working_dir`, etc.) -- [ ] Add `[dispenser]` section with: - - [ ] `watch = true/false` (was `images` list present?) - - [ ] `initialize` (was it in `dispenser.toml`?) - - [ ] `cron` (if present in `dispenser.toml`) -- [ ] Optional: Add `memory` and `cpus` limits -- [ ] Update `dispenser.toml`: - - [ ] Change `[[instance]]` to `[[service]]` - - [ ] Remove all fields except `path` - - [ ] Add `[[network]]` declarations for any networks used (moved from docker-compose.yaml) -- [ ] Update service network references: - - [ ] Change `networks = ["name"]` array to `[[network]]` sections with `name` field -- [ ] Delete the old `docker-compose.yaml` file -6. **Volume readonly**: - - Old: `./config:/app/config:ro` - - New: `readonly = true` field in volume section -7. **Environment variables**: - - Old: `environment:` array in YAML - - New: `[env]` map with key-value pairs where keys are variable names -8. 
**Networks**: - - Old: Networks defined in each `docker-compose.yaml` file - - New: Networks declared centrally in `dispenser.toml` with `[[network]]`, services reference them with `[[network]]` sections -9. **Network references**: - - Old: `networks: ["backend"]` array in YAML - - New: `[[network]]` sections with `name` field in service.toml -10. **Resource limits**: New format supports `memory` and `cpus` fields that weren't available in the old format - -## Important Notes - -1. **Variable interpolation is identical**: Both formats use `${variable_name}` syntax -2. **Cron syntax unchanged**: The cron expression format remains the same -3. **Initialize values**: Use `"immediately"` or `"on-trigger"` (case-insensitive, can use underscores or hyphens) -4. **Watch behavior**: - - Old: Presence of `images` array meant watching for updates - - New: Explicit `watch = true/false` field -5. **Port syntax**: - - Old: `"8080:80"` in YAML - - New: `host = 8080` and `container = 80` in separate fields -6. **Volume readonly**: - - Old: `./config:/app/config:ro` - - New: `readonly = true` field in volume section -7. 
**Resource limits**: New format supports `memory` and `cpus` fields that weren't available in the old format - -## Troubleshooting - -### Common Issues - -**Issue**: Service not starting after migration -- **Check**: Verify all required fields are present in `[service]` section -- **Check**: Ensure `[dispenser]` section exists with `initialize` field - -**Issue**: Variables not interpolating -- **Check**: Variable names in `dispenser.vars` match those in `service.toml` -- **Check**: Syntax is `${variable_name}` not `$variable_name` or `{variable_name}` - -**Issue**: Cron jobs not triggering -- **Check**: `initialize = "on-trigger"` is set in `[dispenser]` section -- **Check**: `cron` field has valid cron expression - -**Issue**: Image updates not detected -- **Check**: `watch = true` in `[dispenser]` section -- **Check**: `delay` value in main `dispenser.toml` is reasonable - -## Additional Resources - -For more examples, compare the provided example directories: -- `example-old/` - Shows the old docker-compose structure -- `example-new/` - Shows the new service.toml structure - -Both directories contain functionally equivalent configurations that can serve as reference implementations. \ No newline at end of file diff --git a/MISSION.md b/MISSION.md new file mode 100644 index 0000000..fd0f66d --- /dev/null +++ b/MISSION.md @@ -0,0 +1,89 @@ +# Dispenser's mission. + +> This is a rough explanation of what the purpose of Dispenser is and why some technical decisions are made. + +Dispenser is meant to be a simple, declarative and deterministic approach +to deploying containers inside a virtual machine. + +## Built for CD from Day One + +Dispenser actually started its life as a continuous deployment (CD) solution first. + +One of the biggest gaps in the current ecosystem is that neither Docker Compose nor Kubernetes actually have CD built-in. With Compose, you're stuck writing bash scripts to pull images and restart services. 
With Kubernetes, you have to set up and manage entirely separate tools like ArgoCD or Flux just to keep your cluster in sync with your registry. + +Dispenser was born from the idea that **deployment should be a core feature of the orchestrator, not an afterthought.** + +## The Problem with "Standard" Tools + +Many existing solutions such as Kubernetes or Docker Compose work very well +and are mature tools. However, they have a problem: they are hard to manage correctly. + +With Kubernetes, it's hard to have a versioned, declarative state without a massive amount of boilerplate and external tooling. + +With Docker Compose, it's not very obvious where the files are. Things could be in +`/home//service/docker-compose.yaml` or anywhere else. This leads to "server drift" where nobody knows exactly what is running or why. + +Dispenser looks to: + +1. Minimize the effort to deploy a service. +2. Be declarative and "code first". A whole deployment should be able to be version controlled. +3. Simplify continuous deployment in environments where it's typically challenging. + +## Everything in its place: `/opt/dispenser` + +One of the biggest headaches with manual deployments is hunting for config files. Dispenser enforces a standard: everything lives in `/opt/dispenser`. + +By centralizing the configuration, we eliminate the "where is that compose file?" game. If it's running on the machine, the source of truth is in that directory. This makes backups, migrations, and debugging significantly easier. + +## Minimize the effort to deploy a service. + +By including things like a reverse proxy and scheduling, Dispenser seeks to +make deploying a new service as easy as having the containers +and their environment variables ready. + +## Declarative & Deterministic + +A Dispenser instance runs and owns what is declared in its configuration. If it's +not declared, it will not run. If you delete a service from your config, Dispenser deletes the container. 
This "reconciler" mindset ensures that the state of your VM matches your Git repo. + +## Simplify CD: Why Polling? + +Dispenser intentionally uses a **pull-based polling approach** for CD instead of the traditional push-based webhooks. + +**The Trade-off:** +* **The Bad:** It's not "instant." There’s a delay between pushing an image and the service updating (defaulting to 60 seconds). It also consumes a negligible amount of background I/O by checking the registry periodically. +* **The Good:** It’s incredibly resilient and firewall-friendly. + +Most CD tools require you to open a port so that GitHub or GitLab can "poke" your server when a build is done. In many on-premises or highly secure enterprise environments, whitelisting outbound traffic to a registry is standard, but allowing inbound traffic from the public internet is a non-starter. + +By polling, Dispenser eliminates the need for: + +1. **Public Endpoints:** Your server doesn't need a public IP or a domain name for CD to work. +2. **Webhook Secrets:** No need to manage and rotate tokens for callbacks. +3. **Complex Tunnels:** You don't need things like Ngrok or Cloudflare Tunnels just to get a deployment signal. + +It’s "pull-based" GitOps that works behind air-gapped firewalls, NATs, and VPNs without a single change to your networking infrastructure. + +## Secrets + +Managing secrets in a "code first" world is always a bit of a dance. You want your config in Git, but you definitely don't want your database password there. + +Dispenser handles this in two ways: + +1. **The Local Way:** You can use variable files like `prod.dispenser.vars`. By adding `*.dispenser.vars` to your `.gitignore`, you can keep your secrets on the machine and your logic in the repo. Dispenser will merge them at runtime. +2. **The Cloud Way:** If you're running on GCP, Dispenser can talk directly to Google Secret Manager. You just reference the secret name in your vars, and Dispenser fetches it. No more manual env file syncing. 
+ +## Batteries-Included Networking (The Proxy) + +Usually, setting up a container means you also have to set up Nginx or Caddy, handle Certbot for SSL, and hope the config doesn't break when you restart. + +Dispenser has a built-in reverse proxy powered by Pingora (the stuff Cloudflare uses). It handles: +* **Automatic SSL:** Just give it an email, and it talks to Let's Encrypt for you. +* **Service Discovery:** You don't need to know IP addresses. If your service is named `api`, the proxy knows how to find it. +* **Zero-Downtime (Roadmap):** While currently Dispenser restarts containers when updated, true zero-downtime handoffs (spinning up the new one and health-checking it before killing the old one) is a primary goal on our roadmap. + +## Scheduling: Cron, but for containers. + +Sometimes you don't need a service running 24/7. Maybe you just need to run a backup at 2 AM or an ETL job every hour. + +Instead of messing with the system's `crontab` and writing messy shell scripts to run `docker run`, you just add a `cron` field to your service config. Dispenser treats scheduled jobs as first-class citizens—it'll pull the image, run the container, and clean up afterwards. diff --git a/NETWORKS.md b/NETWORKS.md index 8b4777e..e2ebf21 100644 --- a/NETWORKS.md +++ b/NETWORKS.md @@ -6,9 +6,71 @@ This document describes how to configure Docker networks in Dispenser. Dispenser supports Docker networks to enable communication between services. Networks are declared in `dispenser.toml` and referenced in individual service configurations. +## Default Dispenser Network + +Dispenser automatically creates and manages a default network called `dispenser` that **all containers are connected to**. 
This network provides: + +- **Automatic inter-container communication**: All containers can communicate with each other using their service names as hostnames +- **Predictable IP addresses**: The network uses a dedicated subnet (`172.28.0.0/16`) with gateway `172.28.0.1` +- **No configuration required**: The network is created automatically when Dispenser starts and removed on shutdown + +### Network Details + +| Property | Value | +|----------|-------| +| Name | `dispenser` | +| Driver | `bridge` | +| Subnet | `172.28.0.0/16` | +| Gateway | `172.28.0.1` | +| Attachable | `true` | + +### Accessing Containers by IP + +Since all containers are connected to the dispenser network with a known subnet, you can: + +1. **Use service names as hostnames** (recommended): Containers can reach each other using their service name (e.g., `http://my-api:8080`) +2. **Use assigned IP addresses**: Docker automatically assigns IP addresses from the `172.28.0.0/16` range + +### Example + +With the default network, two services can communicate without any explicit network configuration: + +```toml +# api/service.toml +[service] +name = "api" +image = "my-api:latest" + +[[port]] +host = 8080 +container = 8080 + +[dispenser] +watch = true +``` + +```toml +# worker/service.toml +[service] +name = "worker" +image = "my-worker:latest" + +[env] +API_URL = "http://api:8080" # Can reach the api service by name + +[dispenser] +watch = true +``` + +Both containers are automatically connected to the `dispenser` network and can communicate using their service names. + +## User-Defined Networks + +In addition to the default dispenser network, you can define custom networks for more fine-grained control over container communication. + ## Network Declaration -Networks must be declared in your `dispenser.toml` file before they can be referenced by services. +Custom networks must be declared in your `dispenser.toml` file before they can be referenced by services. 
These networks are **in addition to** the default `dispenser` network. ### Basic Network Declaration @@ -370,6 +432,26 @@ When `external = true`, Dispenser will not attempt to create or delete the netwo ## Troubleshooting +### Default Dispenser Network Issues + +The `dispenser` network is automatically created when Dispenser starts. If you encounter issues: + +1. **Network already exists from previous run**: If Dispenser didn't shut down cleanly, the network may still exist. Remove it manually: + ```sh + docker network rm dispenser + ``` + +2. **Subnet conflict**: The default subnet `172.28.0.0/16` may conflict with existing networks. Check for conflicts: + ```sh + docker network ls + docker network inspect dispenser + ``` + +3. **Viewing container IPs on the dispenser network**: + ```sh + docker network inspect dispenser --format '{{range .Containers}}{{.Name}}: {{.IPv4Address}}{{"\n"}}{{end}}' + ``` + ### Network Already Exists If you see an error that a network already exists, either: @@ -379,7 +461,7 @@ If you see an error that a network already exists, either: ### Services Cannot Communicate Ensure that: -1. Both services are connected to the same network +1. Both services are connected to the same network (all services are on the `dispenser` network by default) 2. You're using the correct service name as the hostname 3. The network is not marked as `internal` if internet access is needed 4. Firewall rules are not blocking traffic diff --git a/PROXY.md b/PROXY.md new file mode 100644 index 0000000..3c3654d --- /dev/null +++ b/PROXY.md @@ -0,0 +1,120 @@ +# Reverse Proxy Configuration + +Dispenser includes a built-in, high-performance reverse proxy powered by [Pingora](https://github.com/cloudflare/pingora). It automatically handles TLS termination, ACME certificate management (Let's Encrypt), and intelligent routing to your containerized services. + +## Overview + +The Dispenser proxy listens on ports 80 (HTTP) and 443 (HTTPS). It automatically: +1. 
Redirects all HTTP traffic to HTTPS. +2. Routes incoming requests to the correct container based on the `Host` header. +3. Manages SSL/TLS certificates via Let's Encrypt or self-signed "simulation" mode. +4. Handles Zero-Downtime reloads when configuration changes or certificates are updated. + +## Global Toggle + +The reverse proxy is enabled by default. You can explicitly enable or disable it in your main `dispenser.toml` file. When disabled, both the proxy server (ports 80/443) and the automatic certificate maintenance tasks are turned off. + +```toml +# dispenser.toml + +[proxy] +enabled = false +``` + +> [!IMPORTANT] +> Enabling or disabling the proxy via the `enabled` flag requires a full process restart. Changing this value and reloading with `dispenser -s reload` will result in a warning and the change will not take effect until the next full start. + +## Service Configuration + +To expose a service through the proxy, add a `[proxy]` section to your `service.toml`. + +```toml +# my-app/service.toml + +[service] +name = "web-app" +image = "my-registry/web-app:latest" + +[proxy] +# The domain name the proxy should listen for +host = "app.example.com" +# The port the service is listening on INSIDE the container +service_port = 8080 +``` + +### Proxy Settings Reference + +| Field | Type | Description | +| :--- | :--- | :--- | +| `host` | `string` | The FQDN (Fully Qualified Domain Name) for this service. | +| `service_port` | `u16` | The private port inside the container where the app is running. | +| `cert_file` | `string` | (Optional) Path to a custom SSL certificate file. | +| `key_file` | `string` | (Optional) Path to a custom SSL private key file. | + +## SSL/TLS Management + +Dispenser provides three ways to handle SSL certificates: + +### 1. 
Automatic ACME (Let's Encrypt) +If you provide an email address in the `[certbot]` section of your main `dispenser.toml`, Dispenser will automatically negotiate certificates with Let's Encrypt using the HTTP-01 challenge. + +> [!NOTE] +> The `[certbot]` section must be explicitly defined. If it is missing, Dispenser assumes you are providing custom certificates manually via `cert_file` and `key_file` in your `service.toml`, or it will attempt to use simulation mode if running via the `dev` command. + +```toml +# dispenser.toml +delay = 60 + +[certbot] +email = "admin@example.com" + +[[service]] +path = "my-app" +``` + +Dispenser handles the challenge internally. Ensure your server is accessible on port 80 from the internet. + +### 2. Manual Certificates +If you already have certificates (e.g., from a corporate CA or Wildcard cert), you can specify them in the `service.toml`. + +```toml +[proxy] +host = "internal.example.com" +service_port = 80 +cert_file = "/etc/ssl/certs/internal.crt" +key_file = "/etc/ssl/certs/internal.key" +``` + +### 3. Simulation Mode (Self-Signed) +For local development or environments without public DNS, you can run Dispenser in simulation mode using the `dev` command. It will generate self-signed certificates for all configured hosts on the fly. + +```bash +dispenser dev -s my-app +``` + +## How Routing Works + +1. **Request Arrival**: A request arrives at Dispenser on port 443. +2. **SNI Matching**: The proxy looks at the Server Name Indication (SNI) to select the correct SSL certificate. +3. **Host Matching**: Once the TLS handshake is complete, the proxy looks at the `Host` HTTP header. +4. **Upstream Resolution**: It finds the container matching that host and forwards the request to the container's internal IP address on the specified `service_port`. + +### Internal Networking +The proxy communicates with containers over the default `dispenser` network (`172.28.0.0/16`). 
You do not need to expose ports to the host machine (via `[[port]]`) for the proxy to work; it communicates directly with the container's private IP. + +## Zero-Downtime Reloads + +When you reload Dispenser (`dispenser -s reload`) or when certificates are renewed: +1. Dispenser starts a new "generation" of the proxy. +2. The new proxy starts listening for new connections. +3. The old proxy stops accepting new connections but finishes processing existing ones. +4. Once all old connections are drained, the old proxy instance exits. + +## Troubleshooting + +- **502 Bad Gateway**: This usually means the container is not running or the `service_port` defined in `service.toml` is incorrect. +- **Connection Refused**: Ensure the `dispenser` process is running and has permission to bind to ports 80 and 443 (this usually requires `sudo` or specific capabilities). +- **Certificate Errors**: + - Check the logs: `journalctl -u dispenser -f`. + - If using Let's Encrypt, ensure port 80 is open to the world. + - Certificates are stored in `.dispenser/certs` relative to the working directory. \ No newline at end of file diff --git a/README.md b/README.md index ce37dd9..1dd0ffd 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,8 @@ # Dispenser -This tool manages containerized applications by continuously monitoring your artifact registry for new versions of Docker images. When updates are detected, dispenser automatically deploys the new versions of your services with zero downtime, updating the running containers on the host machine. +Dispenser is a simple, declarative, and deterministic container orchestrator designed for single virtual machines. It combines continuous deployment (CD), a built-in reverse proxy with automatic SSL, and cron scheduling into a single binary, eliminating the need for complex external tooling or manual bash scripts. + +This tool manages containerized applications by continuously monitoring your artifact registry for new versions of Docker images. 
When updates are detected, dispenser automatically redeploys your services, ensuring the running containers on the host machine match the latest versions in your registry. dispenser operates as a daemon that runs in the background on the host server that watches your artifact registry, detecting when new versions of your container images are published. @@ -8,10 +10,11 @@ dispenser operates as a daemon that runs in the background on the host server th - **[CLI Reference](CLI.md)** - Complete command-line options and usage - **[Service Configuration](SERVICE_CONFIG.md)** - Detailed `service.toml` reference +- **[Reverse Proxy](PROXY.md)** - Built-in proxy and SSL management - **[Network Configuration](NETWORKS.md)** - Docker network setup guide - **[Cron Scheduling](CRON.md)** - Scheduled deployments - **[GCP Secrets](GCP.md)** - Google Secret Manager integration -- **[Migration Guide](MIGRATION_GUIDE.md)** - Migrating from Docker Compose + ## Prerequisites @@ -30,9 +33,9 @@ Download the latest `.deb` or `.rpm` package from the [releases page](https://gi ```sh # Download the .deb package -# wget https://github.com/ixpantia/dispenser/releases/download/v0.8.0/dispenser-0.8.0-0.x86_64.deb +# wget https://github.com/ixpantia/dispenser/releases/download/v0.10.0/dispenser-0.10.0.0-0.x86_64.deb -sudo apt install ./dispenser-0.8.0-0.x86_64.deb +sudo apt install ./dispenser-0.10.0.0-0.x86_64.deb ``` ### RHEL / CentOS / Fedora @@ -41,7 +44,7 @@ sudo apt install ./dispenser-0.8.0-0.x86_64.deb # Download the .rpm package # wget ... -sudo dnf install ./dispenser-0.8.0-0.x86_64.rpm +sudo dnf install ./dispenser-0.10.0.0-0.x86_64.rpm ``` The installation process will: @@ -252,9 +255,13 @@ This is useful for reusing the same configuration in multiple deployments. ### Step 7: Working with Networks (Optional) -Dispenser supports Docker networks to enable communication between services. Networks are declared in `dispenser.toml` and referenced in individual service configurations. 
+Dispenser automatically creates a default network called `dispenser` that **all containers are connected to**. This network uses the subnet `172.28.0.0/16` with gateway `172.28.0.1`, allowing all your services to communicate with each other using their service names as hostnames without any configuration. + +For example, if you have two services `api` and `postgres`, the `api` service can connect to the database using `postgres` as the hostname (e.g., `postgres://postgres:5432/mydb`). -1. Declare networks in your `dispenser.toml`. +In addition to the default network, you can declare custom networks in `dispenser.toml` for more fine-grained control over container communication. + +1. Declare custom networks in your `dispenser.toml`. ```toml delay = 60 @@ -308,7 +315,36 @@ Dispenser supports Docker networks to enable communication between services. Net initialize = "immediately" ``` -Now both services can communicate with each other using their service names as hostnames. + ### Step 9: Reverse Proxy and SSL + + Dispenser includes a built-in reverse proxy that handles TLS termination and routes traffic to your services using the `Host` header. The proxy is enabled by default and listens on ports 80 and 443. You can explicitly disable it in your main `dispenser.toml` if you are using an external proxy. + + 1. Add a `[proxy]` block to your `service.toml`. + + ```toml + [proxy] + host = "app.example.com" + service_port = 8080 + ``` + + 2. (Optional) Enable Let's Encrypt in `dispenser.toml` to automatically manage certificates. **Note:** This section must be explicitly added; otherwise, Dispenser expects manual certificates. + + ```toml + [certbot] + email = "admin@example.com" + ``` + + 3. (Optional) Disable the proxy globally in `dispenser.toml`. **Note:** Changing this setting requires a full process restart to take effect. + + ```toml + [proxy] + enabled = false + ``` + + For more details, see the [Reverse Proxy Guide](PROXY.md). 
+ + ### Step 10: Validating Configuration +Now both services can communicate with each other using their service names as hostnames. Note that even without the explicit `[[network]]` declarations, both services would still be able to communicate via the default `dispenser` network. For advanced network configuration including external networks, internal networks, labels, and different drivers, see the [Network Configuration Guide](NETWORKS.md). @@ -390,7 +426,21 @@ No referenced variables ------------------------------------------------------------------------------- ``` -### Step 10: Start and Verify the Deployment +### Step 11: Local Development with `dev` mode + +For local development or testing, use the `dev` subcommand to run specific services without loading your entire stack. + +```sh +# Run only the 'api' service with self-signed certificates +dispenser dev -s api +``` + +The `dev` command: +- **Implicitly enables simulation**: Generates self-signed certificates on the fly for all proxy hosts. +- **Selective loading**: Only reads and renders configuration for services matching the filter. +- **Dependency pruning**: Automatically removes dependencies on services that are not part of the current run, so your services start immediately. + +### Step 12: Start and Verify the Deployment 1. Exit the `dispenser` user session to return to your regular user. 
```sh @@ -446,6 +496,7 @@ dispenser -s stop - **[CLI Reference](CLI.md)** - All command-line flags and options - **[Service Configuration Reference](SERVICE_CONFIG.md)** - Complete field documentation +- **[Reverse Proxy](PROXY.md)** - Proxy and SSL configuration - **[Network Configuration Guide](NETWORKS.md)** - Advanced networking setup - **[Cron Documentation](CRON.md)** - Scheduled deployments - **[GCP Secrets Integration](GCP.md)** - Using Google Secret Manager diff --git a/SERVICE_CONFIG.md b/SERVICE_CONFIG.md index bd14f18..5fe5452 100644 --- a/SERVICE_CONFIG.md +++ b/SERVICE_CONFIG.md @@ -33,6 +33,9 @@ restart = "policy" [depends_on] # Service dependencies + +[proxy] +# Reverse proxy configuration ``` ## Service Section @@ -381,6 +384,47 @@ migration = "service-completed" - `service-started` or `started` - Wait for service to start - `service-completed` or `completed` - Wait for service to complete +## Proxy Configuration + +The `[proxy]` section configures the built-in reverse proxy to route traffic to this service. These settings only take effect if the proxy is enabled globally in your main `dispenser.toml` file (enabled by default). Note that enabling/disabling the proxy globally requires a full process restart. + +### `host` (required) + +The domain name (FQDN) that this service should respond to. + +```toml +[proxy] +host = "app.example.com" +``` + +### `service_port` (required) + +The port the application is listening on inside the container. This is where the proxy will forward traffic. + +```toml +[proxy] +host = "app.example.com" +service_port = 8080 +``` + +### `cert_file` (optional) + +Path to a custom SSL certificate file (PEM format). If not provided, Dispenser uses Let's Encrypt only if the `[certbot]` section is explicitly defined in your main `dispenser.toml`. If `[certbot]` is missing, Dispenser expects manual certificates here (or will use simulation mode if running the `dev` command). 
+ +### `key_file` (optional) + +Path to the private key file for the custom certificate. + +```toml +[proxy] +host = "internal.example.com" +service_port = 80 +cert_file = "/etc/ssl/certs/internal.crt" +key_file = "/etc/ssl/certs/internal.key" +``` + +See [PROXY.md](PROXY.md) for more details on the reverse proxy. + ## Complete Examples ### Basic Web Application @@ -606,6 +650,6 @@ This will check for: ## See Also - [CLI Reference](CLI.md) - Command-line options +- [Reverse Proxy](PROXY.md) - Proxy and SSL configuration - [Network Configuration](NETWORKS.md) - Detailed network setup - [CRON Documentation](CRON.md) - Scheduling reference -- [Migration Guide](MIGRATION_GUIDE.md) - Migrating from Docker Compose diff --git a/deb/DEBIAN/control index 07ec0a4..c2dcc64 100644 --- a/deb/DEBIAN/control +++ b/deb/DEBIAN/control @@ -2,5 +2,5 @@ Package: dispenser Version: VERSION_PLACEHOLDER Maintainer: ixpantia S.A. Architecture: amd64 -Description: Continously Deploy services with Docker Compose +Description: Continuously Deploy Containerized Services Depends: docker-ce, docker-ce-cli, containerd.io, docker-buildx-plugin, docker-compose-plugin, gnupg2, pass diff --git a/example-new/.gitignore deleted file mode 100644 index 6702b47..0000000 --- a/example-new/.gitignore +++ /dev/null @@ -1 +0,0 @@ -dispenser.pid diff --git a/example-new/nginx/html/index.html deleted file mode 100644 index 2bf5624..0000000 --- a/example-new/nginx/html/index.html +++ /dev/null @@ -1 +0,0 @@ -

Welcome to Dispenser

diff --git a/example-old/.gitignore b/example-old/.gitignore deleted file mode 100644 index 6702b47..0000000 --- a/example-old/.gitignore +++ /dev/null @@ -1 +0,0 @@ -dispenser.pid diff --git a/example-old/dispenser.toml b/example-old/dispenser.toml deleted file mode 100644 index 60ed80e..0000000 --- a/example-old/dispenser.toml +++ /dev/null @@ -1,17 +0,0 @@ -delay = 60 - -[[instance]] -path = "nginx" -# This service will be started immediately on startup, which is the default behavior. -# It will be updated if a new 'nginx:latest' image is detected. -images = [{ registry = "${docker_io}", name = "nginx", tag = "latest" }] - -[[instance]] -path = "hello-world" -cron = "*/10 * * * * *" -# This service will only be initialized when a trigger occurs. In this case, -# the cron schedule will trigger it every 10 seconds. It will not be started -# on application launch. -# -# The value can be "on-trigger", "OnTrigger", "on_trigger", or "on trigger". -initialize = "on-trigger" diff --git a/example-old/dispenser.vars b/example-old/dispenser.vars deleted file mode 100644 index 3585319..0000000 --- a/example-old/dispenser.vars +++ /dev/null @@ -1 +0,0 @@ -docker_io="docker.io" diff --git a/example-old/hello-world/docker-compose.yaml b/example-old/hello-world/docker-compose.yaml deleted file mode 100644 index fe5fa5e..0000000 --- a/example-old/hello-world/docker-compose.yaml +++ /dev/null @@ -1,5 +0,0 @@ -version: "3.8" -services: - hello-world: - image: hello-world - restart: no diff --git a/example-old/nginx/docker-compose.yaml b/example-old/nginx/docker-compose.yaml deleted file mode 100644 index baab2a9..0000000 --- a/example-old/nginx/docker-compose.yaml +++ /dev/null @@ -1,6 +0,0 @@ -version: "3.8" -services: - nginx: - image: ${docker_io}/nginx:latest - ports: - - "8080:80" diff --git a/example/.gitignore b/example/.gitignore new file mode 100644 index 0000000..c074f7a --- /dev/null +++ b/example/.gitignore @@ -0,0 +1,3 @@ +dispenser.pid +certs/ +.dispenser diff --git 
a/example-new/dispenser.toml b/example/dispenser.toml similarity index 63% rename from example-new/dispenser.toml rename to example/dispenser.toml index c9286e0..1470f7b 100644 --- a/example-new/dispenser.toml +++ b/example/dispenser.toml @@ -1,12 +1,14 @@ # Delay in seconds between polling for new images (default: 60) delay = 60 -[[network]] -name = "dispenser-net" -driver = "bridge" +[proxy] +enabled = true [[service]] -path = "nginx" +path = "service1" + +[[service]] +path = "service2" [[service]] path = "hello-world" diff --git a/example-new/dispenser.vars b/example/dispenser.vars similarity index 100% rename from example-new/dispenser.vars rename to example/dispenser.vars diff --git a/example-new/hello-world/service.toml b/example/hello-world/service.toml similarity index 94% rename from example-new/hello-world/service.toml rename to example/hello-world/service.toml index c27e61f..93f3514 100644 --- a/example-new/hello-world/service.toml +++ b/example/hello-world/service.toml @@ -11,9 +11,6 @@ cpus = "0.5" # CPU limit (e.g., "0.5", "1.0", "2.0") # Restart policy (optional, defaults to "No") restart = "no" -[[network]] -name = "dispenser-net" - # Dispenser-specific configuration (required) [dispenser] # Don't watch for image updates diff --git a/example/service1/html/index.html b/example/service1/html/index.html new file mode 100644 index 0000000..dce018f --- /dev/null +++ b/example/service1/html/index.html @@ -0,0 +1 @@ +

Welcome to Service 1

diff --git a/example/service1/service.toml b/example/service1/service.toml new file mode 100644 index 0000000..e37dc71 --- /dev/null +++ b/example/service1/service.toml @@ -0,0 +1,28 @@ +# Service configuration for nginx + +# Service metadata (required) +[service] +name = "nginx-service-1" +image = "${docker_io}/nginx:latest" +# Optional: Resource limits +memory = "256m" # Memory limit (e.g., "512m", "1g", "2g") +cpus = "1.0" # CPU limit (e.g., "0.5", "1.0", "2.0") + +[proxy] +host = "service1" +service_port = 80 + +[[volume]] +source = "./html" +target = "/usr/share/nginx/html" + +# Restart policy (optional, defaults to "No") +restart = "always" + +# Dispenser-specific configuration (required) +[dispenser] +# Watch for image updates +watch = true + +# Initialize immediately on startup (default behavior) +initialize = "immediately" diff --git a/example/service2/html/index.html b/example/service2/html/index.html new file mode 100644 index 0000000..6fc9fab --- /dev/null +++ b/example/service2/html/index.html @@ -0,0 +1 @@ +

Welcome to Service 2

diff --git a/example-new/nginx/service.toml b/example/service2/service.toml similarity index 82% rename from example-new/nginx/service.toml rename to example/service2/service.toml index f0b761d..ca8bb86 100644 --- a/example-new/nginx/service.toml +++ b/example/service2/service.toml @@ -2,25 +2,21 @@ # Service metadata (required) [service] -name = "nginx-service" +name = "nginx-service-2" image = "${docker_io}/nginx:latest" # Optional: Resource limits memory = "256m" # Memory limit (e.g., "512m", "1g", "2g") cpus = "1.0" # CPU limit (e.g., "0.5", "1.0", "2.0") -# Port mappings (optional) -[[port]] -host = 8080 -container = 80 +[proxy] +host = "service2" +service_port = 80 + [[volume]] source = "./html" target = "/usr/share/nginx/html" -[[network]] -name = "dispenser-net" - - # Restart policy (optional, defaults to "No") restart = "always" diff --git a/rpm/dispenser.spec index c60a524..13590c7 100644 --- a/rpm/dispenser.spec +++ b/rpm/dispenser.spec @@ -1,7 +1,7 @@ Name: dispenser Version: %{version} Release: 0 -Summary: Continously Deploy services with Docker Compose +Summary: Continuously Deploy Containerized Services License: see /usr/share/doc/dispenser/copyright Distribution: Debian Group: Converted/unknown diff --git a/src/cli.rs index be974c4..eea4741 100644 --- a/src/cli.rs +++ b/src/cli.rs @@ -1,6 +1,6 @@ use std::{path::PathBuf, sync::OnceLock}; -use clap::{Parser, ValueEnum}; +use clap::{Parser, Subcommand, ValueEnum}; /// Continuous delivery for un-complicated infrastructure. #[derive(Parser, Debug)] @@ -21,6 +21,19 @@ pub struct Args { /// Send a signal to the running dispenser instance #[arg(short, long)] pub signal: Option, + + #[command(subcommand)] + pub command: Option, +} + +#[derive(Subcommand, Debug, Clone)] +pub enum Commands { + /// Local development mode with selective service loading. Dev { + /// Only run the specified services. Matches service path name. 
+ #[arg(short, long = "service")] + services: Option>, + }, } #[derive(Clone, Debug, ValueEnum)] diff --git a/src/main.rs b/src/main.rs index ea5fc91..c59dacc 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,57 +1,82 @@ use std::{process::ExitCode, sync::Arc}; +use tokio::sync::{Mutex, Notify}; -use crate::service::{ - manager::{ServiceMangerConfig, ServicesManager}, - vars::ServiceConfigError, +use crate::{ + cli::Commands, + proxy::{acme, run_dummy_proxy, run_proxy, ProxySignals}, + service::{ + manager::{ServiceMangerConfig, ServicesManager}, + vars::ServiceConfigError, + }, }; -use tokio::sync::Mutex; + mod cli; +mod proxy; mod secrets; mod service; mod signals; #[tokio::main] async fn main() -> ExitCode { - if let Some(signal) = &cli::get_cli_args().signal { - return signals::send_signal(signal.clone()); - } - let service_manager_config = match ServiceMangerConfig::try_init().await { - Ok(conf) => conf, - Err(e) => { - match e { - ServiceConfigError::Template((path, template_err)) => { - eprintln!("Could not render {path:#?}: {:#}", template_err); - } - _ => { - eprintln!("Error initializing service manager: {}", e); - } - } - return ExitCode::FAILURE; - } - }; + let args = cli::get_cli_args(); - // If the user set the test flag it - // will just validate the config - if cli::get_cli_args().test { - eprintln!("Dispenser config is ok."); - return ExitCode::SUCCESS; + if let Some(signal) = &args.signal { + return signals::send_signal(signal.clone()).await; } - // Initialize the loggr + // Initialize the logger env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("info")).init(); + let service_filter = match &args.command { + Some(Commands::Dev { services }) => services.as_ref().map(Vec::as_slice), + _ => None, + }; + + if args.test { + match ServiceMangerConfig::try_init(service_filter).await { + Ok(_) => { + eprintln!("Dispenser config is ok."); + return ExitCode::SUCCESS; + } + Err(e) => { + eprintln!("Error validating config: {}", e); + 
return ExitCode::FAILURE; + } + } + } + log::info!("Dispenser running with PID: {}", std::process::id()); - if let Err(err) = std::fs::write( - &cli::get_cli_args().pid_file, - std::process::id().to_string(), - ) { + if let Err(err) = tokio::fs::write(&args.pid_file, std::process::id().to_string()).await { log::error!("Unable to write pid file: {err}"); return ExitCode::FAILURE; } - let manager = match ServicesManager::from_config(service_manager_config).await { - Ok(manager) => Arc::new(manager), + // Signals for lifecycle control + let reload_signal = Arc::new(Notify::new()); + let shutdown_signal = Arc::new(Notify::new()); + let proxy_restart_notify = Arc::new(Notify::new()); + + signals::handle_reload(reload_signal.clone()); + signals::handle_sigint(shutdown_signal.clone()); + + // Initial manager setup + let service_manager_config = match ServiceMangerConfig::try_init(service_filter).await { + Ok(conf) => conf, + Err(e) => { + match e { + ServiceConfigError::Template((path, e)) => { + log::error!("Error rendering: {path:?}"); + log::error!("{e}"); + } + e => log::error!("Failed to initialize config: {e}"), + } + return ExitCode::FAILURE; + } + }; + + let manager = match ServicesManager::from_config(service_manager_config, None).await { + Ok(m) => Arc::new(m), Err(e) => { log::error!("Failed to create services manager: {e}"); return ExitCode::FAILURE; @@ -60,49 +85,101 @@ async fn main() -> ExitCode { if let Err(e) = manager.validate_containers_not_present().await { log::error!("{e}"); - log::error!("It seems that some of the containers declared already exist. This prevents dispenser from properly managing the life-cycle of these containers. Please remove them and restart dispenser."); - std::process::exit(1); + log::error!("Containers already exist. 
Please remove them and restart dispenser."); + return ExitCode::FAILURE; } - // Wrap the manager in a Mutex so we can replace it on reload - let manager_holder = Arc::new(Mutex::new(manager)); + // This is at a restart level. + let proxy_enabled = manager.proxy_enabled(); - // Create a notification channel for reload signals - let reload_signal = Arc::new(tokio::sync::Notify::new()); - let shutdown_signal = Arc::new(tokio::sync::Notify::new()); + let manager_holder = Arc::new(Mutex::new(manager)); + let proxy_signals = ProxySignals::new(); - // Initialize signal handlers for the new system - signals::handle_reload(reload_signal.clone()); - signals::handle_sigint(shutdown_signal.clone()); + // Start dummy proxy to hold the signal lock + if proxy_enabled { + tokio::task::spawn_blocking({ + let signals = proxy_signals.clone(); + move || run_dummy_proxy(signals) + }); + } let _ = sd_notify::notify(true, &[sd_notify::NotifyState::Ready]); - // Main loop: start polling and wait for reload signals loop { + // OUTER LOOP: Manager Lifecycle let current_manager = manager_holder.lock().await.clone(); - tokio::select! { - _ = current_manager.start_polling() => { - // Polling ended normally (shouldn't happen unless cancelled) - log::info!("Polling ended"); + // 1. Polling Task (Maintains 'init' and timer state) + let polling_handle = tokio::spawn({ + let manager = current_manager.clone(); + async move { manager.start_polling().await } + }); + + // 2. ACME Task (Watchdog for certificates) + let acme_handle = proxy_enabled.then(|| { + tokio::spawn(acme::maintain_certificates( + current_manager.clone(), + proxy_restart_notify.clone(), + service_filter.is_some(), // If the filters exists we are in dev mode. 
+ )) + }); + + // Inner proxy loop; + loop { + if proxy_enabled { + // INNER LOOP: Proxy Lifecycle + log::info!("Starting proxy instance..."); + + // Start Proxy (Blocking in a thread) + std::thread::spawn({ + let manager = current_manager.clone(); + let signals = proxy_signals.clone(); + move || run_proxy(manager, signals) + }); + + // Handover: Signal the previous proxy (dummy or old generation) to gracefully upgrade + // This releases the Mutex lock in ProxySignals, allowing the new proxy to start listening. + proxy_signals + .send_signal(pingora::server::ShutdownSignal::GracefulUpgrade) + .await; } - _ = reload_signal.notified() => { - // Reload signal received - if let Err(e) = signals::reload_manager(manager_holder.clone()).await { - log::error!("Reload failed: {e}"); - // Continue with the old manager - } else { - log::info!("Starting new manager..."); - // Continue the loop with the new manager + + tokio::select! { + _ = proxy_restart_notify.notified() => { + log::info!("Certificates updated. Restarting proxy..."); + continue; // inner loop: start a new proxy instance } - } - _ = shutdown_signal.notified() => { - // Reload signal received - if let Err(e) = signals::sigint_manager(manager_holder.clone()).await { - log::error!("Shutdown failed: {e}"); - // Continue with the old manager + _ = reload_signal.notified() => { + log::info!("Reload signal received. Refreshing manager..."); + + // Abort manager-bound tasks + polling_handle.abort(); + acme_handle.map(|t| t.abort()); + + if let Err(e) = signals::reload_manager(manager_holder.clone(), service_filter).await { + log::error!("Reload failed: {e}"); + } + + break; // inner loop -> outer loop to restart manager tasks + } + _ = shutdown_signal.notified() => { + log::info!("Shutdown signal received. 
Exiting..."); + + // Abort manager-bound tasks + polling_handle.abort(); + acme_handle.map(|t| t.abort()); + + let manager = manager_holder.lock().await; + manager.cancel().await; + manager.shutdown().await; + + proxy_signals.send_signal(pingora::server::ShutdownSignal::GracefulTerminate).await; + + let _ = tokio::fs::remove_file(&cli::get_cli_args().pid_file).await; + + // Exit the process + std::process::exit(0); } - std::process::exit(0); } } } diff --git a/src/proxy/acme.rs b/src/proxy/acme.rs new file mode 100644 index 0000000..bd705ed --- /dev/null +++ b/src/proxy/acme.rs @@ -0,0 +1,204 @@ +use std::path::Path; +use std::sync::Arc; +use std::time::Duration; + +use instant_acme::{ + Account, ChallengeType, Identifier, LetsEncrypt, NewAccount, NewOrder, OrderStatus, RetryPolicy, +}; +use log::{error, info}; +use rcgen::{CertificateParams, DistinguishedName, KeyPair, SanType}; +use tokio::sync::Notify; +use x509_parser::prelude::*; + +use crate::service::{file::CertbotSettings, manager::ServicesManager}; + +const CERTS_DIR: &str = ".dispenser/certs"; +const CHALLENGES_DIR: &str = ".dispenser/challenges"; +const RENEW_BEFORE_DAYS: i64 = 30; + +/// Background task that ensures all managed hosts have valid SSL certificates. +/// It supports both a simulation mode (self-signed) and ACME (Let's Encrypt). 
+pub async fn maintain_certificates( + manager: Arc, + notify: Arc, + simulate: bool, +) { + // Ensure directories exist + let _ = tokio::fs::create_dir_all(CERTS_DIR).await; + let _ = tokio::fs::create_dir_all(CHALLENGES_DIR).await; + let settings = manager.get_certbot_settings(); + + loop { + info!("Starting certificate maintenance check..."); + let mut changed = false; + + let proxy_configs = manager.get_proxy_configs(); + for proxy in proxy_configs { + let host = &proxy.host; + + // Skip if manually configured + if proxy.cert_file.is_some() || proxy.key_file.is_some() { + continue; + } + + if simulate { + if ensure_simulated_cert(host).await { + changed = true; + } + } else { + if let Some(settings) = &settings { + match ensure_acme_cert(&settings, host).await { + Ok(true) => changed = true, + Ok(false) => {} + Err(e) => error!("ACME error for {}: {}", host, e), + } + } + } + } + + if changed { + info!("Certificates updated, notifying proxy for reload."); + notify.notify_one(); + } + + // Check every hour + tokio::time::sleep(Duration::from_secs(3600)).await; + } +} + +/// Checks if a certificate exists and is valid for at least 30 days. 
+async fn needs_renewal(host: &str) -> bool { + let cert_path = Path::new(CERTS_DIR).join(format!("{}.crt", host)); + if !cert_path.exists() { + return true; + } + + let Ok(content) = tokio::fs::read(&cert_path).await else { + return true; + }; + + // Simple check using x509-parser + let cert_der = if content.starts_with(b"-----BEGIN CERTIFICATE-----") { + let s = String::from_utf8_lossy(&content); + let lines: Vec<_> = s.lines().filter(|l| !l.starts_with("-----")).collect(); + let b64 = lines.join(""); + base64::Engine::decode(&base64::prelude::BASE64_STANDARD, b64).unwrap_or_default() + } else { + content + }; + + let Ok((_, cert)) = X509Certificate::from_der(&cert_der) else { + return true; + }; + + let now = chrono::Utc::now().timestamp(); + let not_after = cert.validity().not_after.timestamp(); + let remaining_days = (not_after - now) / 86400; + + remaining_days < RENEW_BEFORE_DAYS +} + +async fn ensure_simulated_cert(host: &str) -> bool { + if !needs_renewal(host).await { + return false; + } + + info!("Generating self-signed certificate for {}", host); + + let mut params = CertificateParams::default(); + params.subject_alt_names = vec![SanType::DnsName(host.to_string().try_into().unwrap())]; + let mut dn = DistinguishedName::new(); + dn.push(rcgen::DnType::CommonName, host); + params.distinguished_name = dn; + + let keypair = KeyPair::generate().unwrap(); + let cert = params.self_signed(&keypair).unwrap(); + + let cert_path = Path::new(CERTS_DIR).join(format!("{}.crt", host)); + let key_path = Path::new(CERTS_DIR).join(format!("{}.key", host)); + + tokio::fs::write(cert_path, cert.pem()).await.unwrap(); + tokio::fs::write(key_path, keypair.serialize_pem()) + .await + .unwrap(); + + true +} + +async fn ensure_acme_cert( + settings: &CertbotSettings, + host: &str, +) -> Result> { + if !needs_renewal(host).await { + return Ok(false); + } + + info!("Starting ACME flow for {}", host); + + // 1. 
Setup ACME account + let contact = format!("mailto:{}", settings.email); + let (account, _) = Account::builder()? + .create( + &NewAccount { + contact: &[&contact], + terms_of_service_agreed: true, + only_return_existing: false, + }, + LetsEncrypt::Production.url().to_string(), + None, + ) + .await?; + + // 2. Create order + let identifiers = vec![Identifier::Dns(host.to_string())]; + let mut order = account.new_order(&NewOrder::new(&identifiers)).await?; + + // 3. Handle challenges + let mut auths = order.authorizations(); + while let Some(auth_res) = auths.next().await { + let mut auth = auth_res?; + let mut challenge = auth + .challenge(ChallengeType::Http01) + .ok_or("No HTTP-01 challenge found")?; + + let key_auth = challenge.key_authorization(); + let token = challenge.token.clone(); + + // Save challenge to disk for DispenserProxy to serve + let challenge_path = Path::new(CHALLENGES_DIR).join(&token); + tokio::fs::write(challenge_path, key_auth.as_str()).await?; + + // Tell ACME provider we are ready + challenge.set_ready().await?; + } + + // 4. Poll for completion + let retry_policy = RetryPolicy::new() + .timeout(Duration::from_secs(30)) + .initial_delay(Duration::from_secs(2)); + + let status = order.poll_ready(&retry_policy).await?; + if status != OrderStatus::Ready { + return Err(format!("ACME order failed with status: {:?}", status).into()); + } + + // 5. Finalize and get certificate + // finalize() generates CSR and returns the private key PEM + let key_pem = order.finalize().await?; + + // poll_certificate() waits for the order to become valid and returns the certificate chain PEM + let cert_chain_pem = order.poll_certificate(&retry_policy).await?; + + // 6. 
Save results + let cert_path = Path::new(CERTS_DIR).join(format!("{}.crt", host)); + let key_path = Path::new(CERTS_DIR).join(format!("{}.key", host)); + + tokio::fs::write(cert_path, cert_chain_pem).await?; + tokio::fs::write(key_path, key_pem).await?; + + // Cleanup challenge + let _ = tokio::fs::remove_dir_all(CHALLENGES_DIR).await; + let _ = tokio::fs::create_dir_all(CHALLENGES_DIR).await; + + Ok(true) +} diff --git a/src/proxy/certs.rs b/src/proxy/certs.rs new file mode 100644 index 0000000..c1634f2 --- /dev/null +++ b/src/proxy/certs.rs @@ -0,0 +1,108 @@ +use log::{error, info, warn}; +use openssl::ssl::{SslContext, SslMethod}; +use rcgen::{CertificateParams, DistinguishedName, KeyPair, SanType}; +use std::collections::HashMap; +use std::fs; +use std::path::{Path, PathBuf}; +use std::sync::Arc; + +use crate::service::manager::ServicesManager; + +pub type CertMap = HashMap>; + +/// Loads all certificates for services managed by the ServicesManager. +/// This includes both manually configured certificates and those automatically +/// managed in the .dispenser/certs directory. +pub fn load_all_certificates(manager: &ServicesManager) -> CertMap { + let mut cert_map = HashMap::new(); + let proxy_configs = manager.get_proxy_configs(); + + let certs_dir = Path::new(".dispenser/certs"); + + for proxy in proxy_configs { + let host = proxy.host.clone(); + + // 1. Check for manual overrides in service.toml + if let (Some(cert_path), Some(key_path)) = (&proxy.cert_file, &proxy.key_file) { + match create_ssl_context(cert_path, key_path) { + Ok(context) => { + info!("Loaded manual SSL certificate for {}", host); + cert_map.insert(host, Arc::new(context)); + continue; + } + Err(e) => { + error!("Failed to load manual SSL certificate for {}: {}", host, e); + } + } + } + + // 2. 
Check for automatically managed certificates (ACME or Simulated) + let auto_cert_path = certs_dir.join(format!("{}.crt", host)); + let auto_key_path = certs_dir.join(format!("{}.key", host)); + + if auto_cert_path.exists() && auto_key_path.exists() { + match create_ssl_context(&auto_cert_path, &auto_key_path) { + Ok(context) => { + info!("Loaded automatic SSL certificate for {}", host); + cert_map.insert(host, Arc::new(context)); + } + Err(e) => { + error!( + "Failed to load automatic SSL certificate for {}: {}", + host, e + ); + } + } + } else { + warn!("No SSL certificate found for {}", host); + } + } + + cert_map +} + +/// Ensures a default self-signed certificate exists for the proxy fallback. +/// Returns (cert_path, key_path). +pub fn ensure_default_cert() -> (PathBuf, PathBuf) { + let dir = Path::new(".dispenser"); + let cert_path = dir.join("default.crt"); + let key_path = dir.join("default.key"); + + if cert_path.exists() && key_path.exists() { + return (cert_path, key_path); + } + + let _ = fs::create_dir_all(dir); + + info!("Generating default fallback self-signed certificate..."); + + let mut params = CertificateParams::default(); + params.subject_alt_names = vec![SanType::DnsName( + "localhost".to_string().try_into().unwrap(), + )]; + let mut dn = DistinguishedName::new(); + dn.push(rcgen::DnType::CommonName, "dispenser-fallback"); + params.distinguished_name = dn; + + let keypair = KeyPair::generate().unwrap(); + let cert = params.self_signed(&keypair).unwrap(); + + fs::write(&cert_path, cert.pem()).unwrap(); + fs::write(&key_path, keypair.serialize_pem()).unwrap(); + + (cert_path, key_path) +} + +/// Helper to create a Pingora-compatible SslContext from cert and key files. 
+fn create_ssl_context( + cert_path: &Path, + key_path: &Path, +) -> Result> { + let mut builder = openssl::ssl::SslAcceptor::mozilla_intermediate(SslMethod::tls())?; + + builder.set_certificate_chain_file(cert_path)?; + builder.set_private_key_file(key_path, openssl::ssl::SslFiletype::PEM)?; + builder.check_private_key()?; + + Ok(builder.build().into_context()) +} diff --git a/src/proxy/mod.rs b/src/proxy/mod.rs new file mode 100644 index 0000000..1db728b --- /dev/null +++ b/src/proxy/mod.rs @@ -0,0 +1,205 @@ +pub mod acme; +pub mod certs; + +use async_trait::async_trait; +use http::{header, Response, StatusCode}; +use log::{debug, info}; +use openssl::ssl::{NameType, SniError}; +use pingora::apps::http_app::ServeHttp; +use pingora::listeners::tls::TlsSettings; +use pingora::protocols::http::ServerSession; +use pingora::server::{RunArgs, ShutdownSignal}; +use pingora::services::listening::Service; +use pingora_error::ErrorType::HTTPStatus; +use std::sync::Arc; + +use pingora_core::server::configuration::Opt; +use pingora_core::server::Server; + +use pingora::prelude::*; + +use crate::service::manager::ServicesManager; + +pub struct AcmeService; + +#[async_trait] +impl ServeHttp for AcmeService { + async fn response(&self, http_stream: &mut ServerSession) -> Response> { + let path = http_stream.req_header().uri.path(); + + // 1. 
Handle ACME challenges + if path.starts_with("/.well-known/acme-challenge/") { + let token = path + .strip_prefix("/.well-known/acme-challenge/") + .unwrap_or(""); + let challenge_path = std::path::Path::new(".dispenser/challenges").join(token); + + if let Ok(content) = tokio::fs::read(challenge_path).await { + return Response::builder() + .status(StatusCode::OK) + .header(header::CONTENT_TYPE, "text/plain") + .header(header::CONTENT_LENGTH, content.len()) + .body(content) + .unwrap(); + } + } + + let host_header = http_stream + .get_header(header::HOST) + .unwrap() + .to_str() + .unwrap(); + debug!("host header: {host_header}"); + + let path_and_query = http_stream + .req_header() + .uri + .path_and_query() + .map(|pq| pq.as_str()) + .unwrap_or("/"); + + let body = "301 Moved Permanently" + .as_bytes() + .to_owned(); + + Response::builder() + .status(StatusCode::MOVED_PERMANENTLY) + .header(header::CONTENT_TYPE, "text/html") + .header(header::CONTENT_LENGTH, body.len()) + .header( + header::LOCATION, + format!("https://{}{}", host_header, path_and_query), + ) + .body(body) + .unwrap() + } +} + +/// Router that holds multiple Service configurations and routes based on SNI/Host +pub struct DispenserProxy { + pub services_manager: Arc, +} + +#[async_trait] +impl ProxyHttp for DispenserProxy { + type CTX = (); + + fn new_ctx(&self) {} + + async fn upstream_peer(&self, session: &mut Session, _ctx: &mut ()) -> Result> { + // Get the Host header from the request + let host = session.req_header().uri.host().or_else(|| { + session + .req_header() + .headers + .get("host") + .and_then(|h| h.to_str().ok()) + }); + + let upstream = host + .and_then(|host| self.services_manager.resolve_host(host)) + .ok_or_else(|| { + Error::explain( + HTTPStatus(502), + format!("No upstream configured for host: {:?}", host), + ) + })?; + + let peer = Box::new(HttpPeer::new(upstream, false, String::new())); + + Ok(peer) + } +} + +#[derive(Debug, Clone)] +pub struct ProxySignals { + receiver: 
Arc>>, + sender: tokio::sync::mpsc::Sender, +} + +impl ProxySignals { + pub fn new() -> Self { + let (tx, rx) = tokio::sync::mpsc::channel(1); + ProxySignals { + sender: tx, + receiver: Arc::new(tokio::sync::Mutex::new(rx)), + } + } + pub async fn send_signal(&self, signal: ShutdownSignal) { + let _ = self.sender.send(signal).await; + } +} + +#[async_trait] +impl pingora::server::ShutdownSignalWatch for ProxySignals { + async fn recv(&self) -> ShutdownSignal { + let mut rx = self.receiver.lock().await; + match rx.recv().await { + None => unreachable!(), + Some(signal) => signal, + } + } +} + +pub fn run_dummy_proxy(signals: ProxySignals) { + let opt = Opt::default(); + let mut my_server = Server::new(Some(opt)).unwrap(); + my_server.bootstrap(); + my_server.run(RunArgs { + shutdown_signal: Box::new(signals), + }); +} + +pub fn run_proxy(services_manager: Arc, signals: ProxySignals) { + let opt = Opt::default(); + let mut my_server = Server::new(Some(opt)).unwrap(); + + // 1. Load certificates + let cert_map = Arc::new(certs::load_all_certificates(&services_manager)); + let (default_cert, default_key) = certs::ensure_default_cert(); + + // 2. Setup Proxy + let mut proxy_service = http_proxy_service( + &my_server.configuration, + DispenserProxy { + services_manager: services_manager.clone(), + }, + ); + + // 3. 
Configure TLS with SNI callback + // We use intermediate settings and then override with callback + let mut tls_settings = TlsSettings::intermediate( + default_cert.to_str().unwrap(), + default_key.to_str().unwrap(), + ) + .expect("Failed to load default fallback certificate"); + + tls_settings.enable_h2(); + + // Set SNI callback + let cert_map_for_sni = cert_map.clone(); + tls_settings.set_servername_callback(move |ssl, _| { + let host = ssl.servername(NameType::HOST_NAME); + debug!("SNI callback for host: {:?}", host); + if let Some(host) = host { + if let Some(ctx) = cert_map_for_sni.get(host) { + let _ = ssl.set_ssl_context(ctx); + } + } + Ok::<(), SniError>(()) + }); + + proxy_service.add_tls_with_settings("0.0.0.0:443", None, tls_settings); + + let mut acme_service = Service::new("Echo Service HTTP".to_string(), AcmeService); + acme_service.add_tcp("0.0.0.0:80"); + + my_server.add_service(proxy_service); + my_server.add_service(acme_service); + my_server.bootstrap(); + + info!("Proxy starting on port 443"); + my_server.run(RunArgs { + shutdown_signal: Box::new(signals), + }); +} diff --git a/src/service/cron_watcher.rs b/src/service/cron_watcher.rs new file mode 100644 index 0000000..4e67048 --- /dev/null +++ b/src/service/cron_watcher.rs @@ -0,0 +1,211 @@ +use std::sync::atomic::{AtomicI64, Ordering}; + +use chrono::{DateTime, Local}; +use cron::Schedule; + +#[derive(Debug)] +pub struct CronWatcher { + schedule: Schedule, + next: AtomicI64, +} + +const NONE_TIMESTAMP: i64 = i64::MIN; + +impl CronWatcher { + pub fn new(schedule: &Schedule) -> Self { + let schedule = schedule.clone(); + let next = schedule + .upcoming(Local) + .next() + .map(|d| d.timestamp()) + .unwrap_or(NONE_TIMESTAMP); + let next = AtomicI64::new(next); + Self { schedule, next } + } + + pub fn is_ready(&self) -> bool { + let current = self.next.load(Ordering::SeqCst); + if current == NONE_TIMESTAMP { + return false; + } + + let next_dt = DateTime::from_timestamp(current, 0).map(|dt| 
dt.with_timezone(&Local)); + + match next_dt { + Some(next) if chrono::Local::now() >= next => { + let new_next = self + .schedule + .upcoming(Local) + .next() + .map(|d| d.timestamp()) + .unwrap_or(NONE_TIMESTAMP); + + // Only succeed if no other thread beat us to it + self.next + .compare_exchange(current, new_next, Ordering::SeqCst, Ordering::SeqCst) + .is_ok() + } + _ => false, + } + } +} + +impl PartialEq for CronWatcher { + fn eq(&self, other: &Self) -> bool { + self.schedule == other.schedule + && self.next.load(Ordering::SeqCst) == other.next.load(Ordering::SeqCst) + } +} + +impl Eq for CronWatcher {} + +#[cfg(test)] +mod tests { + use super::*; + use std::str::FromStr; + + fn make_schedule(expr: &str) -> Schedule { + Schedule::from_str(expr).expect("Failed to parse cron expression") + } + + #[test] + fn test_new_initializes_with_next_timestamp() { + // Every minute schedule - should always have a next occurrence + let schedule = make_schedule("* * * * * *"); + let watcher = CronWatcher::new(&schedule); + + let next = watcher.next.load(Ordering::SeqCst); + assert!(next != NONE_TIMESTAMP, "Expected next timestamp to be set"); + + assert!( + next >= chrono::Local::now().timestamp(), + "Next timestamp should be in the future or now" + ); + } + + #[test] + fn test_next_none_timestamp() { + let schedule = make_schedule("* * * * * *"); + let watcher = CronWatcher::new(&schedule); + + // Manually set the next timestamp to NONE_TIMESTAMP + watcher.next.store(NONE_TIMESTAMP, Ordering::SeqCst); + + assert_eq!( + watcher.next.load(Ordering::SeqCst), + NONE_TIMESTAMP, + "Expected NONE_TIMESTAMP" + ); + } + + #[test] + fn test_next_is_valid_timestamp() { + let schedule = make_schedule("* * * * * *"); + let watcher = CronWatcher::new(&schedule); + + let next = watcher.next.load(Ordering::SeqCst); + assert!(next != NONE_TIMESTAMP); + + // Verify the returned timestamp is reasonable (within the next second) + let now = chrono::Local::now().timestamp(); + let diff = next - 
now; + assert!( + diff <= 1, + "Next occurrence should be within 1 second for per-second schedule" + ); + } + + #[test] + fn test_is_ready_returns_false_when_not_yet_time() { + // Schedule far in the future (year 2099) + let schedule = make_schedule("0 0 0 1 1 * 2099"); + let watcher = CronWatcher::new(&schedule); + + assert!( + !watcher.is_ready(), + "Should not be ready when next time is in the future" + ); + } + + #[test] + fn test_is_ready_returns_true_and_updates_when_time_passed() { + let schedule = make_schedule("* * * * * *"); + let watcher = CronWatcher::new(&schedule); + + // Set the next timestamp to a time in the past + let past_timestamp = chrono::Local::now().timestamp() - 10; + watcher.next.store(past_timestamp, Ordering::SeqCst); + + let old_next = watcher.next.load(Ordering::SeqCst); + assert!(watcher.is_ready(), "Should be ready when time has passed"); + + // Verify the next timestamp was updated + let new_next = watcher.next.load(Ordering::SeqCst); + assert!( + new_next > old_next, + "Next timestamp should be updated after is_ready returns true" + ); + } + + #[test] + fn test_is_ready_returns_false_when_next_is_none() { + let schedule = make_schedule("* * * * * *"); + let watcher = CronWatcher::new(&schedule); + + // Set to NONE_TIMESTAMP + watcher.next.store(NONE_TIMESTAMP, Ordering::SeqCst); + + assert!( + !watcher.is_ready(), + "Should not be ready when next timestamp is None" + ); + } + + #[test] + fn test_partial_eq_same_schedule_same_next() { + let schedule = make_schedule("0 0 * * * *"); + let watcher1 = CronWatcher::new(&schedule); + let watcher2 = CronWatcher::new(&schedule); + + // Both should have the same next timestamp since they use the same schedule + assert_eq!(watcher1, watcher2); + } + + #[test] + fn test_partial_eq_same_schedule_different_next() { + let schedule = make_schedule("0 0 * * * *"); + let watcher1 = CronWatcher::new(&schedule); + let watcher2 = CronWatcher::new(&schedule); + + // Modify one watcher's next timestamp + 
watcher2.next.store(12345, Ordering::SeqCst); + + assert_ne!(watcher1, watcher2); + } + + #[test] + fn test_partial_eq_different_schedule() { + let schedule1 = make_schedule("0 0 * * * *"); + let schedule2 = make_schedule("0 30 * * * *"); + let watcher1 = CronWatcher::new(&schedule1); + let watcher2 = CronWatcher::new(&schedule2); + + assert_ne!(watcher1, watcher2); + } + + #[test] + fn test_is_ready_boundary_condition_exact_time() { + let schedule = make_schedule("* * * * * *"); + let watcher = CronWatcher::new(&schedule); + + // Set next to exactly now + let now_timestamp = chrono::Local::now().timestamp(); + watcher.next.store(now_timestamp, Ordering::SeqCst); + + // Should be ready since now >= next + assert!( + watcher.is_ready(), + "Should be ready when current time equals next time" + ); + } +} diff --git a/src/service/docker.rs b/src/service/docker.rs new file mode 100644 index 0000000..58d55cd --- /dev/null +++ b/src/service/docker.rs @@ -0,0 +1,20 @@ +//! Docker client module using bollard. +//! +//! This module provides a shared Docker client instance and helper functions +//! for interacting with Docker via the bollard API. + +use bollard::Docker; +use std::sync::OnceLock; + +static DOCKER_CLIENT: OnceLock = OnceLock::new(); + +/// Get a reference to the shared Docker client. +/// +/// This lazily initializes the Docker client on first use. +/// The client connects to Docker using the default connection method +/// (Unix socket on Linux/macOS, named pipe on Windows). 
+pub fn get_docker() -> &'static Docker { + DOCKER_CLIENT.get_or_init(|| { + Docker::connect_with_local_defaults().expect("Failed to connect to Docker daemon") + }) +} diff --git a/src/service/file.rs b/src/service/file.rs index 0195a83..5df9656 100644 --- a/src/service/file.rs +++ b/src/service/file.rs @@ -1,18 +1,41 @@ use cron::Schedule; use serde::{Deserialize, Serialize}; -use std::{collections::HashMap, path::PathBuf}; +use std::{ + collections::HashMap, + path::{Path, PathBuf}, +}; use super::vars::{render_template, ServiceConfigError, ServiceVarsMaterialized}; #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] pub struct EntrypointFile { - #[serde(rename = "service")] + #[serde(rename = "service", default)] pub services: Vec, - #[serde(rename = "network")] + #[serde(rename = "network", default)] pub networks: Vec, + #[serde(default)] + pub proxy: GlobalProxyConfig, /// Delay in seconds between polling for new images (default: 60) #[serde(default = "default_delay")] pub delay: u64, + pub certbot: Option, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct GlobalProxyConfig { + #[serde(default = "default_true")] + pub enabled: bool, +} + +impl Default for GlobalProxyConfig { + fn default() -> Self { + Self { enabled: true } + } +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct CertbotSettings { + pub email: String, } fn default_delay() -> u64 { @@ -21,10 +44,8 @@ fn default_delay() -> u64 { impl EntrypointFile { pub async fn try_init(vars: &ServiceVarsMaterialized) -> Result { - use std::io::Read; - let mut config = String::new(); let path = crate::cli::get_cli_args().config.clone(); - std::fs::File::open(&path)?.read_to_string(&mut config)?; + let config = tokio::fs::read_to_string(&path).await?; // Render the template with variables let rendered_config = @@ -61,6 +82,26 @@ fn default_true() -> bool { true } +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct 
ProxySettings { + /// Example: example.com, something.dispenser.org + /// + /// Equivalent to nginx server_name but without wildcards. + /// + /// TODO: Could we choose a better name? + /// + /// TODO: Document this + pub host: String, + /// The port of the service running inside the container. + /// The dispenser reverse proxy will send HTTP/WebSocket traffic + /// to this port. + /// + /// TODO: Can we have a better name for this config value? + pub service_port: u16, + pub cert_file: Option, + pub key_file: Option, +} + #[derive(Debug, Clone, Serialize, Deserialize, Default, PartialEq, Eq)] pub enum NetworkDriver { #[default] @@ -100,6 +141,8 @@ pub struct ServiceFile { pub dispenser: DispenserConfig, #[serde(default)] pub depends_on: HashMap, + #[serde(default)] + pub proxy: Option, } /// Defines when a service should be initialized. @@ -182,14 +225,60 @@ pub struct PortEntry { pub container: u16, } +#[derive(Debug, Clone, Serialize, PartialEq, Eq)] +#[serde(untagged)] +pub enum VolumeSource { + Name(String), + Path(PathBuf), +} + +impl<'de> Deserialize<'de> for VolumeSource { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + let raw = String::deserialize(deserializer)?; + if raw.contains('/') { + return Ok(Self::Path(PathBuf::from(raw))); + } + Ok(Self::Name(raw)) + } +} + #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] pub struct VolumeEntry { - pub source: String, + pub source: VolumeSource, pub target: String, #[serde(default)] pub readonly: bool, } +impl VolumeEntry { + // If the source is a path, returns the + // absolute path to the path entry relative to + // the `service.toml` file. If it's a volume name + // it returns the volume name directly. 
+ pub fn normalized_source(&self, wd: &Path) -> Result { + // Since this type is just a string behind the scenes + // we can unwrap and guarantee utf-8 + match &self.source { + VolumeSource::Path(path) => { + if Path::new(path).is_absolute() { + return Ok(String::from_utf8( + path.clone().into_os_string().into_encoded_bytes(), + )?); + } + Ok(String::from_utf8( + std::path::absolute(wd.join(path))? + .into_os_string() + .into_encoded_bytes(), + )?) + } + VolumeSource::Name(name) => Ok(name.clone()), + } + } +} + #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] pub struct ServiceEntry { pub name: String, diff --git a/src/service/instance.rs b/src/service/instance.rs index 851438b..6801339 100644 --- a/src/service/instance.rs +++ b/src/service/instance.rs @@ -1,41 +1,33 @@ -use std::{collections::HashMap, path::PathBuf, time::Duration}; +use std::net::{Ipv4Addr, SocketAddrV4}; +use std::{collections::HashMap, path::PathBuf, sync::Arc, time::Duration}; -use chrono::{DateTime, Local}; -use cron::Schedule; +use bollard::models::{ + ContainerCreateBody, EndpointIpamConfig, EndpointSettings, HostConfig, NetworkConnectRequest, + NetworkingConfig, PortBinding, RestartPolicy, RestartPolicyNameEnum, +}; +use bollard::query_parameters::{ + CreateContainerOptions, CreateContainerOptionsBuilder, CreateImageOptions, + CreateImageOptionsBuilder, InspectContainerOptions, InspectContainerOptionsBuilder, + RemoveContainerOptions, RemoveContainerOptionsBuilder, StartContainerOptions, + StartContainerOptionsBuilder, StopContainerOptions, StopContainerOptionsBuilder, +}; +use futures_util::StreamExt; +use crate::service::cron_watcher::CronWatcher; +use crate::service::file::ProxySettings; +use crate::service::vars::ServiceConfigError; use crate::service::{ + docker::get_docker, file::{ DependsOnCondition, DispenserConfig, Initialize, Network, PortEntry, PullOptions, Restart, ServiceEntry, VolumeEntry, }, manifest::{ImageWatcher, ImageWatcherStatus}, + 
network::DEFAULT_NETWORK_NAME, }; -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct CronWatcher { - schedule: Schedule, - next: Option>, -} - -impl CronWatcher { - pub fn new(schedule: &Schedule) -> Self { - let schedule = schedule.clone(); - let next = schedule.upcoming(Local).next(); - Self { schedule, next } - } - fn is_ready(&mut self) -> bool { - match self.next { - Some(next) if chrono::Local::now() >= next => { - self.next = self.schedule.upcoming(Local).next(); - true - } - Some(_) | None => false, - } - } -} - -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct ServiceInstance { +#[derive(Debug, PartialEq, Eq)] +pub struct ServiceInstanceConfig { pub dir: PathBuf, pub service: ServiceEntry, pub ports: Vec, @@ -45,6 +37,15 @@ pub struct ServiceInstance { pub network: Vec, pub dispenser: DispenserConfig, pub depends_on: HashMap, + pub proxy: Option, + /// The static IP address assigned to this service on the dispenser network. + /// This is managed by dispenser's IPAM to ensure stability across restarts. 
+ pub assigned_ip: Ipv4Addr, +} + +#[derive(Debug)] +pub struct ServiceInstance { + pub config: Arc, pub cron_watcher: Option, pub image_watcher: Option, } @@ -56,41 +57,39 @@ pub enum ContainerStatus { NotFound, } -/// This function queries the status of a container +/// This function queries the status of a container using bollard /// Returns whether it's up, exited successfully (0 exit status), or failed -async fn get_container_status(container_name: &str) -> Result { - let output = tokio::process::Command::new("docker") - .args([ - "inspect", - "--format", - "{{.State.Status}},{{.State.ExitCode}}", - container_name, - ]) - .output() - .await?; - - if !output.status.success() { - return Ok(ContainerStatus::NotFound); - } - - let status_str = String::from_utf8_lossy(&output.stdout); - let parts: Vec<&str> = status_str.trim().split(',').collect(); - - match parts.as_slice() { - [status, _exit_code] if *status == "running" => Ok(ContainerStatus::Running), - [_, exit_code] => { - let code = exit_code.parse::().unwrap_or(-1); - Ok(ContainerStatus::Exited(code)) +async fn get_container_status(container_name: &str) -> Result { + let docker = get_docker(); + + let options: InspectContainerOptions = InspectContainerOptionsBuilder::new().build(); + + match docker + .inspect_container(container_name, Some(options)) + .await + { + Ok(info) => { + if let Some(state) = info.state { + if state.running.unwrap_or(false) { + return Ok(ContainerStatus::Running); + } + let exit_code = state.exit_code.unwrap_or(-1) as i32; + return Ok(ContainerStatus::Exited(exit_code)); + } + Ok(ContainerStatus::NotFound) } - _ => Ok(ContainerStatus::NotFound), + Err(bollard::errors::Error::DockerResponseServerError { + status_code: 404, .. 
+ }) => Ok(ContainerStatus::NotFound), + Err(e) => Err(ServiceConfigError::DockerApi(e)), } } impl ServiceInstance { - pub async fn run_container(&self) -> Result<(), std::io::Error> { - let mut depends_on_conditions = Vec::with_capacity(self.depends_on.len()); + pub async fn run_container(&self) -> Result<(), ServiceConfigError> { + let mut depends_on_conditions = Vec::with_capacity(self.config.depends_on.len()); loop { - for (container, condition) in &self.depends_on { + for (container, condition) in &self.config.depends_on { let status = match get_container_status(container).await { Ok(status) => match condition { DependsOnCondition::ServiceStarted => { @@ -105,7 +104,7 @@ impl ServiceInstance { if !status { log::info!( "Service {} is waiting for {} ({:?})", - self.service.name, + self.config.service.name, container, condition ); @@ -119,193 +118,319 @@ impl ServiceInstance { tokio::time::sleep(Duration::from_secs(1)).await; } - if self.dispenser.pull == PullOptions::Always || self.container_does_not_exist().await { + if self.config.dispenser.pull == PullOptions::Always + || self.container_does_not_exist().await + { self.recreate_container().await?; } - let output = tokio::process::Command::new("docker") - .args(["start", &self.service.name]) - .output() - .await?; - - if output.status.success() { - log::info!("Container {} started successfully", self.service.name); - Ok(()) - } else { - let error_msg = String::from_utf8_lossy(&output.stderr); - log::error!( - "Failed to start container {}: {}", - self.service.name, - error_msg - ); - Err(std::io::Error::other( - format!("Failed to start container: {}", error_msg), - )) - } - } - pub async fn pull_image(&self) -> Result<(), std::io::Error> { - log::info!("Pulling image: {}", self.service.image); - let output = tokio::process::Command::new("docker") - .args(["pull", &self.service.image]) - .output() - .await?; - - if output.status.success() { - log::info!("Image {} pulled successfully", self.service.image); - 
Ok(()) - } else { - let error_msg = String::from_utf8_lossy(&output.stderr); - log::error!("Failed to pull image {}: {}", self.service.image, error_msg); - Err(std::io::Error::other( - format!("Failed to pull image: {}", error_msg), - )) - } - } + let docker = get_docker(); - pub async fn stop_container(&self) -> Result<(), std::io::Error> { - log::info!("Stopping container: {}", self.service.name); - let output = tokio::process::Command::new("docker") - .args(["stop", &self.service.name]) - .output() - .await?; - - if output.status.success() { - log::info!("Container {} stopped successfully", self.service.name); - Ok(()) - } else { - let error_msg = String::from_utf8_lossy(&output.stderr); - log::warn!( - "Failed to stop container {}: {}", - self.service.name, - error_msg - ); - Err(std::io::Error::other( - format!("Failed to warn container: {}", error_msg), - )) - } + let options: StartContainerOptions = StartContainerOptionsBuilder::new().build(); + + docker + .start_container(&self.config.service.name, Some(options)) + .await + .inspect_err(|e| { + log::error!( + "Failed to start container {}: {}", + self.config.service.name, + e + ); + })?; + + log::info!( + "Container {} started successfully", + self.config.service.name + ); + + Ok(()) } - pub async fn remove_container(&self) -> Result<(), std::io::Error> { - log::info!("Removing container: {}", self.service.name); - let output = tokio::process::Command::new("docker") - .args(["rm", "-f", &self.service.name]) - .output() - .await?; - - if output.status.success() { - log::info!("Container {} removed successfully", self.service.name); - Ok(()) - } else { - let error_msg = String::from_utf8_lossy(&output.stderr); - log::error!( - "Failed to remove container {}: {}", - self.service.name, - error_msg - ); - Err(std::io::Error::other( - format!("Failed to remove container: {}", error_msg), - )) - } + /// Get the socket address for this service if proxy is configured. 
+ /// The address is computed directly from the static IP and service port. + pub fn get_socket_addr(&self) -> Option { + self.config.proxy.as_ref().map(|proxy_settings| { + SocketAddrV4::new(self.config.assigned_ip, proxy_settings.service_port) + }) } - pub async fn create_container(&self) -> Result<(), std::io::Error> { - log::info!("Creating container: {}", self.service.name); + pub async fn pull_image(&self) -> Result<(), ServiceConfigError> { + log::info!("Pulling image: {}", self.config.service.image); + let docker = get_docker(); - let mut cmd = tokio::process::Command::new("docker"); - cmd.arg("create"); - cmd.args(["--name", &self.service.name]); + // Parse image name and tag + let (image, tag) = parse_image_reference(&self.config.service.image); - // Add restart policy - match self.restart { - Restart::Always => cmd.args(["--restart", "always"]), - Restart::No => cmd.args(["--restart", "no"]), - Restart::OnFailure => cmd.args(["--restart", "on-failure"]), - Restart::UnlessStopped => cmd.args(["--restart", "unless-stopped"]), - }; + let options: CreateImageOptions = CreateImageOptionsBuilder::new() + .from_image(image) + .tag(tag) + .build(); - // Add port mappings - for port in &self.ports { - cmd.args(["-p", &format!("{}:{}", port.host, port.container)]); - } + let mut stream = docker.create_image(Some(options), None, None); - // Add volume mappings - for volume in &self.volume { - let mount_str = if volume.readonly { - format!("{}:{}:ro", volume.source, volume.target) - } else { - format!("{}:{}", volume.source, volume.target) - }; - cmd.args(["-v", &mount_str]); + while let Some(result) = stream.next().await { + match result { + Ok(info) => { + if let Some(status) = info.status { + log::debug!("Pull status: {}", status); + } + } + Err(e) => { + log::error!("Failed to pull image {}: {}", self.config.service.image, e); + return Err(ServiceConfigError::DockerApi(e)); + } + } } - // Add environment variables - for (key, value) in &self.env { - 
cmd.args(["-e", &format!("{}={}", key, value)]); - } + log::info!("Image {} pulled successfully", self.config.service.image); + Ok(()) + } - // Add networks - for network in &self.network { - cmd.args(["--network", &network.name]); - } + pub async fn stop_container(&self) -> Result<(), ServiceConfigError> { + log::info!("Stopping container: {}", self.config.service.name); + let docker = get_docker(); - // Add resource limits - if let Some(memory) = &self.service.memory { - cmd.args(["--memory", memory]); - } - if let Some(cpus) = &self.service.cpus { - cmd.args(["--cpus", cpus]); - } + let options: StopContainerOptions = StopContainerOptionsBuilder::new().t(10).build(); - // Add working directory - if let Some(working_dir) = &self.service.working_dir { - cmd.args(["--workdir", working_dir]); + match docker + .stop_container(&self.config.service.name, Some(options)) + .await + { + Ok(_) => { + log::info!( + "Container {} stopped successfully", + self.config.service.name + ); + Ok(()) + } + Err(bollard::errors::Error::DockerResponseServerError { + status_code: 404, .. + }) => { + log::warn!( + "Container {} not found, skipping stop", + self.config.service.name + ); + Ok(()) + } + Err(bollard::errors::Error::DockerResponseServerError { + status_code: 304, .. 
+ }) => { + log::info!("Container {} already stopped", self.config.service.name); + Ok(()) + } + Err(e) => { + log::warn!( + "Failed to stop container {}: {}", + self.config.service.name, + e + ); + Err(ServiceConfigError::DockerApi(e)) + } } + } - // Add user - if let Some(user) = &self.service.user { - cmd.args(["--user", user]); - } + pub async fn remove_container(&self) -> Result<(), ServiceConfigError> { + log::info!("Removing container: {}", self.config.service.name); + let docker = get_docker(); + + let options: RemoveContainerOptions = + RemoveContainerOptionsBuilder::new().force(true).build(); - // Add hostname - if let Some(hostname) = &self.service.hostname { - cmd.args(["--hostname", hostname]); + match docker + .remove_container(&self.config.service.name, Some(options)) + .await + { + Ok(_) => { + log::info!( + "Container {} removed successfully", + self.config.service.name + ); + Ok(()) + } + Err(bollard::errors::Error::DockerResponseServerError { + status_code: 404, .. + }) => { + log::info!( + "Container {} not found, skipping removal", + self.config.service.name + ); + Ok(()) + } + Err(e) => { + log::error!( + "Failed to remove container {}: {}", + self.config.service.name, + e + ); + Err(ServiceConfigError::DockerApi(e)) + } } + } - // Add entrypoint if specified - if let Some(entrypoint) = &self.service.entrypoint { - cmd.arg("--entrypoint"); - cmd.arg(entrypoint.join(" ")); + pub async fn create_container(&self) -> Result<(), ServiceConfigError> { + log::info!("Creating container: {}", self.config.service.name); + let docker = get_docker(); + + // Build port bindings + let mut port_bindings: HashMap>> = HashMap::new(); + let mut exposed_ports: HashMap> = HashMap::new(); + + for port in &self.config.ports { + let container_port = format!("{}/tcp", port.container); + exposed_ports.insert(container_port.clone(), HashMap::new()); + port_bindings.insert( + container_port, + Some(vec![PortBinding { + host_ip: Some("0.0.0.0".to_string()), + host_port: 
Some(port.host.to_string()), + }]), + ); } - // Add the image - cmd.arg(&self.service.image); + // Build volume bindings + let binds: Vec = self + .config + .volume + .iter() + .map(|v| { + let source = v.normalized_source(&self.config.dir)?; + if v.readonly { + Ok(format!("{}:{}:ro", source, v.target)) + } else { + Ok(format!("{}:{}", source, v.target)) + } + }) + .collect::>()?; + + // Build environment variables + let env: Vec = self + .config + .env + .iter() + .map(|(k, v)| format!("{}={}", k, v)) + .collect(); + + // Build restart policy + let restart_policy = match self.config.restart { + Restart::Always => Some(RestartPolicy { + name: Some(RestartPolicyNameEnum::ALWAYS), + maximum_retry_count: None, + }), + Restart::No => Some(RestartPolicy { + name: Some(RestartPolicyNameEnum::NO), + maximum_retry_count: None, + }), + Restart::OnFailure => Some(RestartPolicy { + name: Some(RestartPolicyNameEnum::ON_FAILURE), + maximum_retry_count: None, + }), + Restart::UnlessStopped => Some(RestartPolicy { + name: Some(RestartPolicyNameEnum::UNLESS_STOPPED), + maximum_retry_count: None, + }), + }; - if let Some(command) = &self.service.command { - cmd.args(command); - } + // Parse memory limit + let memory = self + .config + .service + .memory + .as_ref() + .map(|m| parse_memory_limit(m)); + + // Parse CPU limit (convert to nano CPUs) + let nano_cpus = self.config.service.cpus.as_ref().map(|c| { + let cpus: f64 = c.parse().unwrap_or(1.0); + (cpus * 1_000_000_000.0) as i64 + }); + + // Build host config + // Always connect to the default dispenser network first + let host_config = HostConfig { + binds: if binds.is_empty() { None } else { Some(binds) }, + port_bindings: if port_bindings.is_empty() { + None + } else { + Some(port_bindings) + }, + restart_policy, + memory, + nano_cpus, + network_mode: Some(DEFAULT_NETWORK_NAME.to_string()), + ..Default::default() + }; - // Set the directory for the command - cmd.current_dir(&self.dir); + // Build networking config to attach 
to the default dispenser network with static IP + let mut endpoints_config: HashMap = HashMap::new(); + endpoints_config.insert( + DEFAULT_NETWORK_NAME.to_string(), + EndpointSettings { + ipam_config: Some(EndpointIpamConfig { + ipv4_address: Some(self.config.assigned_ip.to_string()), + ..Default::default() + }), + ..Default::default() + }, + ); + + let networking_config = NetworkingConfig { + endpoints_config: Some(endpoints_config), + }; - let output = cmd.output().await?; + // Build container config + let config = ContainerCreateBody { + image: Some(self.config.service.image.clone()), + hostname: self.config.service.hostname.clone(), + user: self.config.service.user.clone(), + working_dir: self.config.service.working_dir.clone(), + env: if env.is_empty() { None } else { Some(env) }, + cmd: self.config.service.command.clone(), + entrypoint: self.config.service.entrypoint.clone(), + exposed_ports: if exposed_ports.is_empty() { + None + } else { + Some(exposed_ports) + }, + host_config: Some(host_config), + networking_config: Some(networking_config), + ..Default::default() + }; - if output.status.success() { - log::info!("Container {} created successfully", self.service.name); - Ok(()) - } else { - let error_msg = String::from_utf8_lossy(&output.stderr); - log::error!( - "Failed to create container {}: {}", - self.service.name, - error_msg - ); - Err(std::io::Error::other( - format!("Failed to create container: {}", error_msg), - )) + let options: CreateContainerOptions = CreateContainerOptionsBuilder::new() + .name(&self.config.service.name) + .build(); + + docker.create_container(Some(options), config).await?; + + // Connect to user-defined networks (default dispenser network is already connected) + for network in &self.config.network { + let connect_request = NetworkConnectRequest { + container: Some(self.config.service.name.clone()), + endpoint_config: Some(EndpointSettings::default()), + }; + + docker + .connect_network(&network.name, connect_request) + .await 
+ .inspect_err(|e| { + log::warn!( + "Failed to connect container {} to network {}: {}", + self.config.service.name, + network.name, + e + ); + })?; } + + log::info!( + "Container {} created successfully", + self.config.service.name + ); + Ok(()) } - pub async fn recreate_container(&self) -> Result<(), std::io::Error> { + pub async fn recreate_container(&self) -> Result<(), ServiceConfigError> { self.pull_image().await?; let _ = self.stop_container().await; let _ = self.remove_container().await; @@ -314,27 +439,33 @@ impl ServiceInstance { } pub async fn container_does_not_exist(&self) -> bool { - // Get the container inspection data - let output = match tokio::process::Command::new("docker") - .args(["inspect", "--format", "{{json .}}", &self.service.name]) - .output() + let docker = get_docker(); + + let options: InspectContainerOptions = InspectContainerOptionsBuilder::new().build(); + + match docker + .inspect_container(&self.config.service.name, Some(options)) .await { - Ok(output) => output, + Ok(_) => false, + Err(bollard::errors::Error::DockerResponseServerError { + status_code: 404, .. 
+ }) => { + log::info!( + "Container {} does not exist, needs creation", + self.config.service.name + ); + true + } Err(e) => { - log::warn!("Failed to inspect container {}: {}", self.service.name, e); - return true; // If we can't inspect, assume recreate is needed + log::warn!( + "Failed to inspect container {}: {}", + self.config.service.name, + e + ); + true // If we can't inspect, assume recreate is needed } - }; - - if !output.status.success() { - log::info!( - "Container {} does not exist, needs creation", - self.service.name - ); - return true; } - false } /// Validate if the current container is different from @@ -345,34 +476,42 @@ impl ServiceInstance { } // If self and other are not equal we need to recreate the // container - self != other + self.config != other.config } pub async fn recreate_if_required(&self, other: &Self) { if self.requires_recreate(other).await { if let Err(e) = self.recreate_container().await { - log::error!("Failed to recreate container {}: {}", self.service.name, e); + log::error!( + "Failed to recreate container {}: {}", + self.config.service.name, + e + ); } } } - pub async fn poll(&mut self, poll_images: bool, init: bool) { - if init && self.dispenser.initialize == Initialize::Immediately { - log::info!("Starting {} immediately", self.service.name); + pub async fn poll(&self, poll_images: bool, init: bool) { + if init && self.config.dispenser.initialize == Initialize::Immediately { + log::info!("Starting {} immediately", self.config.service.name); if let Err(e) = self.run_container().await { - log::error!("Failed to run container {}: {}", self.service.name, e); + log::error!( + "Failed to run container {}: {}", + self.config.service.name, + e + ); } return; } // If uses cron - if let Some(cron_watcher) = &mut self.cron_watcher { + if let Some(cron_watcher) = &self.cron_watcher { if cron_watcher.is_ready() { // If the cron matches we can short circuit the function if let Err(e) = self.run_container().await { log::error!( 
"Failed to run container {} from cron: {}", - self.service.name, + self.config.service.name, e ); } @@ -382,29 +521,33 @@ impl ServiceInstance { } // If its ready to poll images - if self.dispenser.watch && poll_images { + if self.config.dispenser.watch && poll_images { // try to update the watchers and check // if any of them were updated - if let Some(image_watcher) = &mut self.image_watcher { + if let Some(image_watcher) = &self.image_watcher { match image_watcher.update().await { ImageWatcherStatus::Updated => { log::info!( "Image updated for service {}, recreating container...", - self.service.name + self.config.service.name ); if let Err(e) = self.recreate_container().await { log::error!( "Failed to recreate container {}: {}", - self.service.name, + self.config.service.name, e ); } if let Err(e) = self.run_container().await { - log::error!("Failed to run container {}: {}", self.service.name, e); + log::error!( + "Failed to run container {}: {}", + self.config.service.name, + e + ); } } ImageWatcherStatus::Deleted => { - log::warn!("Image for service {} was deleted", self.service.name); + log::warn!("Image for service {} was deleted", self.config.service.name); } ImageWatcherStatus::NotUpdated => {} } @@ -412,3 +555,44 @@ impl ServiceInstance { } } } + +/// Parse an image reference into (image, tag) components +fn parse_image_reference(image: &str) -> (&str, &str) { + // Handle digest references (image@sha256:...) 
+ if let Some(at_pos) = image.find('@') { + return (&image[..at_pos], &image[at_pos..]); + } + + // Handle tag references (image:tag) + // Need to be careful with registry URLs that contain port numbers + // e.g., localhost:5000/myimage:tag + if let Some(colon_pos) = image.rfind(':') { + // Check if the colon is part of a port number in the registry URL + let after_colon = &image[colon_pos + 1..]; + // If there's a slash after the colon, it's a port number, not a tag + if !after_colon.contains('/') { + return (&image[..colon_pos], after_colon); + } + } + + // No tag specified, use "latest" + (image, "latest") +} + +/// Parse memory limit string (e.g., "512m", "2g") to bytes +fn parse_memory_limit(limit: &str) -> i64 { + let limit = limit.trim().to_lowercase(); + let (num_str, multiplier) = if limit.ends_with("g") { + (&limit[..limit.len() - 1], 1024 * 1024 * 1024) + } else if limit.ends_with("m") { + (&limit[..limit.len() - 1], 1024 * 1024) + } else if limit.ends_with("k") { + (&limit[..limit.len() - 1], 1024) + } else if limit.ends_with("b") { + (&limit[..limit.len() - 1], 1) + } else { + (limit.as_str(), 1) + }; + + num_str.parse::().unwrap_or(0) * multiplier +} diff --git a/src/service/manager.rs b/src/service/manager.rs index 21748b2..7ea4136 100644 --- a/src/service/manager.rs +++ b/src/service/manager.rs @@ -1,12 +1,19 @@ -use std::{path::PathBuf, sync::Arc, time::Duration}; +use std::{ + collections::{HashMap, HashSet}, + net::{Ipv4Addr, SocketAddrV4}, + path::PathBuf, + sync::Arc, + time::Duration, +}; use tokio::{sync::Mutex, task::JoinSet}; use crate::service::{ - file::{EntrypointFile, ServiceFile}, - instance::{CronWatcher, ServiceInstance}, + cron_watcher::CronWatcher, + file::{CertbotSettings, EntrypointFile, GlobalProxyConfig, ProxySettings, ServiceFile}, + instance::{ServiceInstance, ServiceInstanceConfig}, manifest::ImageWatcher, - network::NetworkInstance, + network::{ensure_default_network, remove_default_network, NetworkInstance}, 
vars::{render_template, ServiceConfigError, ServiceVarsMaterialized}, }; @@ -16,7 +23,7 @@ pub struct ServiceMangerConfig { } impl ServiceMangerConfig { - pub async fn try_init() -> Result { + pub async fn try_init(filter: Option<&[String]>) -> Result { // Load and materialize variables let vars = ServiceVarsMaterialized::try_init().await?; let entrypoint_file = EntrypointFile::try_init(&vars).await?; @@ -24,6 +31,16 @@ impl ServiceMangerConfig { let mut services = Vec::new(); for entry in &entrypoint_file.services { + if let Some(filter) = filter { + let path_str = entry.path.to_string_lossy(); + let matches = filter + .iter() + .any(|f| path_str == *f || path_str.ends_with(&format!("/{}", f))); + if !matches { + continue; + } + } + // Construct the path to service.toml let service_toml_path = entry.path.join("service.toml"); @@ -49,9 +66,12 @@ impl ServiceMangerConfig { struct ServiceManagerInner { // These two are craeted together. We can zip them pub service_names: Vec, - instances: Vec>>, + instances: Vec>, + router: HashMap, networks: Vec, delay: Duration, + certbot: Option, + proxy: GlobalProxyConfig, } pub struct ServicesManager { @@ -61,6 +81,15 @@ pub struct ServicesManager { } impl ServicesManager { + pub fn proxy_enabled(&self) -> bool { + self.inner.proxy.enabled + } + pub fn resolve_host(&self, host: &str) -> Option { + if let Some(&service_index) = self.inner.router.get(host) { + return self.inner.instances[service_index].get_socket_addr(); + } + None + } // We should ensure that the containers don't exist before start up. // This is to make 100% sure that dispenser controls these containers // and they don't exist previously. 
@@ -68,14 +97,13 @@ impl ServicesManager { let mut join_set = JoinSet::new(); for instance in &self.inner.instances { - let instance_clone = Arc::clone(instance); + let instance = Arc::clone(instance); join_set.spawn(async move { - let instance = instance_clone.lock().await; match instance.container_does_not_exist().await { true => Ok(()), false => Err(format!( "Container {} already exists", - instance.service.name + instance.config.service.name )), } }); @@ -104,21 +132,19 @@ impl ServicesManager { let mut join_set = JoinSet::new(); for this_instance in &self.inner.instances { - let this_instance_clone = Arc::clone(this_instance); + let this_instance = Arc::clone(this_instance); let other_service_names = other.inner.service_names.clone(); let other_instances = other.inner.instances.clone(); join_set.spawn(async move { - let this_instance = this_instance_clone.lock().await; // If the instance is present in other we check for recreation match other_service_names .iter() .zip(other_instances.iter()) - .find(|(o, _)| *o == &this_instance.service.name) + .find(|(o, _)| *o == &this_instance.config.service.name) { Some((_, other_instance)) => { - let other_instance = other_instance.lock().await; - this_instance.recreate_if_required(&other_instance).await; + this_instance.recreate_if_required(other_instance).await; } None => { // If the new container does not exist in other @@ -146,31 +172,95 @@ impl ServicesManager { self.remove_containers(removed_services).await; } - pub async fn from_config(config: ServiceMangerConfig) -> Result { + /// Get a map of service names to their assigned IP addresses. + /// This is used during reload to preserve IP assignments for existing services. 
+ pub fn get_ip_map(&self) -> HashMap { + self.inner + .service_names + .iter() + .zip(self.inner.instances.iter()) + .map(|(name, instance)| (name.clone(), instance.config.assigned_ip)) + .collect() + } + + pub fn get_proxy_configs(&self) -> Vec { + self.inner + .instances + .iter() + .filter_map(|instance| instance.config.proxy.clone()) + .collect() + } + + pub fn get_certbot_settings(&self) -> Option { + self.inner.certbot.clone() + } + + pub async fn from_config( + mut config: ServiceMangerConfig, + existing_ips: Option>, + ) -> Result { // Get the delay from config (in seconds) let delay = Duration::from_secs(config.entrypoint_file.delay); let mut instances = Vec::new(); let mut networks = Vec::new(); let mut service_names = Vec::new(); + let mut router = HashMap::new(); + let proxy = config.entrypoint_file.proxy; + + // Ensure the default dispenser network exists first + // This network is used by all containers for inter-container communication + if let Err(e) = ensure_default_network().await { + log::error!("Failed to ensure default dispenser network exists: {}", e); + return Err(e); + } - // Process networks first - create NetworkInstance objects + // Process user-defined networks - create NetworkInstance objects for network_entry in config.entrypoint_file.networks { let network = NetworkInstance::from(network_entry); networks.push(network); } - // Ensure all networks exist before creating services + // Ensure all user-defined networks exist before creating services for network in &networks { if let Err(e) = network.ensure_exists().await { log::error!("Failed to ensure network {} exists: {}", network.name, e); - return Err(ServiceConfigError::Io(e)); + return Err(e); } } + // Prune dependencies: Remove dependencies on services that are not being loaded + let loaded_service_names: std::collections::HashSet = config + .services + .iter() + .map(|(_, s)| s.service.name.clone()) + .collect(); + + for (_, service_file) in &mut config.services { + 
service_file.depends_on.retain(|name, _| { + let exists = loaded_service_names.contains(name); + if !exists { + log::debug!( + "Pruning dependency '{}' from service '{}' as it's not being loaded.", + name, + service_file.service.name + ); + } + exists + }); + } + + // Allocate IP addresses using "Reserve then Fill" strategy + let assigned_ips = allocate_ips(&config.services, existing_ips); + // Iterate through each service entry in the config let mut join_set = JoinSet::new(); for (entry_path, service_file) in config.services { + // Get the assigned IP for this service + let assigned_ip = assigned_ips + .get(&service_file.service.name) + .copied() + .expect("IP should have been allocated for all services"); join_set.spawn(async move { log::debug!("Initializing config for {entry_path:?}"); @@ -187,7 +277,7 @@ impl ServicesManager { let service_name = service_file.service.name.clone(); // Create the ServiceInstance - let instance = ServiceInstance { + let config = ServiceInstanceConfig { dir: entry_path, service: service_file.service, ports: service_file.ports, @@ -197,19 +287,30 @@ impl ServicesManager { network: service_file.network, dispenser: service_file.dispenser, depends_on: service_file.depends_on, + proxy: service_file.proxy, + assigned_ip, + }; + + let instance = ServiceInstance { + config: Arc::new(config), cron_watcher, image_watcher, }; - (service_name, Arc::new(Mutex::new(instance))) + (service_name, Arc::new(instance)) }); } + let mut index = 0; while let Some(result) = join_set.join_next().await { match result { Ok((service_name, instance)) => { + if let Some(proxy_config) = &instance.config.proxy { + router.insert(proxy_config.host.clone(), index); + } service_names.push(service_name); instances.push(instance); + index += 1; } Err(e) => { log::error!("Failed to initialize service: {}", e); @@ -226,6 +327,9 @@ impl ServicesManager { instances, networks, delay, + router, + proxy, + certbot: config.entrypoint_file.certbot, }; Ok(ServicesManager { @@ 
-257,18 +361,15 @@ impl ServicesManager { last_image_poll = std::time::Instant::now(); } // Scope to release the lock - { - let poll_start = std::time::Instant::now(); - let mut instance = instance.lock().await; - instance.poll(poll_images, init).await; - let poll_duration = poll_start.elapsed(); - log::debug!( - "Polling for {} took {:?}", - instance.service.name, - poll_duration - ); - init = false; - } + let poll_start = std::time::Instant::now(); + instance.poll(poll_images, init).await; + let poll_duration = poll_start.elapsed(); + log::debug!( + "Polling for {} took {:?}", + instance.config.service.name, + poll_duration + ); + init = false; tokio::time::sleep(Duration::from_secs(1)).await; } }) @@ -300,12 +401,11 @@ impl ServicesManager { let mut join_set = JoinSet::new(); for instance in &self.inner.instances { - let instance_clone = Arc::clone(instance); + let instance = Arc::clone(instance); let names_clone = names.clone(); join_set.spawn(async move { - let instance = instance_clone.lock().await; - if names_clone.contains(&instance.service.name) { + if names_clone.contains(&instance.config.service.name) { let _ = instance.stop_container().await; let _ = instance.remove_container().await; } @@ -323,5 +423,238 @@ impl ServicesManager { self.remove_containers(self.inner.service_names.clone()) .await; self.cleanup_networks().await; + + // Remove the default dispenser network after all containers and user networks are cleaned up + if let Err(e) = remove_default_network().await { + log::warn!("Failed to remove default dispenser network: {}", e); + } + } +} + +/// Allocate IP addresses to services using "Reserve then Fill" strategy. +/// +/// This ensures that: +/// 1. Existing services keep their IP addresses (Reserve phase) +/// 2. New services get the lowest available IP addresses (Fill phase) +/// +/// The subnet is 172.28.0.0/16 with gateway at 172.28.0.1, so we start from 172.28.0.2. 
+fn allocate_ips( + services: &[(PathBuf, crate::service::file::ServiceFile)], + existing_ips: Option>, +) -> HashMap { + let mut assigned: HashMap = HashMap::new(); + let mut used_ips: HashSet = HashSet::new(); + + // Base IP: 172.28.0.0 + let base_ip: u32 = u32::from(Ipv4Addr::new(172, 28, 0, 0)); + + // Reserve the gateway IP (172.28.0.1) + used_ips.insert(Ipv4Addr::new(172, 28, 0, 1)); + + let existing = existing_ips.unwrap_or_default(); + + // Reserve Phase: Preserve IPs for existing services + for (_, service_file) in services { + let service_name = &service_file.service.name; + if let Some(&existing_ip) = existing.get(service_name) { + assigned.insert(service_name.clone(), existing_ip); + used_ips.insert(existing_ip); + log::debug!( + "Reserved existing IP {} for service {}", + existing_ip, + service_name + ); + } + } + + // Fill Phase: Assign new IPs to services that don't have one + // Start from 172.28.0.2 (offset 2 from base) + let mut next_offset: u32 = 2; + + for (_, service_file) in services { + let service_name = &service_file.service.name; + if assigned.contains_key(service_name) { + continue; // Already assigned in reserve phase + } + + // Find the next available IP + loop { + let candidate_ip = Ipv4Addr::from(base_ip + next_offset); + next_offset += 1; + + // Check if we've exceeded the subnet (unlikely with /16) + if next_offset > 65534 { + panic!("Exhausted all available IPs in the dispenser subnet"); + } + + if !used_ips.contains(&candidate_ip) { + assigned.insert(service_name.clone(), candidate_ip); + used_ips.insert(candidate_ip); + log::debug!( + "Assigned new IP {} to service {}", + candidate_ip, + service_name + ); + break; + } + } + } + + assigned +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::service::file::{DispenserConfig, PullOptions, Restart, ServiceEntry, ServiceFile}; + + fn make_service_file(name: &str) -> ServiceFile { + ServiceFile { + service: ServiceEntry { + name: name.to_string(), + image: 
"test:latest".to_string(), + hostname: None, + user: None, + working_dir: None, + command: None, + entrypoint: None, + memory: None, + cpus: None, + }, + ports: vec![], + volume: vec![], + env: HashMap::new(), + restart: Restart::No, + network: vec![], + dispenser: DispenserConfig { + watch: false, + cron: None, + pull: PullOptions::OnStartup, + initialize: crate::service::file::Initialize::default(), + }, + depends_on: HashMap::new(), + proxy: None, + } + } + + #[test] + fn test_allocate_ips_new_services() { + let services = vec![ + (PathBuf::from("/a"), make_service_file("service-a")), + (PathBuf::from("/b"), make_service_file("service-b")), + (PathBuf::from("/c"), make_service_file("service-c")), + ]; + + let assigned = allocate_ips(&services, None); + + assert_eq!(assigned.len(), 3); + assert_eq!( + assigned.get("service-a"), + Some(&Ipv4Addr::new(172, 28, 0, 2)) + ); + assert_eq!( + assigned.get("service-b"), + Some(&Ipv4Addr::new(172, 28, 0, 3)) + ); + assert_eq!( + assigned.get("service-c"), + Some(&Ipv4Addr::new(172, 28, 0, 4)) + ); + } + + #[test] + fn test_allocate_ips_preserves_existing() { + let services = vec![ + (PathBuf::from("/a"), make_service_file("service-a")), + (PathBuf::from("/b"), make_service_file("service-b")), + (PathBuf::from("/c"), make_service_file("service-c")), + ]; + + let mut existing = HashMap::new(); + existing.insert("service-b".to_string(), Ipv4Addr::new(172, 28, 0, 10)); + + let assigned = allocate_ips(&services, Some(existing)); + + assert_eq!(assigned.len(), 3); + // service-a gets the first available IP + assert_eq!( + assigned.get("service-a"), + Some(&Ipv4Addr::new(172, 28, 0, 2)) + ); + // service-b keeps its existing IP + assert_eq!( + assigned.get("service-b"), + Some(&Ipv4Addr::new(172, 28, 0, 10)) + ); + // service-c gets the next available IP + assert_eq!( + assigned.get("service-c"), + Some(&Ipv4Addr::new(172, 28, 0, 3)) + ); + } + + #[test] + fn test_allocate_ips_skips_used_ips() { + let services = vec![ + 
(PathBuf::from("/a"), make_service_file("service-a")), + (PathBuf::from("/b"), make_service_file("service-b")), + (PathBuf::from("/c"), make_service_file("service-c")), + ]; + + let mut existing = HashMap::new(); + // Reserve IP .2 for service-b (which is processed second) + existing.insert("service-b".to_string(), Ipv4Addr::new(172, 28, 0, 2)); + + let assigned = allocate_ips(&services, Some(existing)); + + assert_eq!(assigned.len(), 3); + // service-a should skip .2 (used by service-b) and get .3 + assert_eq!( + assigned.get("service-a"), + Some(&Ipv4Addr::new(172, 28, 0, 3)) + ); + // service-b keeps its reserved IP + assert_eq!( + assigned.get("service-b"), + Some(&Ipv4Addr::new(172, 28, 0, 2)) + ); + // service-c gets .4 + assert_eq!( + assigned.get("service-c"), + Some(&Ipv4Addr::new(172, 28, 0, 4)) + ); + } + + #[test] + fn test_allocate_ips_ignores_stale_existing() { + // Test that existing IPs for services no longer in the config are ignored + let services = vec![(PathBuf::from("/a"), make_service_file("service-a"))]; + + let mut existing = HashMap::new(); + existing.insert("service-removed".to_string(), Ipv4Addr::new(172, 28, 0, 5)); + + let assigned = allocate_ips(&services, Some(existing)); + + assert_eq!(assigned.len(), 1); + // service-a gets .2 (the removed service's IP is not reserved) + assert_eq!( + assigned.get("service-a"), + Some(&Ipv4Addr::new(172, 28, 0, 2)) + ); + } + + #[test] + fn test_allocate_ips_gateway_reserved() { + // Ensure gateway IP (172.28.0.1) is never assigned + let services = vec![(PathBuf::from("/a"), make_service_file("service-a"))]; + + let assigned = allocate_ips(&services, None); + + assert_eq!(assigned.len(), 1); + // Should start from .2, not .1 (gateway) + assert_eq!( + assigned.get("service-a"), + Some(&Ipv4Addr::new(172, 28, 0, 2)) + ); } } diff --git a/src/service/manifest.rs b/src/service/manifest.rs index 978a76a..e19c52c 100644 --- a/src/service/manifest.rs +++ b/src/service/manifest.rs @@ -1,7 +1,9 @@ -use 
tokio::process::Command; - +use bollard::query_parameters::{CreateImageOptions, CreateImageOptionsBuilder}; +use futures_util::StreamExt; use thiserror::Error; +use crate::service::docker::get_docker; + pub type Result = std::result::Result; #[derive(Error, Debug)] @@ -12,51 +14,12 @@ pub enum ImageWatcherError { SerdeJsonError(#[from] serde_json::Error), #[error("IO error: {0}")] IoError(#[from] std::io::Error), + #[error("Docker API error: {0}")] + DockerApiError(#[from] bollard::errors::Error), #[error("Docker command failed: {0}")] DockerCommandFailed(String), } -#[derive(serde::Deserialize)] -pub struct DockerInspectResponse { - #[serde(rename = "RepoDigests")] - repo_digests: Option>, - #[serde(rename = "Id")] - id: Option, -} - -impl DockerInspectResponse { - pub fn get_digest(&self) -> Result { - // Try to get digest from RepoDigests first - if let Some(digests) = self.repo_digests.as_ref() { - if let Some(first_digest) = digests.first() { - // RepoDigests format is like "repository@sha256:..." 
- if let Some(digest_part) = first_digest.split('@').nth(1) { - let hash = digest_part.strip_prefix("sha256:").ok_or_else(|| { - ImageWatcherError::InvalidDigestPrefix(digest_part.to_string()) - })?; - let mut inner = [0u8; 64]; - inner.copy_from_slice(hash.as_bytes()); - return Ok(Sha256 { inner }); - } - } - } - - // Fallback to Id if RepoDigests is not available - if let Some(id) = self.id.as_ref() { - let hash = id - .strip_prefix("sha256:") - .ok_or_else(|| ImageWatcherError::InvalidDigestPrefix(id.clone()))?; - let mut inner = [0u8; 64]; - inner.copy_from_slice(hash.as_bytes()); - return Ok(Sha256 { inner }); - } - - Err(ImageWatcherError::DockerCommandFailed( - "No digest found in inspect output".to_string(), - )) - } -} - #[derive(Copy, Clone, PartialEq, Eq)] pub struct Sha256 { /// 256 bits of data in base64 @@ -70,12 +33,29 @@ impl std::fmt::Debug for Sha256 { } } -#[derive(Debug, Clone, PartialEq, Eq)] +use tokio::sync::Mutex; + +/// ImageWatcher monitors a Docker image for updates by tracking its digest. +/// +/// # Equality +/// +/// Note: PartialEq and Eq are implemented to compare only the `image` field, +/// ignoring `last_digest`. This allows ImageWatcher instances to be considered +/// equal if they watch the same image, regardless of their current digest state. 
+#[derive(Debug)] pub struct ImageWatcher { image: Box, - last_digest: Option, + last_digest: Mutex>, } +impl PartialEq for ImageWatcher { + fn eq(&self, other: &Self) -> bool { + self.image == other.image + } +} + +impl Eq for ImageWatcher {} + #[derive(Debug, Copy, Clone)] pub enum ImageWatcherStatus { NotUpdated, @@ -95,10 +75,13 @@ impl ImageWatcher { }; let image = image.into(); - ImageWatcher { image, last_digest } + ImageWatcher { + image, + last_digest: Mutex::new(last_digest), + } } - pub async fn update(&mut self) -> ImageWatcherStatus { - let last_digest = self.last_digest; + pub async fn update(&self) -> ImageWatcherStatus { + let last_digest = *self.last_digest.lock().await; let new_sha256 = get_latest_digest(&self.image).await; match new_sha256 { Err(e) => { @@ -107,7 +90,7 @@ impl ImageWatcher { } Ok(new_sha256) if last_digest == Some(new_sha256) => ImageWatcherStatus::NotUpdated, Ok(new_sha256) => { - self.last_digest = Some(new_sha256); + *self.last_digest.lock().await = Some(new_sha256); log::info!( "Found a new version for {}, update will start soon...", self.image, @@ -118,37 +101,91 @@ impl ImageWatcher { } } +/// Parse an image reference into (image, tag) components +fn parse_image_reference(image: &str) -> (&str, &str) { + // Handle digest references (image@sha256:...) 
+ if let Some(at_pos) = image.find('@') { + return (&image[..at_pos], &image[at_pos..]); + } + + // Handle tag references (image:tag) + // Need to be careful with registry URLs that contain port numbers + // e.g., localhost:5000/myimage:tag + if let Some(colon_pos) = image.rfind(':') { + // Check if the colon is part of a port number in the registry URL + let after_colon = &image[colon_pos + 1..]; + // If there's a slash after the colon, it's a port number, not a tag + if !after_colon.contains('/') { + return (&image[..colon_pos], after_colon); + } + } + + // No tag specified, use "latest" + (image, "latest") +} + async fn get_latest_digest(image: &str) -> Result { - // First, pull the latest image - let pull_result = Command::new("docker") - .args(["pull"]) - .arg(image) - .output() - .await?; - - if !pull_result.status.success() { - return Err(ImageWatcherError::DockerCommandFailed( - String::from_utf8_lossy(&pull_result.stderr).to_string(), - )); + let docker = get_docker(); + + // Parse image name and tag + let (image_name, tag) = parse_image_reference(image); + + // Pull the latest image using bollard + let options: CreateImageOptions = CreateImageOptionsBuilder::new() + .from_image(image_name) + .tag(tag) + .build(); + + let mut stream = docker.create_image(Some(options), None, None); + + while let Some(result) = stream.next().await { + match result { + Ok(info) => { + if let Some(status) = info.status { + log::debug!("Pull status: {}", status); + } + } + Err(e) => { + return Err(ImageWatcherError::DockerApiError(e)); + } + } } - // Then, inspect the image to get its digest - let inspect_result = Command::new("docker") - .args(["inspect"]) - .arg(image) - .output() - .await?; - - if !inspect_result.status.success() { - return Err(ImageWatcherError::DockerCommandFailed( - String::from_utf8_lossy(&inspect_result.stderr).to_string(), - )); + // Inspect the image to get its digest + let inspect = docker.inspect_image(image).await?; + + // Try to get digest from 
RepoDigests first + if let Some(repo_digests) = inspect.repo_digests { + if let Some(first_digest) = repo_digests.first() { + // RepoDigests format is like "repository@sha256:..." + if let Some(digest_part) = first_digest.split('@').nth(1) { + let hash = digest_part.strip_prefix("sha256:").ok_or_else(|| { + ImageWatcherError::InvalidDigestPrefix(digest_part.to_string()) + })?; + let mut inner = [0u8; 64]; + let hash_bytes = hash.as_bytes(); + if hash_bytes.len() >= 64 { + inner.copy_from_slice(&hash_bytes[..64]); + return Ok(Sha256 { inner }); + } + } + } + } + + // Fallback to Id if RepoDigests is not available + if let Some(id) = inspect.id { + let hash = id + .strip_prefix("sha256:") + .ok_or_else(|| ImageWatcherError::InvalidDigestPrefix(id.clone()))?; + let mut inner = [0u8; 64]; + let hash_bytes = hash.as_bytes(); + if hash_bytes.len() >= 64 { + inner.copy_from_slice(&hash_bytes[..64]); + return Ok(Sha256 { inner }); + } } - let val: Vec = serde_json::from_slice(&inspect_result.stdout)?; - val.first() - .ok_or_else(|| { - ImageWatcherError::DockerCommandFailed("Empty inspect response".to_string()) - })? - .get_digest() + Err(ImageWatcherError::DockerCommandFailed( + "No digest found in inspect output".to_string(), + )) } diff --git a/src/service/mod.rs b/src/service/mod.rs index 559424f..bf06c17 100644 --- a/src/service/mod.rs +++ b/src/service/mod.rs @@ -1,3 +1,5 @@ +pub mod cron_watcher; +pub mod docker; pub mod file; pub mod instance; pub mod manager; diff --git a/src/service/network.rs b/src/service/network.rs index 34edb30..0f6a205 100644 --- a/src/service/network.rs +++ b/src/service/network.rs @@ -3,6 +3,12 @@ //! This module provides functionality to manage Docker networks from the entrypoint configuration. //! Networks are created before services start and can be cleaned up on shutdown. //! +//! # Default Network +//! +//! Dispenser automatically creates a default network (`dispenser`) that all containers +//! are connected to. 
This network uses a bridge driver with a specific subnet +//! (172.28.0.0/16) to provide predictable IP addresses for containers. +//! //! # Example //! //! Networks are defined in the entrypoint file (e.g., `dispenser.toml`): @@ -26,7 +32,24 @@ use std::collections::HashMap; -use crate::service::file::{NetworkDeclarationEntry, NetworkDriver}; +use bollard::models::{Ipam, IpamConfig, NetworkCreateRequest}; +use bollard::query_parameters::{InspectNetworkOptions, InspectNetworkOptionsBuilder}; + +use crate::service::vars::ServiceConfigError; +use crate::service::{ + docker::get_docker, + file::{NetworkDeclarationEntry, NetworkDriver}, +}; + +/// The name of the default dispenser network that all containers are connected to. +pub const DEFAULT_NETWORK_NAME: &str = "dispenser"; + +/// The subnet for the default dispenser network. +/// This provides a /16 network with 65,534 usable host addresses. +pub const DEFAULT_NETWORK_SUBNET: &str = "172.28.0.0/16"; + +/// The gateway IP for the default dispenser network. +pub const DEFAULT_NETWORK_GATEWAY: &str = "172.28.0.1"; pub struct NetworkInstance { pub name: String, @@ -35,6 +58,10 @@ pub struct NetworkInstance { pub internal: bool, pub attachable: bool, pub labels: HashMap, + /// Optional subnet configuration for the network (CIDR notation) + pub subnet: Option, + /// Optional gateway IP for the network + pub gateway: Option, } #[derive(Debug, Clone, Copy, PartialEq, Eq)] @@ -52,27 +79,48 @@ impl From for NetworkInstance { internal: entry.internal, attachable: entry.attachable, labels: entry.labels, + subnet: None, + gateway: None, } } } impl NetworkInstance { - /// Check if a network exists - pub async fn check_network(&self) -> Result { - let output = tokio::process::Command::new("docker") - .args(["network", "inspect", &self.name]) - .output() - .await?; - - if output.status.success() { - Ok(NetworkStatus::Exists) - } else { - Ok(NetworkStatus::NotFound) + /// Create the default dispenser network instance. 
+ /// This network is automatically created and all containers are connected to it. + pub fn default_network() -> Self { + let mut labels = HashMap::new(); + labels.insert("managed-by".to_string(), "dispenser".to_string()); + + Self { + name: DEFAULT_NETWORK_NAME.to_string(), + driver: NetworkDriver::Bridge, + external: false, + internal: false, + attachable: true, + labels, + subnet: Some(DEFAULT_NETWORK_SUBNET.to_string()), + gateway: Some(DEFAULT_NETWORK_GATEWAY.to_string()), + } + } + + /// Check if a network exists using bollard + pub async fn check_network(&self) -> Result { + let docker = get_docker(); + + let options: InspectNetworkOptions = InspectNetworkOptionsBuilder::new().build(); + + match docker.inspect_network(&self.name, Some(options)).await { + Ok(_) => Ok(NetworkStatus::Exists), + Err(bollard::errors::Error::DockerResponseServerError { + status_code: 404, .. + }) => Ok(NetworkStatus::NotFound), + Err(e) => Err(ServiceConfigError::DockerApi(e)), } } - /// Create the network if it doesn't exist - pub async fn create_network(&self) -> Result<(), std::io::Error> { + /// Create the network if it doesn't exist using bollard + pub async fn create_network(&self) -> Result<(), ServiceConfigError> { // If external, we don't create it - it should already exist if self.external { log::info!( @@ -91,53 +139,64 @@ impl NetworkInstance { log::info!("Creating network: {}", self.name); - let mut cmd = tokio::process::Command::new("docker"); - cmd.args(["network", "create"]); + let docker = get_docker(); - // Add driver - let driver_str = match self.driver { + let driver = match self.driver { NetworkDriver::Bridge => "bridge", NetworkDriver::Host => "host", NetworkDriver::Overlay => "overlay", NetworkDriver::Macvlan => "macvlan", NetworkDriver::None => "none", }; - cmd.args(["--driver", driver_str]); - - // Add internal flag - if self.internal { - cmd.arg("--internal"); - } - // Add attachable flag (useful for overlay networks) - if self.attachable { - 
cmd.arg("--attachable"); - } + // Build IPAM configuration if subnet is specified + let ipam = if self.subnet.is_some() || self.gateway.is_some() { + let ipam_config = IpamConfig { + subnet: self.subnet.clone(), + gateway: self.gateway.clone(), + ip_range: None, + auxiliary_addresses: None, + }; - // Add labels - for (key, value) in &self.labels { - cmd.args(["--label", &format!("{}={}", key, value)]); - } - - // Add the network name - cmd.arg(&self.name); + Some(Ipam { + driver: Some("default".to_string()), + config: Some(vec![ipam_config]), + options: None, + }) + } else { + None + }; - let output = cmd.output().await?; + let request = NetworkCreateRequest { + name: self.name.clone(), + driver: Some(driver.to_string()), + internal: Some(self.internal), + attachable: Some(self.attachable), + labels: Some(self.labels.clone()), + ipam, + ..Default::default() + }; - if output.status.success() { - log::info!("Network {} created successfully", self.name); - Ok(()) - } else { - let error_msg = String::from_utf8_lossy(&output.stderr); - log::error!("Failed to create network {}: {}", self.name, error_msg); - Err(std::io::Error::other( - format!("Failed to create network: {}", error_msg), - )) + match docker.create_network(request).await { + Ok(_) => { + log::info!("Network {} created successfully", self.name); + if let Some(ref subnet) = self.subnet { + log::info!(" Subnet: {}", subnet); + } + if let Some(ref gateway) = self.gateway { + log::info!(" Gateway: {}", gateway); + } + Ok(()) + } + Err(e) => { + log::error!("Failed to create network {}: {}", self.name, e); + Err(ServiceConfigError::DockerApi(e)) + } } } - /// Remove the network - pub async fn remove_network(&self) -> Result<(), std::io::Error> { + /// Remove the network using bollard + pub async fn remove_network(&self) -> Result<(), ServiceConfigError> { // Don't remove external networks if self.external { log::info!( @@ -149,25 +208,30 @@ impl NetworkInstance { log::info!("Removing network: {}", self.name); - 
let output = tokio::process::Command::new("docker") - .args(["network", "rm", &self.name]) - .output() - .await?; + let docker = get_docker(); - if output.status.success() { - log::info!("Network {} removed successfully", self.name); - Ok(()) - } else { - let error_msg = String::from_utf8_lossy(&output.stderr); - log::warn!("Failed to remove network {}: {}", self.name, error_msg); - // Don't return error for removal failures as they might be expected - // (e.g., network still in use by containers) - Ok(()) + match docker.remove_network(&self.name).await { + Ok(_) => { + log::info!("Network {} removed successfully", self.name); + Ok(()) + } + Err(bollard::errors::Error::DockerResponseServerError { + status_code: 404, .. + }) => { + log::info!("Network {} not found, skipping removal", self.name); + Ok(()) + } + Err(e) => { + log::warn!("Failed to remove network {}: {}", self.name, e); + // Don't return error for removal failures as they might be expected + // (e.g., network still in use by containers) + Ok(()) + } } } /// Ensure the network exists (create if needed) - pub async fn ensure_exists(&self) -> Result<(), std::io::Error> { + pub async fn ensure_exists(&self) -> Result<(), ServiceConfigError> { let status = self.check_network().await?; match status { @@ -181,10 +245,7 @@ impl NetworkInstance { "External network {} does not exist. Please create it manually.", self.name ); - Err(std::io::Error::new( - std::io::ErrorKind::NotFound, - format!("External network {} not found", self.name), - )) + Err(ServiceConfigError::NetworkNotFound(self.name.clone())) } else { self.create_network().await } @@ -192,3 +253,17 @@ impl NetworkInstance { } } } + +/// Ensure the default dispenser network exists. +/// This should be called during manager initialization before any containers are created. 
+pub async fn ensure_default_network() -> Result<(), ServiceConfigError> { + let default_network = NetworkInstance::default_network(); + default_network.ensure_exists().await +} + +/// Remove the default dispenser network. +/// This should be called during shutdown after all containers have been removed. +pub async fn remove_default_network() -> Result<(), ServiceConfigError> { + let default_network = NetworkInstance::default_network(); + default_network.remove_network().await +} diff --git a/src/service/vars.rs b/src/service/vars.rs index 4dcb39b..1aed255 100644 --- a/src/service/vars.rs +++ b/src/service/vars.rs @@ -1,5 +1,6 @@ use minijinja::Environment; use serde::{Deserialize, Serialize}; +use std::string::FromUtf8Error; use std::{collections::HashMap, path::Path, path::PathBuf}; use crate::secrets; @@ -65,7 +66,7 @@ pub struct ServiceVarsMaterialized { impl ServiceVarsMaterialized { pub async fn try_init() -> Result { - let vars_raw = ServiceVars::try_init()?; + let vars_raw = ServiceVars::try_init().await?; vars_raw.materialize().await } } @@ -81,7 +82,7 @@ impl Serialize for ServiceVarsMaterialized { /// Files that match dispenser.vars | *.dispenser.vars /// Sorted -fn list_vars_files() -> Vec { +async fn list_vars_files() -> Vec { let mut files = Vec::new(); let cli_args = crate::cli::get_cli_args(); @@ -92,13 +93,15 @@ fn list_vars_files() -> Vec { p } }); - if let Ok(entries) = std::fs::read_dir(search_dir) { - for entry in entries.filter_map(|e| e.ok()) { + if let Ok(mut entries) = tokio::fs::read_dir(search_dir).await { + while let Ok(Some(entry)) = entries.next_entry().await { let path = entry.path(); - if path.is_file() { - if let Some(file_name) = path.file_name().and_then(|s| s.to_str()) { - if file_name == "dispenser.vars" || file_name.ends_with(".dispenser.vars") { - files.push(path); + if let Ok(file_type) = entry.file_type().await { + if file_type.is_file() { + if let Some(file_name) = path.file_name().and_then(|s| s.to_str()) { + if file_name 
== "dispenser.vars" || file_name.ends_with(".dispenser.vars") { + files.push(path); + } } } } @@ -124,20 +127,15 @@ impl ServiceVars { } } - fn try_init() -> Result { - use std::io::Read; + async fn try_init() -> Result { let mut vars = Vec::new(); - let vars_files = list_vars_files(); + let vars_files = list_vars_files().await; for vars_file in vars_files { - match std::fs::File::open(vars_file) { - Ok(mut file) => { - let mut this_vars = String::new(); - file.read_to_string(&mut this_vars)?; - match Self::try_init_from_string(&this_vars) { - Ok(this_vars) => vars.push(this_vars), - Err(e) => log::error!("Error parsing vars file: {e}"), - } - } + match tokio::fs::read_to_string(&vars_file).await { + Ok(this_vars) => match Self::try_init_from_string(&this_vars) { + Ok(this_vars) => vars.push(this_vars), + Err(e) => log::error!("Error parsing vars file: {e}"), + }, Err(e) => log::error!("Error reading vars file: {e}"), } } @@ -160,6 +158,16 @@ pub enum ServiceConfigError { GcpSecretFetch(#[from] google_cloud_secretmanager_v1::Error), #[error("GCP Client error: {0}")] GcpClient(#[from] google_cloud_gax::client_builder::Error), + #[error("Docker API error: {0}")] + DockerApi(#[from] bollard::errors::Error), + #[error("Network not found: {0}")] + NetworkNotFound(String), +} + +impl From for ServiceConfigError { + fn from(value: FromUtf8Error) -> Self { + Self::Utf8(value.utf8_error()) + } } pub fn render_template( diff --git a/src/signals.rs b/src/signals.rs index 551fe16..1a69738 100644 --- a/src/signals.rs +++ b/src/signals.rs @@ -8,10 +8,10 @@ use std::process::ExitCode; use std::sync::Arc; use tokio::sync::Mutex; -pub fn send_signal(signal: crate::cli::Signal) -> ExitCode { +pub async fn send_signal(signal: crate::cli::Signal) -> ExitCode { let pid_file = &crate::cli::get_cli_args().pid_file; - let pid = match std::fs::read_to_string(pid_file) { + let pid = match tokio::fs::read_to_string(pid_file).await { Ok(pid) => pid, Err(err) => { eprintln!("Unable to read pid 
file: {err}"); @@ -49,18 +49,6 @@ pub fn handle_sigint(sigint_signal: Arc) { } }); } -pub async fn sigint_manager( - manager_holder: Arc>>, -) -> Result<(), String> { - let _ = sd_notify::notify(true, &[sd_notify::NotifyState::Stopping]); - - log::info!("Shutting down..."); - - let manager = manager_holder.lock().await; - manager.cancel().await; - manager.shutdown().await; - Ok(()) -} pub fn handle_reload(reload_signal: Arc) { let mut signals = Signals::new([SIGHUP]).expect("No signals :("); @@ -75,13 +63,19 @@ pub fn handle_reload(reload_signal: Arc) { pub async fn reload_manager( manager_holder: Arc>>, + service_filter: Option<&[String]>, ) -> Result<(), String> { let _ = sd_notify::notify(true, &[sd_notify::NotifyState::Reloading]); log::info!("Reloading configuration..."); - // Load the new configuration - let service_manager_config = match ServiceMangerConfig::try_init().await { + // Snapshot the existing IP assignments before loading new config + let existing_ips = { + let manager = manager_holder.lock().await; + manager.get_ip_map() + }; + + let service_manager_config = match ServiceMangerConfig::try_init(service_filter).await { Ok(entrypoint_file) => entrypoint_file, Err(e) => { log::error!("Failed to reload entrypoint file: {e:?}"); @@ -90,15 +84,16 @@ pub async fn reload_manager( } }; - // Create a new manager with the new configuration - let new_manager = match ServicesManager::from_config(service_manager_config).await { - Ok(manager) => Arc::new(manager), - Err(e) => { - log::error!("Failed to create new services manager: {e}"); - let _ = sd_notify::notify(true, &[sd_notify::NotifyState::Ready]); - return Err(format!("Failed to create new services manager: {e}")); - } - }; + // Create a new manager with the new configuration, passing existing IPs + let new_manager = + match ServicesManager::from_config(service_manager_config, Some(existing_ips)).await { + Ok(manager) => Arc::new(manager), + Err(e) => { + log::error!("Failed to create new services 
manager: {e}"); + let _ = sd_notify::notify(true, &[sd_notify::NotifyState::Ready]); + return Err(format!("Failed to create new services manager: {e}")); + } + }; log::info!("New configuration loaded successfully"); @@ -107,6 +102,9 @@ pub async fn reload_manager( let mut holder = manager_holder.lock().await; let old = holder.clone(); *holder = Arc::clone(&new_manager); + if old.proxy_enabled() != new_manager.proxy_enabled() { + log::warn!("proxy.enabled changed between reloads. This is not supported. Please restart dispenser to enable/disable the proxy."); + } old };