diff --git a/Cargo.lock b/Cargo.lock index 850260587eb..60660425d47 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6429,6 +6429,7 @@ dependencies = [ "test-strategy", "thiserror 1.0.69", "update-engine", + "url", "uuid", ] @@ -7045,8 +7046,10 @@ dependencies = [ "headers", "hex", "hickory-resolver", + "hmac", "http", "http-body-util", + "httpmock", "httptest", "hubtools", "hyper", @@ -7060,6 +7063,7 @@ dependencies = [ "itertools 0.14.0", "lldpd-client", "macaddr", + "maplit", "mg-admin-client", "nexus-auth", "nexus-client", @@ -7654,6 +7658,7 @@ dependencies = [ "toml_edit 0.22.24", "tracing", "unicode-xid", + "url", "usdt", "usdt-impl", "uuid", @@ -10509,6 +10514,7 @@ dependencies = [ "semver 1.0.25", "serde", "serde_json", + "url", "uuid", ] @@ -13040,6 +13046,7 @@ dependencies = [ "form_urlencoded", "idna", "percent-encoding", + "serde", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index f8ba5598f5c..b480b13022a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -421,6 +421,7 @@ hickory-resolver = "0.24.4" hickory-server = "0.24.4" highway = "1.3.0" hkdf = "0.12.4" +hmac = "0.12.1" http = "1.2.0" http-body = "1.0.1" http-body-util = "0.1.2" diff --git a/common/src/api/external/mod.rs b/common/src/api/external/mod.rs index 5b296d4693b..ae8ee875d77 100644 --- a/common/src/api/external/mod.rs +++ b/common/src/api/external/mod.rs @@ -1040,6 +1040,9 @@ pub enum ResourceType { Probe, ProbeNetworkInterface, LldpLinkConfig, + WebhookEvent, + WebhookReceiver, + WebhookSecret, } // IDENTITY METADATA diff --git a/dev-tools/omdb/src/bin/omdb/db.rs b/dev-tools/omdb/src/bin/omdb/db.rs index e38f782a784..a8a864ca4fb 100644 --- a/dev-tools/omdb/src/bin/omdb/db.rs +++ b/dev-tools/omdb/src/bin/omdb/db.rs @@ -22,6 +22,7 @@ use crate::check_allow_destructive::DestructiveOperationToken; use crate::helpers::CONNECTION_OPTIONS_HEADING; use crate::helpers::DATABASE_OPTIONS_HEADING; use crate::helpers::const_max_len; +use crate::helpers::display_option_blank; use anyhow::Context; use 
anyhow::bail; use async_bb8_diesel::AsyncConnection; @@ -7126,11 +7127,6 @@ async fn cmd_db_oximeter_list_producers( Ok(()) } -// Display an empty cell for an Option if it's None. -fn display_option_blank<T: std::fmt::Display>(opt: &Option<T>) -> String { - opt.as_ref().map(|x| x.to_string()).unwrap_or_else(|| "".to_string()) -} - // Format a `chrono::DateTime` in RFC3339 with milliseconds precision and using // `Z` rather than the UTC offset for UTC timestamps, to save a few characters // of line width in tabular output. diff --git a/dev-tools/omdb/src/bin/omdb/helpers.rs b/dev-tools/omdb/src/bin/omdb/helpers.rs index 2ee82ff908b..d431c807fea 100644 --- a/dev-tools/omdb/src/bin/omdb/helpers.rs +++ b/dev-tools/omdb/src/bin/omdb/helpers.rs @@ -31,3 +31,9 @@ pub(crate) const fn const_max_len(strs: &[&str]) -> usize { } max } +// Display an empty cell for an Option if it's None. +pub(crate) fn display_option_blank<T: std::fmt::Display>( + opt: &Option<T>, +) -> String { + opt.as_ref().map(|x| x.to_string()).unwrap_or_else(|| "".to_string()) +} diff --git a/dev-tools/omdb/src/bin/omdb/nexus.rs b/dev-tools/omdb/src/bin/omdb/nexus.rs index 5e40011412c..fd24eae1b95 100644 --- a/dev-tools/omdb/src/bin/omdb/nexus.rs +++ b/dev-tools/omdb/src/bin/omdb/nexus.rs @@ -9,6 +9,7 @@ use crate::check_allow_destructive::DestructiveOperationToken; use crate::db::DbUrlOptions; use crate::helpers::CONNECTION_OPTIONS_HEADING; use crate::helpers::const_max_len; +use crate::helpers::display_option_blank; use crate::helpers::should_colorize; use anyhow::Context; use anyhow::bail; @@ -962,6 +963,12 @@ fn print_task_details(bgtask: &BackgroundTask, details: &serde_json::Value) { "tuf_artifact_replication" => { print_task_tuf_artifact_replication(details); } + "webhook_dispatcher" => { + print_task_webhook_dispatcher(details); + } + "webhook_deliverator" => { + print_task_webhook_deliverator(details); + } _ => { println!( "warning: unknown background task: {:?} \ @@ -2259,6 +2266,296 @@ fn print_task_tuf_artifact_replication(details: 
&serde_json::Value) { } } +fn print_task_webhook_dispatcher(details: &serde_json::Value) { + use nexus_types::internal_api::background::WebhookDispatched; + use nexus_types::internal_api::background::WebhookDispatcherStatus; + use nexus_types::internal_api::background::WebhookGlobStatus; + + let WebhookDispatcherStatus { + globs_reprocessed, + glob_version, + errors, + dispatched, + no_receivers, + } = match serde_json::from_value::<WebhookDispatcherStatus>(details.clone()) + { + Err(error) => { + eprintln!( + "warning: failed to interpret task details: {:?}: {:?}", + error, details + ); + return; + } + Ok(status) => status, + }; + + if !errors.is_empty() { + println!( + " task did not complete successfully! ({} errors)", + errors.len() + ); + for line in &errors { + println!(" > {line}"); + } + } + + const DISPATCHED: &str = "events dispatched:"; + const NO_RECEIVERS: &str = "events with no receivers subscribed:"; + const OUTDATED_GLOBS: &str = "outdated glob subscriptions:"; + const GLOBS_REPROCESSED: &str = "glob subscriptions reprocessed:"; + const ALREADY_REPROCESSED: &str = + "globs already reprocessed by another Nexus:"; + const GLOB_ERRORS: &str = "globs that failed to be reprocessed"; + const WIDTH: usize = const_max_len(&[ + DISPATCHED, + NO_RECEIVERS, + OUTDATED_GLOBS, + GLOBS_REPROCESSED, + ALREADY_REPROCESSED, + GLOB_ERRORS, + ]) + 1; + const NUM_WIDTH: usize = 3; + + println!(" {DISPATCHED:<WIDTH$}{:>NUM_WIDTH$}", dispatched.len()); + if !dispatched.is_empty() { + #[derive(Tabled)] + #[tabled(rename_all = "SCREAMING_SNAKE_CASE")] + struct DispatchedRow { + // Don't include the typed UUID's kind in the table, which the + // TypedUuid fmt::Display impl will do... 
+ event: Uuid, + subscribed: usize, + dispatched: usize, + } + let table_rows = dispatched.iter().map( + |&WebhookDispatched { event_id, subscribed, dispatched }| { + DispatchedRow { + event: event_id.into_untyped_uuid(), + subscribed, + dispatched, + } + }, + ); + let table = tabled::Table::new(table_rows) + .with(tabled::settings::Style::empty()) + .with(tabled::settings::Padding::new(0, 1, 0, 0)) + .to_string(); + println!("{}", textwrap::indent(&table.to_string(), " ")); + } + + println!(" {NO_RECEIVERS:NUM_WIDTH$}", no_receivers.len()); + for event in no_receivers { + println!(" {event:?}"); + } + + let total_globs: usize = + globs_reprocessed.values().map(|globs| globs.len()).sum(); + if total_globs > 0 { + let mut reprocessed = 0; + let mut already_reprocessed = 0; + let mut glob_errors = 0; + println!(" {OUTDATED_GLOBS:NUM_WIDTH$}"); + println!(" current schema version: {glob_version}"); + for (rx_id, globs) in globs_reprocessed { + if globs.is_empty() { + continue; + } + println!(" receiver {rx_id:?}:"); + for (glob, status) in globs { + match status { + Ok(WebhookGlobStatus::AlreadyReprocessed) => { + println!(" > {glob:?}: already reprocessed"); + already_reprocessed += 1; + } + Ok(WebhookGlobStatus::Reprocessed { + created, + deleted, + prev_version, + }) => { + println!( + " > {glob:?}: previously at \ + {prev_version}\n \ + exact subscriptions: {created:>NUM_WIDTH$} \ + created, {deleted:>NUM_WIDTH$} deleted", + ); + reprocessed += 1; + } + Err(e) => { + println!(" > {glob:?}: FAILED: {e}"); + glob_errors += 1; + } + } + } + } + println!(" {GLOBS_REPROCESSED:NUM_WIDTH$}"); + println!( + " {ALREADY_REPROCESSED:NUM_WIDTH$}", + already_reprocessed + ); + println!( + "{} {GLOB_ERRORS:NUM_WIDTH$}", + warn_if_nonzero(glob_errors), + ); + } +} +fn print_task_webhook_deliverator(details: &serde_json::Value) { + use nexus_types::external_api::views::WebhookDeliveryAttemptResult; + use nexus_types::internal_api::background::WebhookDeliveratorStatus; + use 
nexus_types::internal_api::background::WebhookDeliveryFailure; + use nexus_types::internal_api::background::WebhookRxDeliveryStatus; + + let WebhookDeliveratorStatus { by_rx, error } = match serde_json::from_value::< + WebhookDeliveratorStatus, + >(details.clone()) + { + Err(error) => { + eprintln!( + "warning: failed to interpret task details: {:?}: {:?}", + error, details + ); + return; + } + Ok(status) => status, + }; + + if let Some(error) = error { + println!(" task did not complete successfully:\n {error}"); + } + const RECEIVERS: &str = "receivers:"; + const TOTAL_OK: &str = "successful deliveries:"; + const TOTAL_FAILED: &str = "failed deliveries:"; + const TOTAL_ALREADY_DELIVERED: &str = "already delivered by another Nexus:"; + const TOTAL_IN_PROGRESS: &str = "in progress by another Nexus:"; + const TOTAL_ERRORS: &str = "internal delivery errors:"; + const WIDTH: usize = const_max_len(&[ + RECEIVERS, + TOTAL_OK, + TOTAL_FAILED, + TOTAL_ALREADY_DELIVERED, + TOTAL_IN_PROGRESS, + TOTAL_ERRORS, + ]) + 1; + const NUM_WIDTH: usize = 3; + + let mut total_ok = 0; + let mut total_already_delivered = 0; + let mut total_in_progress = 0; + let mut total_failed = 0; + let mut total_errors = 0; + println!(" {RECEIVERS:NUM_WIDTH$}", by_rx.len()); + for (rx_id, status) in by_rx { + let WebhookRxDeliveryStatus { + ready, + delivered_ok, + already_delivered, + in_progress, + failed_deliveries, + delivery_errors, + error, + } = status; + println!(" > {rx_id:?}: {ready}"); + + const SUCCESSFUL: &str = "successfully delivered:"; + const FAILED: &str = "failed:"; + const IN_PROGRESS: &str = "in progress elsewhere:"; + const ALREADY_DELIVERED: &str = "already delivered:"; + const ERRORS: &str = "internal errors:"; + const WIDTH: usize = const_max_len(&[ + SUCCESSFUL, + FAILED, + IN_PROGRESS, + ALREADY_DELIVERED, + ERRORS, + ]) + 1; + const NUM_WIDTH: usize = 3; + + println!(" {SUCCESSFUL:NUM_WIDTH$}"); + println!( + " {ALREADY_DELIVERED:NUM_WIDTH$}", + already_delivered, + ); + 
println!(" {IN_PROGRESS:<WIDTH$}{in_progress:>NUM_WIDTH$}"); + total_ok += delivered_ok; + total_already_delivered += already_delivered; + total_in_progress += in_progress; + let n_failed = failed_deliveries.len(); + total_failed += n_failed; + println!(" {FAILED:<WIDTH$}{n_failed:>NUM_WIDTH$}"); + if n_failed > 0 { + #[derive(Tabled)] + #[tabled(rename_all = "SCREAMING_SNAKE_CASE")] + struct FailureRow { + event: Uuid, + delivery: Uuid, + #[tabled(rename = "#")] + attempt: usize, + result: WebhookDeliveryAttemptResult, + #[tabled(display_with = "display_option_blank")] + status: Option<u16>, + #[tabled(display_with = "display_option_blank")] + duration: Option<chrono::TimeDelta>, + } + let table_rows = failed_deliveries.into_iter().map( + |WebhookDeliveryFailure { + delivery_id, + event_id, + attempt, + result, + response_status, + response_duration, + }| FailureRow { + // Turn these into untyped `Uuid`s so that the Display impl + // doesn't include the UUID kind in the table. + delivery: delivery_id.into_untyped_uuid(), + event: event_id.into_untyped_uuid(), + attempt, + result, + status: response_status, + duration: response_duration, + }, + ); + let table = tabled::Table::new(table_rows) + .with(tabled::settings::Style::empty()) + .with(tabled::settings::Padding::new(0, 1, 0, 0)) + .to_string(); + println!("{}", textwrap::indent(&table.to_string(), " ")); + } + let n_internal_errors = + delivery_errors.len() + if error.is_some() { 1 } else { 0 }; + if n_internal_errors > 0 { + total_errors += n_internal_errors; + println!( + "/!\\ {ERRORS:<WIDTH$}{:>NUM_WIDTH$}", + n_internal_errors, + ); + if let Some(error) = error { + println!(" > {error}") + } + for (id, error) in delivery_errors { + println!(" > {id:?}: {error}") + } + } + } + println!(" {TOTAL_OK:<WIDTH$}{total_ok:>NUM_WIDTH$}"); + println!(" {TOTAL_FAILED:<WIDTH$}{total_failed:>NUM_WIDTH$}"); + println!( + "{} {TOTAL_ERRORS:<WIDTH$}{total_errors:>NUM_WIDTH$}", + warn_if_nonzero(total_errors), + ); + println!( + " {TOTAL_ALREADY_DELIVERED:<WIDTH$}{:>NUM_WIDTH$}", + total_already_delivered + ); + println!( + " {TOTAL_IN_PROGRESS:<WIDTH$}{:>NUM_WIDTH$}", + 
total_in_progress + ); +} + +fn warn_if_nonzero(n: usize) -> &'static str { + if n > 0 { "/!\\" } else { " " } +} + /// Summarizes an `ActivationReason` fn reason_str(reason: &ActivationReason) -> &'static str { match reason { diff --git a/dev-tools/omdb/tests/env.out b/dev-tools/omdb/tests/env.out index cf5eb09cad2..0e6a62d6478 100644 --- a/dev-tools/omdb/tests/env.out +++ b/dev-tools/omdb/tests/env.out @@ -191,6 +191,14 @@ task: "vpc_route_manager" propagates updated VPC routes to all OPTE ports +task: "webhook_deliverator" + sends webhook delivery requests + + +task: "webhook_dispatcher" + dispatches queued webhook events to receivers + + --------------------------------------------- stderr: note: using Nexus URL http://127.0.0.1:REDACTED_PORT @@ -379,6 +387,14 @@ task: "vpc_route_manager" propagates updated VPC routes to all OPTE ports +task: "webhook_deliverator" + sends webhook delivery requests + + +task: "webhook_dispatcher" + dispatches queued webhook events to receivers + + --------------------------------------------- stderr: note: Nexus URL not specified. Will pick one from DNS. @@ -554,6 +570,14 @@ task: "vpc_route_manager" propagates updated VPC routes to all OPTE ports +task: "webhook_deliverator" + sends webhook delivery requests + + +task: "webhook_dispatcher" + dispatches queued webhook events to receivers + + --------------------------------------------- stderr: note: Nexus URL not specified. Will pick one from DNS. 
diff --git a/dev-tools/omdb/tests/successes.out b/dev-tools/omdb/tests/successes.out index d1c2108b44a..ac3559cced7 100644 --- a/dev-tools/omdb/tests/successes.out +++ b/dev-tools/omdb/tests/successes.out @@ -399,6 +399,14 @@ task: "vpc_route_manager" propagates updated VPC routes to all OPTE ports +task: "webhook_deliverator" + sends webhook delivery requests + + +task: "webhook_dispatcher" + dispatches queued webhook events to receivers + + --------------------------------------------- stderr: note: using Nexus URL http://127.0.0.1:REDACTED_PORT/ @@ -775,6 +783,26 @@ task: "vpc_route_manager" started at (s ago) and ran for ms warning: unknown background task: "vpc_route_manager" (don't know how to interpret details: Object {}) +task: "webhook_deliverator" + configured period: every m + currently executing: no + last completed activation: , triggered by a periodic timer firing + started at (s ago) and ran for ms + receivers: 0 + successful deliveries: 0 + failed deliveries: 0 + internal delivery errors: 0 + already delivered by another Nexus: 0 + in progress by another Nexus: 0 + +task: "webhook_dispatcher" + configured period: every m + currently executing: no + last completed activation: , triggered by a periodic timer firing + started at (s ago) and ran for ms + events dispatched: 0 + events with no receivers subscribed: 0 + --------------------------------------------- stderr: note: using Nexus URL http://127.0.0.1:REDACTED_PORT/ @@ -1274,6 +1302,26 @@ task: "vpc_route_manager" started at (s ago) and ran for ms warning: unknown background task: "vpc_route_manager" (don't know how to interpret details: Object {}) +task: "webhook_deliverator" + configured period: every m + currently executing: no + last completed activation: , triggered by a periodic timer firing + started at (s ago) and ran for ms + receivers: 0 + successful deliveries: 0 + failed deliveries: 0 + internal delivery errors: 0 + already delivered by another Nexus: 0 + in progress by another Nexus: 
0 + +task: "webhook_dispatcher" + configured period: every m + currently executing: no + last completed activation: , triggered by a periodic timer firing + started at (s ago) and ran for ms + events dispatched: 0 + events with no receivers subscribed: 0 + --------------------------------------------- stderr: note: using Nexus URL http://127.0.0.1:REDACTED_PORT/ diff --git a/nexus-config/src/nexus_config.rs b/nexus-config/src/nexus_config.rs index fb83226bdef..48d2b04d089 100644 --- a/nexus-config/src/nexus_config.rs +++ b/nexus-config/src/nexus_config.rs @@ -421,6 +421,10 @@ pub struct BackgroundTaskConfig { /// configuration for read-only region replacement start task pub read_only_region_replacement_start: ReadOnlyRegionReplacementStartConfig, + /// configuration for webhook dispatcher task + pub webhook_dispatcher: WebhookDispatcherConfig, + /// configuration for webhook deliverator task + pub webhook_deliverator: WebhookDeliveratorConfig, } #[serde_as] @@ -745,6 +749,46 @@ pub struct ReadOnlyRegionReplacementStartConfig { pub period_secs: Duration, } +#[serde_as] +#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] +pub struct WebhookDispatcherConfig { + /// period (in seconds) for periodic activations of this background task + #[serde_as(as = "DurationSeconds")] + pub period_secs: Duration, +} + +#[serde_as] +#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] +pub struct WebhookDeliveratorConfig { + /// period (in seconds) for periodic activations of this background task + #[serde_as(as = "DurationSeconds")] + pub period_secs: Duration, + + /// duration after which another Nexus' lease on a delivery attempt is + /// considered expired. + /// + /// this is tuneable to allow testing lease expiration without having to + /// wait a long time. + #[serde(default = "WebhookDeliveratorConfig::default_lease_timeout_secs")] + pub lease_timeout_secs: u64, + + /// backoff period for the first retry of a failed delivery attempt. 
+ /// + /// this is tuneable to allow testing delivery retries without having to + /// wait a long time. + #[serde(default = "WebhookDeliveratorConfig::default_first_retry_backoff")] + pub first_retry_backoff_secs: u64, + + /// backoff period for the second retry of a failed delivery attempt. + /// + /// this is tuneable to allow testing delivery retries without having to + /// wait a long time. + #[serde( + default = "WebhookDeliveratorConfig::default_second_retry_backoff" + )] + pub second_retry_backoff_secs: u64, +} + /// Configuration for a nexus server #[derive(Clone, Debug, Deserialize, PartialEq, Serialize)] pub struct PackageConfig { @@ -816,6 +860,20 @@ impl std::fmt::Display for SchemeName { } } +impl WebhookDeliveratorConfig { + const fn default_lease_timeout_secs() -> u64 { + 60 // one minute + } + + const fn default_first_retry_backoff() -> u64 { + 60 // one minute + } + + const fn default_second_retry_backoff() -> u64 { + 60 * 5 // five minutes + } +} + #[cfg(test)] mod test { use super::*; @@ -1005,6 +1063,11 @@ mod test { tuf_artifact_replication.period_secs = 300 tuf_artifact_replication.min_sled_replication = 3 read_only_region_replacement_start.period_secs = 30 + webhook_dispatcher.period_secs = 42 + webhook_deliverator.period_secs = 43 + webhook_deliverator.lease_timeout_secs = 44 + webhook_deliverator.first_retry_backoff_secs = 45 + webhook_deliverator.second_retry_backoff_secs = 46 [default_region_allocation_strategy] type = "random" seed = 0 @@ -1210,6 +1273,15 @@ mod test { ReadOnlyRegionReplacementStartConfig { period_secs: Duration::from_secs(30), }, + webhook_dispatcher: WebhookDispatcherConfig { + period_secs: Duration::from_secs(42), + }, + webhook_deliverator: WebhookDeliveratorConfig { + period_secs: Duration::from_secs(43), + lease_timeout_secs: 44, + first_retry_backoff_secs: 45, + second_retry_backoff_secs: 46, + }, }, default_region_allocation_strategy: crate::nexus_config::RegionAllocationStrategy::Random { @@ -1296,6 +1368,8 @@ 
mod test { tuf_artifact_replication.period_secs = 300 tuf_artifact_replication.min_sled_replication = 3 read_only_region_replacement_start.period_secs = 30 + webhook_dispatcher.period_secs = 42 + webhook_deliverator.period_secs = 43 [default_region_allocation_strategy] type = "random" "##, diff --git a/nexus/Cargo.toml b/nexus/Cargo.toml index afbbc793193..8e9bae0851a 100644 --- a/nexus/Cargo.toml +++ b/nexus/Cargo.toml @@ -39,6 +39,7 @@ gateway-client.workspace = true headers.workspace = true hex.workspace = true hickory-resolver.workspace = true +hmac.workspace = true http.workspace = true http-body-util.workspace = true hyper.workspace = true @@ -51,6 +52,7 @@ ipnetwork.workspace = true itertools.workspace = true lldpd-client.workspace = true macaddr.workspace = true +maplit.workspace = true # Not under "dev-dependencies"; these also need to be implemented for # integration tests. nexus-config.workspace = true @@ -75,6 +77,7 @@ qorb.workspace = true rand.workspace = true range-requests.workspace = true ref-cast.workspace = true +regex.workspace = true reqwest = { workspace = true, features = ["json"] } ring.workspace = true samael.workspace = true @@ -162,6 +165,7 @@ hickory-resolver.workspace = true tufaceous.workspace = true tufaceous-lib.workspace = true httptest.workspace = true +httpmock.workspace = true strum.workspace = true tufaceous-artifact.workspace = true diff --git a/nexus/auth/src/authz/api_resources.rs b/nexus/auth/src/authz/api_resources.rs index e833d1efcda..70f2dc49fb7 100644 --- a/nexus/auth/src/authz/api_resources.rs +++ b/nexus/auth/src/authz/api_resources.rs @@ -711,6 +711,47 @@ impl AuthorizedResource for TargetReleaseConfig { } } +/// Synthetic resource used for modeling access to the list of webhook event +/// classes. 
+#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub struct WebhookEventClassList; +pub const WEBHOOK_EVENT_CLASS_LIST: WebhookEventClassList = + WebhookEventClassList {}; + +impl oso::PolarClass for WebhookEventClassList { + fn get_polar_class_builder() -> oso::ClassBuilder { + // Roles are not directly attached to EventClassList + oso::Class::builder() + .with_equality_check() + .add_attribute_getter("fleet", |_| FLEET) + } +} + +impl AuthorizedResource for WebhookEventClassList { + fn load_roles<'fut>( + &'fut self, + opctx: &'fut OpContext, + authn: &'fut authn::Context, + roleset: &'fut mut RoleSet, + ) -> futures::future::BoxFuture<'fut, Result<(), Error>> { + load_roles_for_resource_tree(&FLEET, opctx, authn, roleset).boxed() + } + + fn on_unauthorized( + &self, + _: &Authz, + error: Error, + _: AnyActor, + _: Action, + ) -> Error { + error + } + + fn polar_class(&self) -> oso::Class { + Self::get_polar_class() + } +} + // Main resource hierarchy: Projects and their resources authz_resource! { @@ -1101,3 +1142,27 @@ authz_resource! { roles_allowed = false, polar_snippet = FleetChild, } + +authz_resource! { + name = "WebhookEvent", + parent = "Fleet", + primary_key = { uuid_kind = WebhookEventKind }, + roles_allowed = false, + polar_snippet = FleetChild, +} + +authz_resource! { + name = "WebhookReceiver", + parent = "Fleet", + primary_key = { uuid_kind = WebhookReceiverKind }, + roles_allowed = false, + polar_snippet = FleetChild, +} + +authz_resource! { + name = "WebhookSecret", + parent = "WebhookReceiver", + primary_key = { uuid_kind = WebhookSecretKind }, + roles_allowed = false, + polar_snippet = Custom, +} diff --git a/nexus/auth/src/authz/omicron.polar b/nexus/auth/src/authz/omicron.polar index c1e463e9bf3..80abeaa1458 100644 --- a/nexus/auth/src/authz/omicron.polar +++ b/nexus/auth/src/authz/omicron.polar @@ -593,3 +593,24 @@ has_role(USER_DB_INIT: AuthenticatedActor, "admin", _silo: Silo); # Allow the internal API admin permissions on all silos. 
has_role(USER_INTERNAL_API: AuthenticatedActor, "admin", _silo: Silo); + +resource WebhookSecret { + permissions = [ "read", "modify" ]; + relations = { parent_webhook_receiver: WebhookReceiver }; + + "read" if "read" on "parent_webhook_receiver"; + "modify" if "modify" on "parent_webhook_receiver"; +} + +has_relation(rx: WebhookReceiver, "parent_webhook_receiver", secret: WebhookSecret) + if secret.webhook_receiver = rx; + +resource WebhookEventClassList { + permissions = [ "list_children" ]; + relations = { parent_fleet: Fleet }; + + "list_children" if "viewer" on "parent_fleet"; +} + +has_relation(fleet: Fleet, "parent_fleet", collection: WebhookEventClassList) + if collection.fleet = fleet; diff --git a/nexus/auth/src/authz/oso_generic.rs b/nexus/auth/src/authz/oso_generic.rs index cef33642213..3b47fbfdd0a 100644 --- a/nexus/auth/src/authz/oso_generic.rs +++ b/nexus/auth/src/authz/oso_generic.rs @@ -115,6 +115,7 @@ pub fn make_omicron_oso(log: &slog::Logger) -> Result { SiloIdentityProviderList::get_polar_class(), SiloUserList::get_polar_class(), TargetReleaseConfig::get_polar_class(), + WebhookEventClassList::get_polar_class(), ]; for c in classes { oso_builder = oso_builder.register_class(c)?; @@ -163,6 +164,9 @@ pub fn make_omicron_oso(log: &slog::Logger) -> Result { Sled::init(), TufRepo::init(), TufArtifact::init(), + WebhookEvent::init(), + WebhookReceiver::init(), + WebhookSecret::init(), Zpool::init(), Service::init(), UserBuiltin::init(), diff --git a/nexus/db-fixed-data/src/lib.rs b/nexus/db-fixed-data/src/lib.rs index 062760afc97..696b305ca95 100644 --- a/nexus/db-fixed-data/src/lib.rs +++ b/nexus/db-fixed-data/src/lib.rs @@ -31,6 +31,8 @@ // 001de000-074c built-in services vpc // 001de000-c470 built-in services vpc subnets // 001de000-all0 singleton ID for source IP allowlist ("all0" is like "allow") +// 001de000-7768 singleton ID for webhook probe event ('wh' for 'webhook' +// is ascii 0x77 0x68). 
use std::sync::LazyLock; diff --git a/nexus/db-macros/outputs/project.txt b/nexus/db-macros/outputs/project.txt index fa08115fdb3..5bf43aa0683 100644 --- a/nexus/db-macros/outputs/project.txt +++ b/nexus/db-macros/outputs/project.txt @@ -352,7 +352,7 @@ impl<'a> Project<'a> { let (authz_silo, _) = Silo::lookup_by_id_no_authz( opctx, datastore, - &db_row.silo_id, + &db_row.silo_id.into(), ) .await?; let authz_project = Self::make_authz( diff --git a/nexus/db-macros/src/lookup.rs b/nexus/db-macros/src/lookup.rs index 6b78a64f5cf..717df2f84e5 100644 --- a/nexus/db-macros/src/lookup.rs +++ b/nexus/db-macros/src/lookup.rs @@ -788,7 +788,7 @@ fn generate_database_functions(config: &Config) -> TokenStream { quote! { let (#(#ancestors_authz_names,)* _) = #parent_resource_name::lookup_by_id_no_authz( - opctx, datastore, &db_row.#parent_id + opctx, datastore, &db_row.#parent_id.into() ).await?; }, quote! { .filter(dsl::#parent_id.eq(#parent_authz_name.id())) }, diff --git a/nexus/db-model/src/instance.rs b/nexus/db-model/src/instance.rs index f915ac7cb14..e1b1a487eb8 100644 --- a/nexus/db-model/src/instance.rs +++ b/nexus/db-model/src/instance.rs @@ -8,6 +8,7 @@ use super::{ }; use crate::collection::DatastoreAttachTargetConfig; use crate::schema::{disk, external_ip, instance}; +use crate::serde_time_delta::optional_time_delta; use chrono::{DateTime, TimeDelta, Utc}; use db_macros::Resource; use diesel::expression::{ValidGrouping, is_aggregate}; @@ -453,86 +454,6 @@ impl InstanceAutoRestart { .and(dsl::updater_id.is_null()) } } - -/// It's just a type with the same representation as a `TimeDelta` that -/// implements `Serialize` and `Deserialize`, because `chrono`'s `Deserialize` -/// implementation for this type is not actually for `TimeDelta`, but for the -/// `rkyv::Archived` wrapper type (see [here]). 
While `chrono` *does* provide a -/// `Serialize` implementation that we could use with this type, it's preferable -/// to provide our own `Serialize` as well as `Deserialize`, since a future -/// semver-compatible change in `chrono` could change the struct's internal -/// representation, quietly breaking our ability to round-trip it. So, let's -/// just derive both traits for this thing, which we control. -/// -/// If you feel like this is unfortunate...yeah, I do too. -/// -/// [here]: https://docs.rs/chrono/latest/chrono/struct.TimeDelta.html#impl-Deserialize%3CTimeDelta,+__D%3E-for-%3CTimeDelta+as+Archive%3E::Archived -#[derive(Copy, Clone, Debug, Serialize, Deserialize)] -struct SerdeTimeDelta { - secs: i64, - nanos: i32, -} - -impl From for SerdeTimeDelta { - fn from(delta: TimeDelta) -> Self { - Self { secs: delta.num_seconds(), nanos: delta.subsec_nanos() } - } -} - -impl TryFrom for TimeDelta { - type Error = &'static str; - fn try_from( - SerdeTimeDelta { secs, nanos }: SerdeTimeDelta, - ) -> Result { - // This is a bit weird: `chrono::TimeDelta`'s getter for - // nanoseconds (`TimeDelta::subsec_nanos`) returns them as an i32, - // with the sign coming from the seconds part, but when constructing - // a `TimeDelta`, it takes them as a `u32` and panics if they're too - // big. So, we take the absolute value here, because what the serialize - // impl saw may have had its sign bit set, but the constructor will get - // mad if we give it something with that bit set. Hopefully that made - // sense? 
- let nanos = nanos.unsigned_abs(); - TimeDelta::new(secs, nanos).ok_or("time delta out of range") - } -} -mod optional_time_delta { - use super::*; - use serde::{Deserializer, Serializer}; - - pub(super) fn deserialize<'de, D>( - deserializer: D, - ) -> Result, D::Error> - where - D: Deserializer<'de>, - { - let val = Option::::deserialize(deserializer)?; - match val { - None => return Ok(None), - Some(delta) => delta - .try_into() - .map_err(|e| { - ::custom(format!( - "{e}: {val:?}" - )) - }) - .map(Some), - } - } - - pub(super) fn serialize( - td: &Option, - serializer: S, - ) -> Result - where - S: Serializer, - { - td.as_ref() - .map(|&delta| SerdeTimeDelta::from(delta)) - .serialize(serializer) - } -} - /// The parts of an Instance that can be directly updated after creation. #[derive(Clone, Debug, AsChangeset, Serialize, Deserialize)] #[diesel(table_name = instance, treat_none_as_null = true)] diff --git a/nexus/db-model/src/lib.rs b/nexus/db-model/src/lib.rs index 5323c171714..76ca1546585 100644 --- a/nexus/db-model/src/lib.rs +++ b/nexus/db-model/src/lib.rs @@ -61,11 +61,19 @@ mod producer_endpoint; mod project; mod rendezvous_debug_dataset; mod semver_version; +mod serde_time_delta; mod switch_interface; mod switch_port; mod target_release; mod v2p_mapping; mod vmm_state; +mod webhook_delivery; +mod webhook_delivery_attempt_result; +mod webhook_delivery_state; +mod webhook_delivery_trigger; +mod webhook_event; +mod webhook_event_class; +mod webhook_rx; // These actually represent subqueries, not real table. // However, they must be defined in the same crate as our tables // for join-based marker trait generation. 
@@ -232,6 +240,13 @@ pub use vpc_firewall_rule::*; pub use vpc_route::*; pub use vpc_router::*; pub use vpc_subnet::*; +pub use webhook_delivery::*; +pub use webhook_delivery_attempt_result::*; +pub use webhook_delivery_state::*; +pub use webhook_delivery_trigger::*; +pub use webhook_event::*; +pub use webhook_event_class::*; +pub use webhook_rx::*; pub use zpool::*; // TODO: The existence of both impl_enum_type and impl_enum_wrapper is a diff --git a/nexus/db-model/src/schema.rs b/nexus/db-model/src/schema.rs index f646561e049..57df04d682b 100644 --- a/nexus/db-model/src/schema.rs +++ b/nexus/db-model/src/schema.rs @@ -2172,3 +2172,111 @@ table! { region_snapshot_snapshot_id -> Nullable, } } + +table! { + webhook_receiver (id) { + id -> Uuid, + name -> Text, + description -> Text, + time_created -> Timestamptz, + time_modified -> Timestamptz, + time_deleted -> Nullable, + secret_gen -> Int8, + subscription_gen -> Int8, + endpoint -> Text, + } +} + +table! { + webhook_secret (id) { + id -> Uuid, + time_created -> Timestamptz, + time_modified -> Timestamptz, + time_deleted -> Nullable, + rx_id -> Uuid, + secret -> Text, + } +} + +table! { + webhook_rx_subscription (rx_id, event_class) { + rx_id -> Uuid, + event_class -> crate::WebhookEventClassEnum, + glob -> Nullable, + time_created -> Timestamptz, + } +} + +table! { + webhook_rx_event_glob (rx_id, glob) { + rx_id -> Uuid, + glob -> Text, + regex -> Text, + time_created -> Timestamptz, + schema_version -> Text, + } +} + +allow_tables_to_appear_in_same_query!( + webhook_receiver, + webhook_secret, + webhook_rx_subscription, + webhook_rx_event_glob, + webhook_event, +); +joinable!(webhook_rx_subscription -> webhook_receiver (rx_id)); +joinable!(webhook_secret -> webhook_receiver (rx_id)); +joinable!(webhook_rx_event_glob -> webhook_receiver (rx_id)); + +table! 
{ + webhook_event (id) { + id -> Uuid, + time_created -> Timestamptz, + time_modified -> Timestamptz, + event_class -> crate::WebhookEventClassEnum, + event -> Jsonb, + time_dispatched -> Nullable, + num_dispatched -> Int8, + } +} + +table! { + webhook_delivery (id) { + id -> Uuid, + event_id -> Uuid, + rx_id -> Uuid, + triggered_by -> crate::WebhookDeliveryTriggerEnum, + payload -> Jsonb, + attempts -> Int2, + time_created -> Timestamptz, + time_completed -> Nullable, + state -> crate::WebhookDeliveryStateEnum, + deliverator_id -> Nullable, + time_leased -> Nullable, + } +} + +allow_tables_to_appear_in_same_query!(webhook_receiver, webhook_delivery); +joinable!(webhook_delivery -> webhook_receiver (rx_id)); +allow_tables_to_appear_in_same_query!(webhook_delivery, webhook_event); +allow_tables_to_appear_in_same_query!(webhook_delivery_attempt, webhook_event); +joinable!(webhook_delivery -> webhook_event (event_id)); + +table! { + webhook_delivery_attempt (delivery_id, attempt) { + delivery_id -> Uuid, + attempt -> Int2, + rx_id -> Uuid, + result -> crate::WebhookDeliveryAttemptResultEnum, + response_status -> Nullable, + response_duration -> Nullable, + time_created -> Timestamptz, + deliverator_id -> Uuid, + } +} + +allow_tables_to_appear_in_same_query!( + webhook_delivery, + webhook_delivery_attempt +); +joinable!(webhook_delivery_attempt -> webhook_delivery (delivery_id)); diff --git a/nexus/db-model/src/schema_versions.rs b/nexus/db-model/src/schema_versions.rs index 139cac28d74..1c9b4badae6 100644 --- a/nexus/db-model/src/schema_versions.rs +++ b/nexus/db-model/src/schema_versions.rs @@ -16,7 +16,7 @@ use std::{collections::BTreeMap, sync::LazyLock}; /// /// This must be updated when you change the database schema. Refer to /// schema/crdb/README.adoc in the root of this repository for details. 
-pub const SCHEMA_VERSION: Version = Version::new(131, 0, 0); +pub const SCHEMA_VERSION: Version = Version::new(132, 0, 0); /// List of all past database schema versions, in *reverse* order /// @@ -28,6 +28,7 @@ static KNOWN_VERSIONS: LazyLock> = LazyLock::new(|| { // | leaving the first copy as an example for the next person. // v // KnownVersion::new(next_int, "unique-dirname-with-the-sql-files"), + KnownVersion::new(132, "webhooks"), KnownVersion::new(131, "tuf-generation"), KnownVersion::new(130, "bp-sled-agent-generation"), KnownVersion::new(129, "create-target-release"), diff --git a/nexus/db-model/src/serde_time_delta.rs b/nexus/db-model/src/serde_time_delta.rs new file mode 100644 index 00000000000..49b9b7239dd --- /dev/null +++ b/nexus/db-model/src/serde_time_delta.rs @@ -0,0 +1,87 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +use chrono::TimeDelta; +use serde::Deserialize; +use serde::Serialize; + +/// It's just a type with the same representation as a `TimeDelta` that +/// implements `Serialize` and `Deserialize`, because `chrono`'s `Deserialize` +/// implementation for this type is not actually for `TimeDelta`, but for the +/// `rkyv::Archived` wrapper type (see [here]). While `chrono` *does* provide a +/// `Serialize` implementation that we could use with this type, it's preferable +/// to provide our own `Serialize` as well as `Deserialize`, since a future +/// semver-compatible change in `chrono` could change the struct's internal +/// representation, quietly breaking our ability to round-trip it. So, let's +/// just derive both traits for this thing, which we control. +/// +/// If you feel like this is unfortunate...yeah, I do too. 
+/// +/// [here]: https://docs.rs/chrono/latest/chrono/struct.TimeDelta.html#impl-Deserialize%3CTimeDelta,+__D%3E-for-%3CTimeDelta+as+Archive%3E::Archived +#[derive(Copy, Clone, Debug, Serialize, Deserialize)] +pub(crate) struct SerdeTimeDelta { + secs: i64, + nanos: i32, +} + +impl From for SerdeTimeDelta { + fn from(delta: TimeDelta) -> Self { + Self { secs: delta.num_seconds(), nanos: delta.subsec_nanos() } + } +} + +impl TryFrom for TimeDelta { + type Error = &'static str; + fn try_from( + SerdeTimeDelta { secs, nanos }: SerdeTimeDelta, + ) -> Result { + // This is a bit weird: `chrono::TimeDelta`'s getter for + // nanoseconds (`TimeDelta::subsec_nanos`) returns them as an i32, + // with the sign coming from the seconds part, but when constructing + // a `TimeDelta`, it takes them as a `u32` and panics if they're too + // big. So, we take the absolute value here, because what the serialize + // impl saw may have had its sign bit set, but the constructor will get + // mad if we give it something with that bit set. Hopefully that made + // sense? 
+ let nanos = nanos.unsigned_abs(); + TimeDelta::new(secs, nanos).ok_or("time delta out of range") + } +} + +pub(crate) mod optional_time_delta { + use super::*; + use serde::{Deserializer, Serializer}; + + pub(crate) fn deserialize<'de, D>( + deserializer: D, + ) -> Result, D::Error> + where + D: Deserializer<'de>, + { + let val = Option::::deserialize(deserializer)?; + match val { + None => return Ok(None), + Some(delta) => delta + .try_into() + .map_err(|e| { + ::custom(format!( + "{e}: {val:?}" + )) + }) + .map(Some), + } + } + + pub(crate) fn serialize( + td: &Option, + serializer: S, + ) -> Result + where + S: Serializer, + { + td.as_ref() + .map(|&delta| SerdeTimeDelta::from(delta)) + .serialize(serializer) + } +} diff --git a/nexus/db-model/src/webhook_delivery.rs b/nexus/db-model/src/webhook_delivery.rs new file mode 100644 index 00000000000..6d3aa0ec931 --- /dev/null +++ b/nexus/db-model/src/webhook_delivery.rs @@ -0,0 +1,201 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +use crate::SqlU8; +use crate::WebhookDeliveryAttemptResult; +use crate::WebhookDeliveryState; +use crate::WebhookDeliveryTrigger; +use crate::WebhookEvent; +use crate::WebhookEventClass; +use crate::schema::{webhook_delivery, webhook_delivery_attempt}; +use crate::serde_time_delta::optional_time_delta; +use crate::typed_uuid::DbTypedUuid; +use chrono::{DateTime, TimeDelta, Utc}; +use nexus_types::external_api::views; +use nexus_types::identity::Asset; +use omicron_uuid_kinds::GenericUuid; +use omicron_uuid_kinds::{ + OmicronZoneKind, OmicronZoneUuid, WebhookDeliveryKind, WebhookDeliveryUuid, + WebhookEventKind, WebhookEventUuid, WebhookReceiverKind, + WebhookReceiverUuid, +}; +use serde::Deserialize; +use serde::Serialize; + +/// A webhook delivery dispatch entry. 
+#[derive( + Clone, + Queryable, + Debug, + Selectable, + Serialize, + Deserialize, + Insertable, + PartialEq, +)] +#[diesel(table_name = webhook_delivery)] +pub struct WebhookDelivery { + /// ID of this dispatch entry. + pub id: DbTypedUuid, + + /// ID of the event dispatched to this receiver (foreign key into + /// `webhook_event`). + pub event_id: DbTypedUuid, + + /// ID of the receiver to which this event is dispatched (foreign key into + /// `webhook_rx`). + pub rx_id: DbTypedUuid, + + /// Describes why this delivery was triggered. + pub triggered_by: WebhookDeliveryTrigger, + + /// The data payload as sent to this receiver. + pub payload: serde_json::Value, + + /// Attempt count + pub attempts: SqlU8, + + /// The time at which this dispatch entry was created. + pub time_created: DateTime, + + /// The time at which the webhook message was either delivered successfully + /// or permanently failed. + pub time_completed: Option>, + + pub state: WebhookDeliveryState, + + pub deliverator_id: Option>, + + pub time_leased: Option>, +} + +impl WebhookDelivery { + pub fn new( + event: &WebhookEvent, + rx_id: &WebhookReceiverUuid, + trigger: WebhookDeliveryTrigger, + ) -> Self { + Self { + id: WebhookDeliveryUuid::new_v4().into(), + event_id: event.id().into(), + rx_id: (*rx_id).into(), + triggered_by: trigger, + payload: event.event.clone(), + attempts: SqlU8::new(0), + time_created: Utc::now(), + time_completed: None, + deliverator_id: None, + time_leased: None, + state: WebhookDeliveryState::Pending, + } + } + + pub fn new_probe( + rx_id: &WebhookReceiverUuid, + deliverator_id: &OmicronZoneUuid, + ) -> Self { + Self { + // Just kinda make something up... + id: WebhookDeliveryUuid::new_v4().into(), + // There's a singleton entry in the `webhook_event` table for + // probes, so that we can reference a real event ID but need not + // create a bunch of duplicate empty events every time a probe is sent. 
+ event_id: WebhookEventUuid::from_untyped_uuid( + WebhookEvent::PROBE_EVENT_ID, + ) + .into(), + rx_id: (*rx_id).into(), + triggered_by: WebhookDeliveryTrigger::Probe, + state: WebhookDeliveryState::Pending, + payload: serde_json::json!({}), + attempts: SqlU8::new(0), + time_created: Utc::now(), + time_completed: None, + deliverator_id: Some((*deliverator_id).into()), + time_leased: Some(Utc::now()), + } + } + + pub fn to_api_delivery( + &self, + event_class: WebhookEventClass, + attempts: &[WebhookDeliveryAttempt], + ) -> views::WebhookDelivery { + let mut view = views::WebhookDelivery { + id: self.id.into_untyped_uuid(), + webhook_id: self.rx_id.into(), + event_class: event_class.as_str().to_owned(), + event_id: self.event_id.into(), + state: self.state.into(), + trigger: self.triggered_by.into(), + attempts: attempts + .iter() + .map(views::WebhookDeliveryAttempt::from) + .collect(), + time_started: self.time_created, + }; + // Make sure attempts are in order; each attempt entry also includes an + // attempt number, which should be used authoritatively to determine the + // ordering of attempts, but it seems nice to also sort the list, + // because we can... + view.attempts.sort_by_key(|a| a.attempt); + view + } +} + +/// An individual delivery attempt for a [`WebhookDelivery`]. +#[derive( + Clone, + Queryable, + Debug, + Selectable, + Serialize, + Deserialize, + Insertable, + PartialEq, +)] +#[diesel(table_name = webhook_delivery_attempt)] +pub struct WebhookDeliveryAttempt { + /// ID of the delivery entry (foreign key into `webhook_delivery`). + pub delivery_id: DbTypedUuid, + + /// Attempt number (retry count). + pub attempt: SqlU8, + + /// ID of the receiver to which this event is dispatched (foreign key into + /// `webhook_rx`). 
+ pub rx_id: DbTypedUuid, + + pub result: WebhookDeliveryAttemptResult, + + pub response_status: Option, + + #[serde(with = "optional_time_delta")] + pub response_duration: Option, + + pub time_created: DateTime, + + pub deliverator_id: DbTypedUuid, +} + +impl WebhookDeliveryAttempt { + fn response_view(&self) -> Option { + Some(views::WebhookDeliveryResponse { + status: self.response_status? as u16, // i hate that this has to signed in the database... + duration_ms: self.response_duration?.num_milliseconds() as usize, + }) + } +} + +impl From<&'_ WebhookDeliveryAttempt> for views::WebhookDeliveryAttempt { + fn from(attempt: &WebhookDeliveryAttempt) -> Self { + let response = attempt.response_view(); + Self { + attempt: attempt.attempt.0 as usize, + result: attempt.result.into(), + time_sent: attempt.time_created, + response, + } + } +} diff --git a/nexus/db-model/src/webhook_delivery_attempt_result.rs b/nexus/db-model/src/webhook_delivery_attempt_result.rs new file mode 100644 index 00000000000..b75bef35147 --- /dev/null +++ b/nexus/db-model/src/webhook_delivery_attempt_result.rs @@ -0,0 +1,65 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. 
+ +use super::impl_enum_type; +use nexus_types::external_api::views; +use serde::Deserialize; +use serde::Serialize; +use std::fmt; + +impl_enum_type!( + #[derive(SqlType, Debug, Clone)] + #[diesel(postgres_type(name = "webhook_delivery_attempt_result", schema = "public"))] + pub struct WebhookDeliveryAttemptResultEnum; + + #[derive( + Copy, + Clone, + Debug, + PartialEq, + AsExpression, + FromSqlRow, + Serialize, + Deserialize, + strum::VariantArray, + )] + #[diesel(sql_type = WebhookDeliveryAttemptResultEnum)] + pub enum WebhookDeliveryAttemptResult; + + FailedHttpError => b"failed_http_error" + FailedUnreachable => b"failed_unreachable" + FailedTimeout => b"failed_timeout" + Succeeded => b"succeeded" +); + +impl WebhookDeliveryAttemptResult { + pub fn is_failed(&self) -> bool { + // Use canonical implementation from the API type. + views::WebhookDeliveryAttemptResult::from(*self).is_failed() + } +} + +impl fmt::Display for WebhookDeliveryAttemptResult { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + // Use canonical format from the API type. + views::WebhookDeliveryAttemptResult::from(*self).fmt(f) + } +} + +impl From + for views::WebhookDeliveryAttemptResult +{ + fn from(result: WebhookDeliveryAttemptResult) -> Self { + match result { + WebhookDeliveryAttemptResult::FailedHttpError => { + Self::FailedHttpError + } + WebhookDeliveryAttemptResult::FailedTimeout => Self::FailedTimeout, + WebhookDeliveryAttemptResult::FailedUnreachable => { + Self::FailedUnreachable + } + WebhookDeliveryAttemptResult::Succeeded => Self::Succeeded, + } + } +} diff --git a/nexus/db-model/src/webhook_delivery_state.rs b/nexus/db-model/src/webhook_delivery_state.rs new file mode 100644 index 00000000000..c381b4323cf --- /dev/null +++ b/nexus/db-model/src/webhook_delivery_state.rs @@ -0,0 +1,67 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. 
If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +use super::impl_enum_type; +use nexus_types::external_api::views; +use serde::Deserialize; +use serde::Serialize; +use std::fmt; + +impl_enum_type!( + #[derive(SqlType, Debug, Clone)] + #[diesel(postgres_type(name = "webhook_delivery_state", schema = "public"))] + pub struct WebhookDeliveryStateEnum; + + #[derive( + Copy, + Clone, + Debug, + PartialEq, + Serialize, + Deserialize, + AsExpression, + FromSqlRow, + strum::VariantArray, + )] + #[diesel(sql_type = WebhookDeliveryStateEnum)] + #[serde(rename_all = "snake_case")] + pub enum WebhookDeliveryState; + + Pending => b"pending" + Failed => b"failed" + Delivered => b"delivered" + +); + +impl fmt::Display for WebhookDeliveryState { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + // Forward to the canonical implementation in nexus-types. + views::WebhookDeliveryState::from(*self).fmt(f) + } +} + +impl From for views::WebhookDeliveryState { + fn from(trigger: WebhookDeliveryState) -> Self { + match trigger { + WebhookDeliveryState::Pending => Self::Pending, + WebhookDeliveryState::Failed => Self::Failed, + WebhookDeliveryState::Delivered => Self::Delivered, + } + } +} + +impl From for WebhookDeliveryState { + fn from(trigger: views::WebhookDeliveryState) -> Self { + match trigger { + views::WebhookDeliveryState::Pending => Self::Pending, + views::WebhookDeliveryState::Failed => Self::Failed, + views::WebhookDeliveryState::Delivered => Self::Delivered, + } + } +} + +impl diesel::query_builder::QueryId for WebhookDeliveryStateEnum { + type QueryId = (); + const HAS_STATIC_QUERY_ID: bool = false; +} diff --git a/nexus/db-model/src/webhook_delivery_trigger.rs b/nexus/db-model/src/webhook_delivery_trigger.rs new file mode 100644 index 00000000000..640372b44e9 --- /dev/null +++ b/nexus/db-model/src/webhook_delivery_trigger.rs @@ -0,0 +1,71 @@ +// This Source Code Form is subject to the terms of the 
Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +use super::impl_enum_type; +use nexus_types::external_api::views; +use serde::Deserialize; +use serde::Serialize; +use std::fmt; + +impl_enum_type!( + #[derive(SqlType, Debug, Clone)] + #[diesel(postgres_type(name = "webhook_delivery_trigger", schema = "public"))] + pub struct WebhookDeliveryTriggerEnum; + + #[derive( + Copy, + Clone, + Debug, + PartialEq, + Serialize, + Deserialize, + AsExpression, + FromSqlRow, + strum::VariantArray, + )] + #[diesel(sql_type = WebhookDeliveryTriggerEnum)] + #[serde(rename_all = "snake_case")] + pub enum WebhookDeliveryTrigger; + + Event => b"event" + Resend => b"resend" + Probe => b"probe" + +); + +impl WebhookDeliveryTrigger { + pub const ALL: &'static [Self] = ::VARIANTS; +} + +impl fmt::Display for WebhookDeliveryTrigger { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + // Forward to the canonical implementation in nexus-types. 
+ views::WebhookDeliveryTrigger::from(*self).fmt(f) + } +} + +impl From for views::WebhookDeliveryTrigger { + fn from(trigger: WebhookDeliveryTrigger) -> Self { + match trigger { + WebhookDeliveryTrigger::Event => Self::Event, + WebhookDeliveryTrigger::Resend => Self::Resend, + WebhookDeliveryTrigger::Probe => Self::Probe, + } + } +} + +impl From for WebhookDeliveryTrigger { + fn from(trigger: views::WebhookDeliveryTrigger) -> Self { + match trigger { + views::WebhookDeliveryTrigger::Event => Self::Event, + views::WebhookDeliveryTrigger::Resend => Self::Resend, + views::WebhookDeliveryTrigger::Probe => Self::Probe, + } + } +} + +impl diesel::query_builder::QueryId for WebhookDeliveryTriggerEnum { + type QueryId = (); + const HAS_STATIC_QUERY_ID: bool = false; +} diff --git a/nexus/db-model/src/webhook_event.rs b/nexus/db-model/src/webhook_event.rs new file mode 100644 index 00000000000..4a56cb2bd12 --- /dev/null +++ b/nexus/db-model/src/webhook_event.rs @@ -0,0 +1,48 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +use crate::WebhookEventClass; +use crate::schema::webhook_event; +use chrono::{DateTime, Utc}; +use db_macros::Asset; +use serde::{Deserialize, Serialize}; + +/// A webhook event. +#[derive( + Clone, + Queryable, + Debug, + Selectable, + Serialize, + Deserialize, + Insertable, + PartialEq, + Asset, +)] +#[diesel(table_name = webhook_event)] +#[asset(uuid_kind = WebhookEventKind)] +pub struct WebhookEvent { + #[diesel(embed)] + pub identity: WebhookEventIdentity, + + /// The time at which this event was dispatched by creating entries in the + /// `webhook_delivery` table. + /// + /// If this is `None`, this event has yet to be dispatched. + pub time_dispatched: Option>, + + /// The class of this event. + pub event_class: WebhookEventClass, + + /// The event's data payload. 
+ pub event: serde_json::Value, + + pub num_dispatched: i64, +} + +impl WebhookEvent { + /// UUID of the singleton event entry for webhook liveness probes. + pub const PROBE_EVENT_ID: uuid::Uuid = + uuid::Uuid::from_u128(0x001de000_7768_4000_8000_000000000001); +} diff --git a/nexus/db-model/src/webhook_event_class.rs b/nexus/db-model/src/webhook_event_class.rs new file mode 100644 index 00000000000..6f79cbe7748 --- /dev/null +++ b/nexus/db-model/src/webhook_event_class.rs @@ -0,0 +1,194 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +use super::impl_enum_type; +use nexus_types::external_api::views; +use serde::de::{self, Deserialize, Deserializer}; +use serde::ser::{Serialize, Serializer}; +use std::fmt; + +impl_enum_type!( + #[derive(SqlType, Debug, Clone)] + #[diesel(postgres_type(name = "webhook_event_class", schema = "public"))] + pub struct WebhookEventClassEnum; + + #[derive( + Copy, + Clone, + Debug, + PartialEq, + Eq, + Hash, + AsExpression, + FromSqlRow, + strum::VariantArray, + )] + #[diesel(sql_type = WebhookEventClassEnum)] + pub enum WebhookEventClass; + + Probe => b"probe" + TestFoo => b"test.foo" + TestFooBar => b"test.foo.bar" + TestFooBaz => b"test.foo.baz" + TestQuuxBar => b"test.quux.bar" + TestQuuxBarBaz => b"test.quux.bar.baz" +); + +impl WebhookEventClass { + pub fn as_str(&self) -> &'static str { + // TODO(eliza): it would be really nice if these strings were all + // declared a single time, rather than twice (in both `impl_enum_type!` + // and here)... 
+ match self { + Self::Probe => "probe", + Self::TestFoo => "test.foo", + Self::TestFooBar => "test.foo.bar", + Self::TestFooBaz => "test.foo.baz", + Self::TestQuuxBar => "test.quux.bar", + Self::TestQuuxBarBaz => "test.quux.bar.baz", + } + } + + /// Returns `true` if this event class is only used for testing and should + /// not be incldued in the public event class list API endpoint. + pub fn is_test(&self) -> bool { + matches!( + self, + Self::TestFoo + | Self::TestFooBar + | Self::TestFooBaz + | Self::TestQuuxBar + | Self::TestQuuxBarBaz + ) + } + + /// Returns a human-readable description string describing this event class. + pub fn description(&self) -> &'static str { + match self { + Self::Probe => { + "Synthetic events sent for webhook receiver liveness probes.\n\ + Receivers should return 2xx HTTP responses for these events, \ + but they should NOT be treated as notifications of an actual \ + event in the system." + } + Self::TestFoo + | Self::TestFooBar + | Self::TestFooBaz + | Self::TestQuuxBar + | Self::TestQuuxBarBaz => { + "This is a test of the emergency alert system" + } + } + } + + /// All webhook event classes. + pub const ALL_CLASSES: &'static [Self] = + ::VARIANTS; +} + +impl fmt::Display for WebhookEventClass { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.as_str()) + } +} + +impl Serialize for WebhookEventClass { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + serializer.serialize_str(self.as_str()) + } +} + +impl<'de> Deserialize<'de> for WebhookEventClass { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + <&'de str>::deserialize(deserializer)? 
+ .parse::() + .map_err(de::Error::custom) + } +} + +impl diesel::query_builder::QueryId for WebhookEventClassEnum { + type QueryId = (); + const HAS_STATIC_QUERY_ID: bool = false; +} + +impl std::str::FromStr for WebhookEventClass { + type Err = EventClassParseError; + fn from_str(s: &str) -> Result { + for &class in Self::ALL_CLASSES { + if s == class.as_str() { + return Ok(class); + } + } + + Err(EventClassParseError(())) + } +} + +impl From for views::EventClass { + fn from(class: WebhookEventClass) -> Self { + Self { + name: class.to_string(), + description: class.description().to_string(), + } + } +} + +#[derive(Debug, Eq, PartialEq)] +pub struct EventClassParseError(()); + +impl fmt::Display for EventClassParseError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "expected one of [")?; + let mut variants = WebhookEventClass::ALL_CLASSES.iter(); + if let Some(v) = variants.next() { + write!(f, "{v}")?; + for v in variants { + write!(f, ", {v}")?; + } + } + f.write_str("]") + } +} + +impl std::error::Error for EventClassParseError {} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_from_str_roundtrips() { + for &variant in WebhookEventClass::ALL_CLASSES { + assert_eq!(Ok(dbg!(variant)), dbg!(variant.to_string().parse())); + } + } + + // This is mainly a regression test to ensure that, should anyone add new + // `test.` variants in future, the `WebhookEventClass::is_test()` method + // returns `true` for them. 
+ #[test] + fn test_is_test() { + let problematic_variants = WebhookEventClass::ALL_CLASSES + .iter() + .copied() + .filter(|variant| { + variant.as_str().starts_with("test.") && !variant.is_test() + }) + .collect::>(); + assert_eq!( + problematic_variants, + Vec::::new(), + "you have added one or more new `test.*` webhook event class \ + variant(s), but you seem to have not updated the \ + `WebhookEventClass::is_test()` method!\nthe problematic \ + variant(s) are: {problematic_variants:?}", + ); + } +} diff --git a/nexus/db-model/src/webhook_rx.rs b/nexus/db-model/src/webhook_rx.rs new file mode 100644 index 00000000000..35ec070763b --- /dev/null +++ b/nexus/db-model/src/webhook_rx.rs @@ -0,0 +1,360 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +use crate::EventClassParseError; +use crate::Generation; +use crate::Name; +use crate::SemverVersion; +use crate::WebhookEventClass; +use crate::collection::DatastoreCollectionConfig; +use crate::schema::{ + webhook_receiver, webhook_rx_event_glob, webhook_rx_subscription, + webhook_secret, +}; +use crate::schema_versions; +use crate::typed_uuid::DbTypedUuid; +use chrono::{DateTime, Utc}; +use db_macros::{Asset, Resource}; +use nexus_types::external_api::views; +use nexus_types::identity::Resource; +use omicron_common::api::external::Error; +use omicron_uuid_kinds::{ + GenericUuid, WebhookReceiverKind, WebhookReceiverUuid, WebhookSecretUuid, +}; +use serde::{Deserialize, Serialize}; +use std::str::FromStr; +use uuid::Uuid; + +/// The full configuration of a webhook receiver, including the +/// [`WebhookReceiver`] itself and its subscriptions and secrets. 
+#[derive(Clone, Debug)] +pub struct WebhookReceiverConfig { + pub rx: WebhookReceiver, + pub secrets: Vec, + pub events: Vec, +} + +impl TryFrom for views::WebhookReceiver { + type Error = Error; + fn try_from( + WebhookReceiverConfig { rx, secrets, events }: WebhookReceiverConfig, + ) -> Result { + let secrets = secrets + .iter() + .map(|WebhookSecret { identity, .. }| views::WebhookSecretId { + id: identity.id.into_untyped_uuid(), + }) + .collect(); + let events = events + .into_iter() + .map(WebhookSubscriptionKind::into_event_class_string) + .collect(); + let endpoint = + rx.endpoint.parse().map_err(|e| Error::InternalError { + // This is an internal error, as we should not have ever allowed + // an invalid URL to be inserted into the database... + internal_message: format!( + "invalid webhook URL {:?}: {e}", + rx.endpoint, + ), + })?; + Ok(views::WebhookReceiver { + identity: rx.identity(), + endpoint, + secrets, + events, + }) + } +} + +/// A row in the `webhook_receiver` table. +#[derive( + Clone, + Debug, + Queryable, + Selectable, + Resource, + Insertable, + Serialize, + Deserialize, +)] +#[resource(uuid_kind = WebhookReceiverKind)] +#[diesel(table_name = webhook_receiver)] +pub struct WebhookReceiver { + #[diesel(embed)] + pub identity: WebhookReceiverIdentity, + pub endpoint: String, + + /// child resource generation number for secrets, per RFD 192 + pub secret_gen: Generation, + /// child resource generation number for event subscriptions, per RFD 192 + pub subscription_gen: Generation, +} + +// Note that while we have both a `secret_gen` and a `subscription_gen`, we only +// implement `DatastoreCollection` for secrets, not subscriptions. This is +// because subscriptions are updated in a batch, using a transaction, rather +// than via add and delete operations for individual IDs, like secrets. 
+impl DatastoreCollectionConfig for WebhookReceiver { + type CollectionId = Uuid; + type GenerationNumberColumn = webhook_receiver::dsl::secret_gen; + type CollectionTimeDeletedColumn = webhook_receiver::dsl::time_deleted; + type CollectionIdColumn = webhook_secret::dsl::rx_id; +} + +/// Describes a set of updates for the [`WebhookReceiver`] model. +#[derive(Clone, AsChangeset)] +#[diesel(table_name = webhook_receiver)] +pub struct WebhookReceiverUpdate { + pub name: Option, + pub description: Option, + pub endpoint: Option, + pub time_modified: DateTime, + pub subscription_gen: Option, +} + +#[derive( + Clone, + Debug, + Queryable, + Selectable, + Insertable, + Serialize, + Deserialize, + Asset, +)] +#[asset(uuid_kind = WebhookSecretKind)] +#[diesel(table_name = webhook_secret)] +pub struct WebhookSecret { + #[diesel(embed)] + pub identity: WebhookSecretIdentity, + #[diesel(column_name = rx_id)] + pub webhook_receiver_id: DbTypedUuid, + pub secret: String, + pub time_deleted: Option>, +} + +impl WebhookSecret { + pub fn new(rx_id: WebhookReceiverUuid, secret: String) -> Self { + Self { + identity: WebhookSecretIdentity::new(WebhookSecretUuid::new_v4()), + webhook_receiver_id: rx_id.into(), + secret, + time_deleted: None, + } + } +} + +impl From for views::WebhookSecretId { + fn from(secret: WebhookSecret) -> Self { + Self { id: secret.identity.id.into_untyped_uuid() } + } +} + +#[derive( + Clone, Debug, Queryable, Selectable, Insertable, Serialize, Deserialize, +)] +#[diesel(table_name = webhook_rx_subscription)] +pub struct WebhookRxSubscription { + pub rx_id: DbTypedUuid, + pub event_class: WebhookEventClass, + pub glob: Option, + pub time_created: DateTime, +} + +#[derive( + Clone, Debug, Queryable, Selectable, Insertable, Serialize, Deserialize, +)] +#[diesel(table_name = webhook_rx_event_glob)] +pub struct WebhookRxEventGlob { + pub rx_id: DbTypedUuid, + #[diesel(embed)] + pub glob: WebhookGlob, + pub time_created: DateTime, + pub schema_version: 
SemverVersion, +} + +impl WebhookRxEventGlob { + pub fn new(rx_id: WebhookReceiverUuid, glob: WebhookGlob) -> Self { + Self { + rx_id: DbTypedUuid(rx_id), + glob, + time_created: Utc::now(), + schema_version: schema_versions::SCHEMA_VERSION.into(), + } + } +} +#[derive(Clone, Debug, Eq, PartialEq, Hash)] +pub enum WebhookSubscriptionKind { + Glob(WebhookGlob), + Exact(WebhookEventClass), +} + +impl WebhookSubscriptionKind { + pub fn new(value: String) -> Result { + if value.is_empty() { + return Err(Error::invalid_value( + "event_class", + "must not be empty", + )); + } + + if value.contains('*') { + let regex = WebhookGlob::regex_from_glob(&value)?; + return Ok(Self::Glob(WebhookGlob { regex, glob: value })); + } + + let class = value.parse().map_err(|e: EventClassParseError| { + Error::invalid_value("event_class", e.to_string()) + })?; + Ok(Self::Exact(class)) + } + + fn into_event_class_string(self) -> String { + match self { + Self::Exact(class) => class.to_string(), + Self::Glob(WebhookGlob { glob, .. }) => glob, + } + } +} + +#[derive( + Clone, + Debug, + Eq, + PartialEq, + Hash, + Queryable, + Selectable, + Insertable, + Serialize, + Deserialize, +)] +#[diesel(table_name = webhook_rx_event_glob)] +pub struct WebhookGlob { + pub glob: String, + pub regex: String, +} + +impl FromStr for WebhookGlob { + type Err = Error; + fn from_str(glob: &str) -> Result { + let regex = Self::regex_from_glob(glob)?; + Ok(Self { glob: glob.to_string(), regex }) + } +} + +impl TryFrom for WebhookGlob { + type Error = Error; + fn try_from(glob: String) -> Result { + let regex = Self::regex_from_glob(&glob)?; + Ok(Self { glob, regex }) + } +} + +impl WebhookGlob { + fn regex_from_glob(glob: &str) -> Result { + let seg2regex = |segment: &str, + regex: &mut String| + -> Result<(), Error> { + match segment { + // Match one segment (i.e. 
any number of segment characters) + "*" => regex.push_str("[^\\.]+"), + // Match any number of segments + "**" => regex.push_str(".+"), + s if s.contains('*') => { + return Err(Error::invalid_value( + "event_class", + "invalid event class {glob:?}: all segments must be \ + either '*', '**', or any sequence of non-'*' characters", + )); + } + // Match the literal segment. + s => regex.push_str(s), + } + Ok(()) + }; + + // The subscription's regex will always be at least as long as the event + // class glob, plus start and end anchors. + let mut regex = String::with_capacity(glob.len()); + + regex.push('^'); // Start anchor + let mut segments = glob.split('.'); + if let Some(segment) = segments.next() { + seg2regex(segment, &mut regex)?; + for segment in segments { + regex.push_str("\\."); // segment separator + seg2regex(segment, &mut regex)?; + } + } else { + return Err(Error::invalid_value( + "event_class", + "must not be empty", + )); + }; + regex.push('$'); // End anchor + + Ok(regex) + } +} + +impl WebhookRxSubscription { + pub fn exact( + rx_id: WebhookReceiverUuid, + event_class: WebhookEventClass, + ) -> Self { + Self { + rx_id: DbTypedUuid(rx_id), + event_class, + glob: None, + time_created: Utc::now(), + } + } + + pub fn for_glob( + glob: &WebhookRxEventGlob, + event_class: WebhookEventClass, + ) -> Self { + Self { + rx_id: glob.rx_id, + glob: Some(glob.glob.glob.clone()), + event_class, + time_created: Utc::now(), + } + } +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn test_event_class_glob_to_regex() { + const CASES: &[(&str, &str)] = &[ + ("foo.bar", "^foo\\.bar$"), + ("foo.*.bar", "^foo\\.[^\\.]+\\.bar$"), + ("foo.*", "^foo\\.[^\\.]+$"), + ("*.foo", "^[^\\.]+\\.foo$"), + ("foo.**.bar", "^foo\\..+\\.bar$"), + ("foo.**", "^foo\\..+$"), + ("foo_bar.baz", "^foo_bar\\.baz$"), + ("foo_bar.*.baz", "^foo_bar\\.[^\\.]+\\.baz$"), + ]; + for (class, regex) in CASES { + let glob = match WebhookGlob::from_str(dbg!(class)) { + Ok(glob) => glob, + 
Err(error) => panic!( + "event class glob {class:?} should produce the regex + {regex:?}, but instead failed to parse: {error}" + ), + }; + assert_eq!( + dbg!(regex), + dbg!(&glob.regex), + "event class {class:?} should produce the regex {regex:?}" + ); + } + } +} diff --git a/nexus/db-queries/Cargo.toml b/nexus/db-queries/Cargo.toml index 5973da39d6a..2401088734a 100644 --- a/nexus/db-queries/Cargo.toml +++ b/nexus/db-queries/Cargo.toml @@ -35,6 +35,7 @@ pq-sys = "*" qorb.workspace = true rand.workspace = true ref-cast.workspace = true +regex.workspace = true schemars.workspace = true semver.workspace = true serde.workspace = true diff --git a/nexus/db-queries/src/db/datastore/mod.rs b/nexus/db-queries/src/db/datastore/mod.rs index 927526695fb..1d77112817a 100644 --- a/nexus/db-queries/src/db/datastore/mod.rs +++ b/nexus/db-queries/src/db/datastore/mod.rs @@ -110,6 +110,9 @@ mod vmm; mod volume; mod volume_repair; mod vpc; +pub mod webhook_delivery; +mod webhook_event; +mod webhook_rx; mod zpool; pub use address_lot::AddressLotCreateResult; diff --git a/nexus/db-queries/src/db/datastore/webhook_delivery.rs b/nexus/db-queries/src/db/datastore/webhook_delivery.rs new file mode 100644 index 00000000000..193811686ae --- /dev/null +++ b/nexus/db-queries/src/db/datastore/webhook_delivery.rs @@ -0,0 +1,580 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! 
[`DataStore`] methods for webhook event deliveries + +use super::DataStore; +use crate::context::OpContext; +use crate::db::IncompleteOnConflictExt; +use crate::db::datastore::RunnableQuery; +use crate::db::error::ErrorHandler; +use crate::db::error::public_error_from_diesel; +use crate::db::model::SqlU8; +use crate::db::model::WebhookDelivery; +use crate::db::model::WebhookDeliveryAttempt; +use crate::db::model::WebhookDeliveryAttemptResult; +use crate::db::model::WebhookDeliveryState; +use crate::db::model::WebhookDeliveryTrigger; +use crate::db::model::WebhookEvent; +use crate::db::model::WebhookEventClass; +use crate::db::pagination::paginated_multicolumn; +use crate::db::schema; +use crate::db::schema::webhook_delivery::dsl; +use crate::db::schema::webhook_delivery_attempt::dsl as attempt_dsl; +use crate::db::schema::webhook_event::dsl as event_dsl; +use crate::db::update_and_check::UpdateAndCheck; +use crate::db::update_and_check::UpdateAndQueryResult; +use crate::db::update_and_check::UpdateStatus; +use async_bb8_diesel::AsyncRunQueryDsl; +use chrono::TimeDelta; +use chrono::{DateTime, Utc}; +use diesel::prelude::*; +use omicron_common::api::external::CreateResult; +use omicron_common::api::external::DataPageParams; +use omicron_common::api::external::Error; +use omicron_common::api::external::ListResultVec; +use omicron_uuid_kinds::GenericUuid; +use omicron_uuid_kinds::OmicronZoneUuid; +use omicron_uuid_kinds::WebhookReceiverUuid; +use uuid::Uuid; + +#[derive(Debug, Clone, Eq, PartialEq)] +pub enum DeliveryAttemptState { + Started, + AlreadyCompleted(DateTime), + InProgress { nexus_id: OmicronZoneUuid, started: DateTime }, +} + +#[derive(Debug, Clone)] +pub struct DeliveryConfig { + pub first_retry_backoff: TimeDelta, + pub second_retry_backoff: TimeDelta, + pub lease_timeout: TimeDelta, +} + +impl DataStore { + pub async fn webhook_delivery_create_batch( + &self, + opctx: &OpContext, + deliveries: Vec, + ) -> CreateResult { + let conn = 
self.pool_connection_authorized(opctx).await?; + diesel::insert_into(dsl::webhook_delivery) + .values(deliveries) + // N.B. that this is intended to ignore conflicts on the + // "one_webhook_event_dispatch_per_rx" index, but ON CONFLICT ... DO + // NOTHING can't be used with the names of indices, only actual + // UNIQUE CONSTRAINTs. So we just do a blanket ON CONFLICT DO + // NOTHING, which is fine, becausse the only other uniqueness + // constraint is the UUID primary key, and we kind of assume UUID + // collisions don't happen. Oh well. + .on_conflict((dsl::event_id, dsl::rx_id)) + .as_partial_index() + .do_nothing() + .execute_async(&*conn) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) + } + + /// Returns a list of all permanently-failed deliveries which are eligible + /// to resend should a liveness probe with `resend=true` succeed. + pub async fn webhook_rx_list_resendable_events( + &self, + opctx: &OpContext, + rx_id: &WebhookReceiverUuid, + ) -> ListResultVec { + Self::rx_list_resendable_events_query(*rx_id) + .load_async(&*self.pool_connection_authorized(opctx).await?) 
+ .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) + } + + fn rx_list_resendable_events_query( + rx_id: WebhookReceiverUuid, + ) -> impl RunnableQuery { + use diesel::dsl::*; + let (delivery, also_delivery) = diesel::alias!( + schema::webhook_delivery as delivery, + schema::webhook_delivery as also_delivey + ); + event_dsl::webhook_event + .filter(event_dsl::event_class.ne(WebhookEventClass::Probe)) + .inner_join( + delivery.on(delivery.field(dsl::event_id).eq(event_dsl::id)), + ) + .filter(delivery.field(dsl::rx_id).eq(rx_id.into_untyped_uuid())) + .filter(not(exists( + also_delivery + .select(also_delivery.field(dsl::id)) + .filter( + also_delivery.field(dsl::event_id).eq(event_dsl::id), + ) + .filter( + also_delivery + .field(dsl::state) + .ne(WebhookDeliveryState::Failed), + ) + .filter( + also_delivery + .field(dsl::triggered_by) + .ne(WebhookDeliveryTrigger::Probe), + ), + ))) + .select(WebhookEvent::as_select()) + // the inner join means we may return the same event multiple times, + // so only return distinct events. + .distinct() + } + + pub async fn webhook_rx_delivery_list( + &self, + opctx: &OpContext, + rx_id: &WebhookReceiverUuid, + triggers: &'static [WebhookDeliveryTrigger], + only_states: Vec, + pagparams: &DataPageParams<'_, (DateTime, Uuid)>, + ) -> ListResultVec<( + WebhookDelivery, + WebhookEventClass, + Vec, + )> { + let conn = self.pool_connection_authorized(opctx).await?; + // Paginate the query, ordered by delivery UUID. + let mut query = paginated_multicolumn( + dsl::webhook_delivery, + (dsl::time_created, dsl::id), + pagparams, + ) + // Select only deliveries that are to the receiver we're interested in, + // and were initiated by the triggers we're interested in. + .filter( + dsl::rx_id + .eq(rx_id.into_untyped_uuid()) + .and(dsl::triggered_by.eq_any(triggers)), + ) + // Join with the event table on the delivery's event ID, + // so that we can grab the event class of the event that initiated + // this delivery. 
+ .inner_join( + event_dsl::webhook_event.on(dsl::event_id.eq(event_dsl::id)), + ); + if !only_states.is_empty() { + query = query.filter(dsl::state.eq_any(only_states)); + } + + let deliveries = query + .select((WebhookDelivery::as_select(), event_dsl::event_class)) + .load_async::<(WebhookDelivery, WebhookEventClass)>(&*conn) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; + + let mut result = Vec::with_capacity(deliveries.len()); + for (delivery, class) in deliveries { + let attempts = attempt_dsl::webhook_delivery_attempt + .filter( + attempt_dsl::delivery_id + .eq(delivery.id.into_untyped_uuid()), + ) + .select(WebhookDeliveryAttempt::as_select()) + .load_async(&*conn) + .await + .map_err(|e| { + public_error_from_diesel(e, ErrorHandler::Server) + .internal_context( + "failed to list attempts for a delivery", + ) + })?; + result.push((delivery, class, attempts)); + } + Ok(result) + } + + pub async fn webhook_rx_delivery_list_ready( + &self, + opctx: &OpContext, + rx_id: &WebhookReceiverUuid, + cfg: &DeliveryConfig, + ) -> ListResultVec<(WebhookDelivery, WebhookEventClass)> { + let conn = self.pool_connection_authorized(opctx).await?; + let now = + diesel::dsl::now.into_sql::(); + dsl::webhook_delivery + // Filter out deliveries triggered by probe requests, as those are + // executed synchronously by the probe endpoint, rather than by the + // webhook deliverator. + .filter(dsl::triggered_by.ne(WebhookDeliveryTrigger::Probe)) + // Only select deliveries that are still in progress. 
+ .filter( + dsl::time_completed + .is_null() + .and(dsl::state.eq(WebhookDeliveryState::Pending)), + ) + .filter(dsl::rx_id.eq(rx_id.into_untyped_uuid())) + .filter((dsl::deliverator_id.is_null()).or( + dsl::time_leased.is_not_null().and( + dsl::time_leased.le(now.nullable() - cfg.lease_timeout), + ), + )) + .filter( + // Retry backoffs: one of the following must be true: + // - the delivery has not yet been attempted, + dsl::attempts + .eq(0) + // - this is the first retry and the previous attempt was at + // least `first_retry_backoff` ago, or + .or(dsl::attempts.eq(1).and( + dsl::time_leased + .le(now.nullable() - cfg.first_retry_backoff), + )) + // - this is the second retry, and the previous attempt was at + // least `second_retry_backoff` ago. + .or(dsl::attempts.eq(2).and( + dsl::time_leased + .le(now.nullable() - cfg.second_retry_backoff), + )), + ) + .order_by(dsl::time_created.asc()) + // Join with the `webhook_event` table to get the event class, which + // is necessary to construct delivery requests. 
+ .inner_join( + event_dsl::webhook_event.on(event_dsl::id.eq(dsl::event_id)), + ) + .select((WebhookDelivery::as_select(), event_dsl::event_class)) + .load_async(&*conn) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) + } + + pub async fn webhook_delivery_start_attempt( + &self, + opctx: &OpContext, + delivery: &WebhookDelivery, + nexus_id: &OmicronZoneUuid, + lease_timeout: TimeDelta, + ) -> Result { + let conn = self.pool_connection_authorized(opctx).await?; + let now = + diesel::dsl::now.into_sql::(); + let id = delivery.id.into_untyped_uuid(); + let updated = diesel::update(dsl::webhook_delivery) + .filter( + dsl::time_completed + .is_null() + .and(dsl::state.eq(WebhookDeliveryState::Pending)), + ) + .filter(dsl::id.eq(id)) + .filter( + dsl::deliverator_id.is_null().or(dsl::time_leased + .is_not_null() + .and(dsl::time_leased.le(now.nullable() - lease_timeout))), + ) + .set(( + dsl::time_leased.eq(now.nullable()), + dsl::deliverator_id.eq(nexus_id.into_untyped_uuid()), + )) + .check_if_exists::(id) + .execute_and_check(&conn) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; + match updated.status { + UpdateStatus::Updated => Ok(DeliveryAttemptState::Started), + UpdateStatus::NotUpdatedButExists => { + if let Some(completed) = updated.found.time_completed { + return Ok(DeliveryAttemptState::AlreadyCompleted( + completed, + )); + } + + if let Some(started) = updated.found.time_leased { + let nexus_id = + updated.found.deliverator_id.ok_or_else(|| { + Error::internal_error( + "if a delivery attempt has a last started \ + timestamp, the database should ensure that \ + it also has a Nexus ID", + ) + })?; + return Ok(DeliveryAttemptState::InProgress { + nexus_id: nexus_id.into(), + started, + }); + } + + Err(Error::internal_error( + "couldn't start delivery attempt for some secret third reason???", + )) + } + } + } + + pub async fn webhook_delivery_finish_attempt( + &self, + opctx: &OpContext, + delivery: 
&WebhookDelivery, + nexus_id: &OmicronZoneUuid, + attempt: &WebhookDeliveryAttempt, + ) -> Result<(), Error> { + const MAX_ATTEMPTS: u8 = 3; + let conn = self.pool_connection_authorized(opctx).await?; + diesel::insert_into(attempt_dsl::webhook_delivery_attempt) + .values(attempt.clone()) + .on_conflict((attempt_dsl::delivery_id, attempt_dsl::attempt)) + .do_nothing() + .returning(WebhookDeliveryAttempt::as_returning()) + .execute_async(&*conn) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; + + // Has the delivery either completed successfully or exhausted all of + // its retry attempts? + let new_state = + if attempt.result == WebhookDeliveryAttemptResult::Succeeded { + // The delivery has completed successfully. + WebhookDeliveryState::Delivered + } else if *attempt.attempt >= MAX_ATTEMPTS { + // The delivery attempt failed, and we are out of retries. This + // delivery has failed permanently. + WebhookDeliveryState::Failed + } else { + // This delivery attempt failed, but we still have retries + // remaining, so the delivery remains pending. + WebhookDeliveryState::Pending + }; + let (completed, new_nexus_id) = + if new_state != WebhookDeliveryState::Pending { + // If the delivery has succeeded or failed permanently, set the + // "time_completed" timestamp to mark it as finished. Also, leave + // the delivering Nexus ID in place to maintain a record of who + // finished the delivery. + (Some(Utc::now()), Some(nexus_id.into_untyped_uuid())) + } else { + // Otherwise, "unlock" the delivery for other nexii. + (None, None) + }; + + let prev_attempts = SqlU8::new((*attempt.attempt) - 1); + let UpdateAndQueryResult { status, found } = + diesel::update(dsl::webhook_delivery) + .filter(dsl::id.eq(delivery.id.into_untyped_uuid())) + .filter(dsl::deliverator_id.eq(nexus_id.into_untyped_uuid())) + .filter(dsl::attempts.eq(prev_attempts)) + // Don't mark a delivery as completed if it's already completed! 
+ .filter( + dsl::time_completed + .is_null() + .and(dsl::state.eq(WebhookDeliveryState::Pending)), + ) + .set(( + dsl::state.eq(new_state), + dsl::time_completed.eq(completed), + // XXX(eliza): hmm this might be racy; we should probably increment this + // in place and use it to determine the attempt number? + dsl::attempts.eq(attempt.attempt), + dsl::deliverator_id.eq(new_nexus_id), + )) + .check_if_exists::(delivery.id) + .execute_and_check(&conn) + .await + .map_err(|e| { + public_error_from_diesel(e, ErrorHandler::Server) + })?; + + if status == UpdateStatus::Updated { + return Ok(()); + } + + if let Some(other_nexus_id) = found.deliverator_id { + return Err(Error::conflict(format!( + "cannot mark delivery completed, as {other_nexus_id:?} was \ + attempting to deliver it", + ))); + } + + if found.time_completed.is_some() + || found.state != WebhookDeliveryState::Pending + { + return Err(Error::conflict( + "delivery was already marked as completed", + )); + } + + if found.attempts != prev_attempts { + return Err(Error::conflict("wrong number of delivery attempts")); + } + + Err(Error::internal_error( + "couldn't update delivery for some other reason i didn't think of here...", + )) + } +} + +#[cfg(test)] +mod test { + use super::*; + use crate::db::explain::ExplainableAsync; + use crate::db::model::WebhookDeliveryTrigger; + use crate::db::pagination::Paginator; + use crate::db::pub_test_utils::TestDatabase; + use crate::db::raw_query_builder::expectorate_query_contents; + use nexus_types::external_api::params; + use omicron_common::api::external::IdentityMetadataCreateParams; + use omicron_test_utils::dev; + use omicron_uuid_kinds::WebhookEventUuid; + + #[tokio::test] + async fn test_dispatched_deliveries_are_unique_per_rx() { + // Test setup + let logctx = + dev::test_setup_log("test_dispatched_deliveries_are_unique_per_rx"); + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); + // As webhook 
receivers are a collection that owns the delivery + // resource, we must create a "real" receiver before assigning + // deliveries to it. + let rx = datastore + .webhook_rx_create( + opctx, + params::WebhookCreate { + identity: IdentityMetadataCreateParams { + name: "test-webhook".parse().unwrap(), + description: String::new(), + }, + endpoint: "http://webhooks.example.com".parse().unwrap(), + secrets: vec!["my cool secret".to_string()], + events: vec!["test.*".to_string()], + }, + ) + .await + .unwrap(); + let rx_id = rx.rx.identity.id.into(); + let event_id = WebhookEventUuid::new_v4(); + let event = datastore + .webhook_event_create( + &opctx, + event_id, + WebhookEventClass::TestFoo, + serde_json::json!({ + "answer": 42, + }), + ) + .await + .expect("can't create ye event"); + + let dispatch1 = + WebhookDelivery::new(&event, &rx_id, WebhookDeliveryTrigger::Event); + let inserted = datastore + .webhook_delivery_create_batch(&opctx, vec![dispatch1.clone()]) + .await + .expect("dispatch 1 should insert"); + assert_eq!(inserted, 1, "first dispatched delivery should be created"); + + let dispatch2 = + WebhookDelivery::new(&event, &rx_id, WebhookDeliveryTrigger::Event); + let inserted = datastore + .webhook_delivery_create_batch(opctx, vec![dispatch2.clone()]) + .await + .expect("dispatch 2 insert should not fail"); + assert_eq!( + inserted, 0, + "dispatching an event a second time should do nothing" + ); + + let resend1 = WebhookDelivery::new( + &event, + &rx_id, + WebhookDeliveryTrigger::Resend, + ); + let inserted = datastore + .webhook_delivery_create_batch(opctx, vec![resend1.clone()]) + .await + .expect("resend 1 insert should not fail"); + assert_eq!( + inserted, 1, + "resending an event should create a new delivery" + ); + + let resend2 = WebhookDelivery::new( + &event, + &rx_id, + WebhookDeliveryTrigger::Resend, + ); + let inserted = datastore + .webhook_delivery_create_batch(opctx, vec![resend2.clone()]) + .await + .expect("resend 2 insert should not 
fail"); + assert_eq!( + inserted, 1, + "resending an event a second time should create a new delivery" + ); + + let mut all_deliveries = std::collections::HashSet::new(); + let mut paginator = + Paginator::new(crate::db::datastore::SQL_BATCH_SIZE); + while let Some(p) = paginator.next() { + let deliveries = datastore + .webhook_rx_delivery_list( + &opctx, + &rx_id, + WebhookDeliveryTrigger::ALL, + Vec::new(), + &p.current_pagparams(), + ) + .await + .unwrap(); + paginator = p.found_batch(&deliveries, &|(d, _, _)| { + (d.time_created, *d.id.as_untyped_uuid()) + }); + all_deliveries + .extend(deliveries.into_iter().map(|(d, _, _)| dbg!(d).id)); + } + + assert!(all_deliveries.contains(&dispatch1.id)); + assert!(!all_deliveries.contains(&dispatch2.id)); + assert!(all_deliveries.contains(&resend1.id)); + assert!(all_deliveries.contains(&resend2.id)); + + db.terminate().await; + logctx.cleanup_successful(); + } + + #[tokio::test] + async fn expectorate_rx_list_resendable() { + let query = DataStore::rx_list_resendable_events_query( + WebhookReceiverUuid::nil(), + ); + + expectorate_query_contents( + &query, + "tests/output/webhook_rx_list_resendable_events.sql", + ) + .await; + } + + #[tokio::test] + async fn explain_rx_list_resendable_events() { + let logctx = dev::test_setup_log("explain_rx_list_resendable_events"); + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); + let conn = pool.claim().await.unwrap(); + + let query = DataStore::rx_list_resendable_events_query( + WebhookReceiverUuid::nil(), + ); + let explanation = query + .explain_async(&conn) + .await + .expect("Failed to explain query - is it valid SQL?"); + + eprintln!("{explanation}"); + + assert!( + !explanation.contains("FULL SCAN"), + "Found an unexpected FULL SCAN: {}", + explanation + ); + + db.terminate().await; + logctx.cleanup_successful(); + } +} diff --git a/nexus/db-queries/src/db/datastore/webhook_event.rs b/nexus/db-queries/src/db/datastore/webhook_event.rs new 
file mode 100644 index 00000000000..cac1a0b77a8 --- /dev/null +++ b/nexus/db-queries/src/db/datastore/webhook_event.rs @@ -0,0 +1,84 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! [`DataStore`] methods for webhook events and event delivery dispatching. + +use super::DataStore; +use crate::context::OpContext; +use crate::db::error::ErrorHandler; +use crate::db::error::public_error_from_diesel; +use crate::db::model::WebhookEvent; +use crate::db::model::WebhookEventClass; +use crate::db::model::WebhookEventIdentity; +use crate::db::schema::webhook_event::dsl as event_dsl; +use async_bb8_diesel::AsyncRunQueryDsl; +use diesel::prelude::*; +use diesel::result::OptionalExtension; +use omicron_common::api::external::CreateResult; +use omicron_common::api::external::Error; +use omicron_common::api::external::UpdateResult; +use omicron_uuid_kinds::{GenericUuid, WebhookEventUuid}; + +impl DataStore { + pub async fn webhook_event_create( + &self, + opctx: &OpContext, + id: WebhookEventUuid, + event_class: WebhookEventClass, + event: serde_json::Value, + ) -> CreateResult { + let conn = self.pool_connection_authorized(&opctx).await?; + diesel::insert_into(event_dsl::webhook_event) + .values(WebhookEvent { + identity: WebhookEventIdentity::new(id), + time_dispatched: None, + event_class, + event, + num_dispatched: 0, + }) + .returning(WebhookEvent::as_returning()) + .get_result_async(&*conn) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) + } + + pub async fn webhook_event_select_next_for_dispatch( + &self, + opctx: &OpContext, + ) -> Result, Error> { + let conn = self.pool_connection_authorized(&opctx).await?; + event_dsl::webhook_event + .filter(event_dsl::time_dispatched.is_null()) + .order_by(event_dsl::time_created.asc()) + .select(WebhookEvent::as_select()) + .first_async(&*conn) 
+ .await + .optional() + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) + } + + pub async fn webhook_event_mark_dispatched( + &self, + opctx: &OpContext, + event_id: &WebhookEventUuid, + subscribed: usize, + ) -> UpdateResult { + let subscribed = i64::try_from(subscribed).map_err(|_| { + // that is way too many webhook receivers! + Error::internal_error( + "webhook event subscribed count exceeds i64::MAX", + ) + })?; + let conn = self.pool_connection_authorized(&opctx).await?; + diesel::update(event_dsl::webhook_event) + .filter(event_dsl::id.eq(event_id.into_untyped_uuid())) + .set(( + event_dsl::time_dispatched.eq(diesel::dsl::now), + event_dsl::num_dispatched.eq(subscribed), + )) + .execute_async(&*conn) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) + } +} diff --git a/nexus/db-queries/src/db/datastore/webhook_rx.rs b/nexus/db-queries/src/db/datastore/webhook_rx.rs new file mode 100644 index 00000000000..a6e95466d62 --- /dev/null +++ b/nexus/db-queries/src/db/datastore/webhook_rx.rs @@ -0,0 +1,1361 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! [`DataStore`] methods for webhook receiver management. 
+ +use super::DataStore; +use crate::authz; +use crate::context::OpContext; +use crate::db; +use crate::db::TransactionError; +use crate::db::collection_insert::AsyncInsertError; +use crate::db::collection_insert::DatastoreCollection; +use crate::db::datastore::RunnableQuery; +use crate::db::error::ErrorHandler; +use crate::db::error::public_error_from_diesel; +use crate::db::model::Generation; +use crate::db::model::Name; +use crate::db::model::SCHEMA_VERSION; +use crate::db::model::SemverVersion; +use crate::db::model::WebhookEventClass; +use crate::db::model::WebhookGlob; +use crate::db::model::WebhookReceiver; +use crate::db::model::WebhookReceiverConfig; +use crate::db::model::WebhookReceiverIdentity; +use crate::db::model::WebhookRxEventGlob; +use crate::db::model::WebhookRxSubscription; +use crate::db::model::WebhookSecret; +use crate::db::model::WebhookSubscriptionKind; +use crate::db::pagination::paginated; +use crate::db::pagination::paginated_multicolumn; +use crate::db::pool::DbConnection; +use crate::db::schema::webhook_delivery::dsl as delivery_dsl; +use crate::db::schema::webhook_delivery_attempt::dsl as delivery_attempt_dsl; +use crate::db::schema::webhook_event::dsl as event_dsl; +use crate::db::schema::webhook_receiver::dsl as rx_dsl; +use crate::db::schema::webhook_rx_event_glob::dsl as glob_dsl; +use crate::db::schema::webhook_rx_subscription::dsl as subscription_dsl; +use crate::db::schema::webhook_secret::dsl as secret_dsl; +use crate::db::update_and_check::UpdateAndCheck; +use crate::db::update_and_check::UpdateStatus; +use crate::transaction_retry::OptionalError; +use async_bb8_diesel::AsyncRunQueryDsl; +use diesel::prelude::*; +use nexus_types::external_api::params; +use nexus_types::identity::Resource; +use nexus_types::internal_api::background::WebhookGlobStatus; +use omicron_common::api::external::CreateResult; +use omicron_common::api::external::DataPageParams; +use omicron_common::api::external::DeleteResult; +use 
omicron_common::api::external::Error; +use omicron_common::api::external::ListResultVec; +use omicron_common::api::external::ResourceType; +use omicron_common::api::external::UpdateResult; +use omicron_common::api::external::http_pagination::PaginatedBy; +use omicron_uuid_kinds::GenericUuid; +use omicron_uuid_kinds::WebhookReceiverUuid; +use ref_cast::RefCast; +use uuid::Uuid; + +impl DataStore { + pub async fn webhook_rx_create( + &self, + opctx: &OpContext, + params: params::WebhookCreate, + ) -> CreateResult { + // TODO(eliza): someday we gotta allow creating webhooks with more + // restrictive permissions... + opctx.authorize(authz::Action::CreateChild, &authz::FLEET).await?; + + let conn = self.pool_connection_authorized(opctx).await?; + let params::WebhookCreate { identity, endpoint, secrets, events } = + params; + + let subscriptions = events + .into_iter() + .map(WebhookSubscriptionKind::new) + .collect::, _>>()?; + let err = OptionalError::new(); + let (rx, secrets) = self + .transaction_retry_wrapper("webhook_rx_create") + .transaction(&conn, |conn| { + // make a fresh UUID for each transaction, in case the + // transaction fails because of a UUID collision. + // + // this probably won't happen, but, ya know... 
+ let id = WebhookReceiverUuid::new_v4(); + let receiver = WebhookReceiver { + identity: WebhookReceiverIdentity::new( + id, + identity.clone(), + ), + endpoint: endpoint.to_string(), + secret_gen: Generation::new(), + subscription_gen: Generation::new(), + }; + let subscriptions = subscriptions.clone(); + let secret_keys = secrets.clone(); + let err = err.clone(); + let name = identity.name.clone(); + async move { + let rx = diesel::insert_into(rx_dsl::webhook_receiver) + .values(receiver) + .returning(WebhookReceiver::as_returning()) + .get_result_async(&conn) + .await + .map_err(|e| { + err.bail_retryable_or_else(e, |e| { + public_error_from_diesel( + e, + ErrorHandler::Conflict( + ResourceType::WebhookReceiver, + name.as_str(), + ), + ) + }) + })?; + + self.rx_add_subscriptions_on_conn( + opctx, + rx.identity.id.into(), + &subscriptions, + &conn, + ) + .await + .map_err(|e| match e { + TransactionError::CustomError(e) => err.bail(e), + TransactionError::Database(e) => e, + })?; + + let mut secrets = Vec::with_capacity(secret_keys.len()); + for secret in secret_keys { + let secret = self + .add_secret_on_conn( + WebhookSecret::new(id, secret), + &conn, + ) + .await + .map_err(|e| match e { + TransactionError::CustomError(e) => err.bail(e), + TransactionError::Database(e) => e, + })?; + secrets.push(secret); + } + Ok((rx, secrets)) + } + }) + .await + .map_err(|e| { + if let Some(err) = err.take() { + return err; + } + public_error_from_diesel( + e, + ErrorHandler::Conflict( + ResourceType::WebhookReceiver, + identity.name.as_str(), + ), + ) + })?; + Ok(WebhookReceiverConfig { rx, secrets, events: subscriptions }) + } + + pub async fn webhook_rx_config_fetch( + &self, + opctx: &OpContext, + authz_rx: &authz::WebhookReceiver, + ) -> Result<(Vec, Vec), Error> { + opctx.authorize(authz::Action::ListChildren, authz_rx).await?; + self.rx_config_fetch_on_conn( + authz_rx.id(), + &*self.pool_connection_authorized(opctx).await?, + ) + .await + } + + async fn 
rx_config_fetch_on_conn( + &self, + rx_id: WebhookReceiverUuid, + conn: &async_bb8_diesel::Connection, + ) -> Result<(Vec, Vec), Error> { + let subscriptions = + self.rx_subscription_list_on_conn(rx_id, &conn).await?; + let secrets = self.rx_secret_list_on_conn(rx_id, &conn).await?; + Ok((subscriptions, secrets)) + } + + pub async fn webhook_rx_delete( + &self, + opctx: &OpContext, + authz_rx: &authz::WebhookReceiver, + db_rx: &WebhookReceiver, + ) -> DeleteResult { + opctx.authorize(authz::Action::Delete, authz_rx).await?; + let rx_id = authz_rx.id().into_untyped_uuid(); + + let err = OptionalError::new(); + let conn = self.pool_connection_authorized(opctx).await?; + self.transaction_retry_wrapper("webhook_rx_delete").transaction( + &conn, + |conn| { + let err = err.clone(); + async move { + let now = chrono::Utc::now(); + // Delete the webhook's secrets. + let secrets_deleted = + diesel::delete(secret_dsl::webhook_secret) + .filter(secret_dsl::rx_id.eq(rx_id)) + .filter(secret_dsl::time_deleted.is_null()) + .execute_async(&conn) + .await + .map_err(|e| { + err.bail_retryable_or_else(e, |e| { + public_error_from_diesel( + e, + ErrorHandler::Server, + ) + .internal_context( + "failed to delete secrets", + ) + }) + })?; + + // Delete subscriptions and globs. 
+ let exact_subscriptions_deleted = diesel::delete( + subscription_dsl::webhook_rx_subscription, + ) + .filter(subscription_dsl::rx_id.eq(rx_id)) + .execute_async(&conn) + .await + .map_err(|e| { + err.bail_retryable_or_else(e, |e| { + public_error_from_diesel(e, ErrorHandler::Server) + .internal_context( + "failed to delete exact subscriptions", + ) + }) + })?; + + let globs_deleted = + diesel::delete(glob_dsl::webhook_rx_event_glob) + .filter(glob_dsl::rx_id.eq(rx_id)) + .execute_async(&conn) + .await + .map_err(|e| { + err.bail_retryable_or_else(e, |e| { + public_error_from_diesel( + e, + ErrorHandler::Server, + ) + .internal_context("failed to delete globs") + }) + })?; + + let deliveries_deleted = + diesel::delete(delivery_dsl::webhook_delivery) + .filter(delivery_dsl::rx_id.eq(rx_id)) + .execute_async(&conn) + .await + .map_err(|e| { + err.bail_retryable_or_else(e, |e| { + public_error_from_diesel( + e, + ErrorHandler::Server, + ) + .internal_context( + "failed to delete delivery records", + ) + }) + })?; + + let delivery_attempts_deleted = diesel::delete( + delivery_attempt_dsl::webhook_delivery_attempt, + ) + .filter(delivery_attempt_dsl::rx_id.eq(rx_id)) + .execute_async(&conn) + .await + .map_err(|e| { + err.bail_retryable_or_else(e, |e| { + public_error_from_diesel(e, ErrorHandler::Server) + .internal_context( + "failed to delete delivery attempt records", + ) + }) + })?; + // Finally, mark the webhook receiver record as deleted, + // provided that none of its children were modified in the interim. 
+ let deleted = diesel::update(rx_dsl::webhook_receiver) + .filter(rx_dsl::id.eq(rx_id)) + .filter(rx_dsl::time_deleted.is_null()) + .filter(rx_dsl::subscription_gen.eq(db_rx.subscription_gen)) + .filter(rx_dsl::secret_gen.eq(db_rx.secret_gen)) + .set(rx_dsl::time_deleted.eq(now)) + .execute_async(&conn) + .await + .map_err(|e| err.bail_retryable_or_else(e, |e| { + public_error_from_diesel(e, ErrorHandler::Server) + .internal_context( + "failed to mark receiver as deleted", + ) + }))?; + if deleted == 0 { + return Err(err.bail(Error::conflict( + "deletion failed due to concurrent modification", + ))); + } + + slog::info!( + &opctx.log, + "deleted webhook receiver"; + "rx_id" => %rx_id, + "rx_name" => %db_rx.identity.name, + "secrets_deleted" => ?secrets_deleted, + "exact_subscriptions_deleted" => ?exact_subscriptions_deleted, + "globs_deleted" => ?globs_deleted, + "deliveries_deleted" => ?deliveries_deleted, + "delivery_attempts_deleted" => ?delivery_attempts_deleted, + ); + + Ok(()) + } + }, + ).await + .map_err(|e| { + if let Some(err) = err.take() { + return err; + } + public_error_from_diesel(e, ErrorHandler::Server) + }) + } + + pub async fn webhook_rx_update( + &self, + opctx: &OpContext, + authz_rx: &authz::WebhookReceiver, + db_rx: &WebhookReceiver, + params: params::WebhookReceiverUpdate, + ) -> UpdateResult { + use std::collections::HashSet; + + opctx.authorize(authz::Action::Modify, authz_rx).await?; + let conn = self.pool_connection_authorized(opctx).await?; + + let rx_id = authz_rx.id(); + let update = db::model::WebhookReceiverUpdate { + subscription_gen: None, + name: params.identity.name.map(db::model::Name), + description: params.identity.description, + endpoint: params.endpoint.as_ref().map(ToString::to_string), + time_modified: chrono::Utc::now(), + }; + + // If the update changes event class subscriptions, query to get the + // current subscriptions so we can determine the difference in order to + // apply the update. 
+ // + // If we are changing subscriptions, we must perform the changes to the + // subscription table in a transaction with the changes to the receiver + // table, so that we can undo those changes should the receiver update fail. + let rx = if let Some(new_subscriptions) = params.events { + let new_subscriptions = new_subscriptions + .into_iter() + .map(WebhookSubscriptionKind::new) + .collect::, _>>()?; + let curr_subscriptions = self + .rx_subscription_list_on_conn(rx_id, &conn) + .await? + .into_iter() + .collect::>(); + let err = OptionalError::new(); + self.transaction_retry_wrapper("webhook_rx_update") + .transaction(&conn, |conn| { + let mut update = update.clone(); + let new_subscriptions = new_subscriptions.clone(); + let curr_subscriptions = curr_subscriptions.clone(); + let db_rx = db_rx.clone(); + let err = err.clone(); + async move { + let subs_added = self + .rx_add_subscriptions_on_conn( + opctx, + rx_id, + new_subscriptions + .difference(&curr_subscriptions), + &conn, + ) + .await + .map_err(|e| match e { + TransactionError::CustomError(e) => err.bail(e), + TransactionError::Database(e) => e, + })?; + let subs_deleted = self + .rx_delete_subscriptions_on_conn( + opctx, + rx_id, + curr_subscriptions + .difference(&new_subscriptions) + .cloned() + .collect::>(), + &conn, + ) + .await?; + if subs_added + subs_deleted > 0 { + update.subscription_gen = + Some(db_rx.subscription_gen.next().into()); + } + self.rx_record_update_on_conn(&db_rx, update, &conn) + .await + .map_err(|e| match e { + TransactionError::CustomError(e) => err.bail(e), + TransactionError::Database(e) => e, + }) + } + }) + .await + .map_err(|e| { + if let Some(err) = err.take() { + return err; + } + public_error_from_diesel( + e, + ErrorHandler::NotFoundByResource(authz_rx), + ) + })? + } else { + // If we are *not* changing subscriptions, we can just update the + // receiver record, eliding the transaction. 
This will still fail if + // the subscription generation has changed since we snapshotted the + // receiver. + self.rx_record_update_on_conn(db_rx, update, &conn).await.map_err( + |e| match e { + TransactionError::CustomError(e) => e, + TransactionError::Database(e) => public_error_from_diesel( + e, + ErrorHandler::NotFoundByResource(authz_rx), + ), + }, + )? + }; + + Ok(rx) + } + + /// Update the `webhook_receiver` record for the provided webhook receiver + /// and update. + /// + /// This is factored out as it may or may not be run in a transaction, + /// depending on whether or not event subscriptions have changed. + async fn rx_record_update_on_conn( + &self, + curr: &WebhookReceiver, + update: db::model::WebhookReceiverUpdate, + conn: &async_bb8_diesel::Connection, + ) -> Result> { + let rx_id = curr.identity.id.into_untyped_uuid(); + let result = diesel::update(rx_dsl::webhook_receiver) + .filter(rx_dsl::id.eq(rx_id)) + .filter(rx_dsl::time_deleted.is_null()) + .filter(rx_dsl::subscription_gen.eq(curr.subscription_gen)) + .set(update) + .check_if_exists::(rx_id) + .execute_and_check(&conn) + .await + .map_err(TransactionError::Database)?; + + match result.status { + UpdateStatus::Updated => Ok(result.found), + UpdateStatus::NotUpdatedButExists => Err(Error::conflict( + "cannot update receiver configuration, as it has changed \ + concurrently", + ) + .into()), + } + } + + pub async fn webhook_rx_list( + &self, + opctx: &OpContext, + pagparams: &PaginatedBy<'_>, + ) -> ListResultVec { + let conn = self.pool_connection_authorized(opctx).await?; + + // As we would like to return a list of `WebhookReceiverConfig` structs, + // which own `Vec`s of the receiver's secrets and event class + // subscriptions, we'll do this by first querying the database to load + // all the receivers, and then querying for their individual lists of + // secrets and event class subscriptions. 
+ // + // This is a bit unfortunate, and it would be nicer to do this with + // JOINs, but it's a bit hairy as the subscriptions come from both the + // `webhook_rx_subscription` and `webhook_rx_glob` tables... + + let receivers = match pagparams { + PaginatedBy::Id(pagparams) => { + paginated(rx_dsl::webhook_receiver, rx_dsl::id, &pagparams) + } + PaginatedBy::Name(pagparams) => paginated( + rx_dsl::webhook_receiver, + rx_dsl::name, + &pagparams.map_name(|n| Name::ref_cast(n)), + ), + } + .filter(rx_dsl::time_deleted.is_null()) + .select(WebhookReceiver::as_select()) + .load_async(&*conn) + .await + .map_err(|e| { + public_error_from_diesel(e, ErrorHandler::Server) + .internal_context("failed to list receivers") + })?; + + // Now that we've got the current page of receivers, go and get their + // event subscriptions and secrets. + let mut result = Vec::with_capacity(receivers.len()); + for rx in receivers { + let secrets = self.rx_secret_list_on_conn(rx.id(), &conn).await?; + let events = + self.rx_subscription_list_on_conn(rx.id(), &conn).await?; + result.push(WebhookReceiverConfig { rx, secrets, events }); + } + + Ok(result) + } + + // + // Subscriptions + // + + pub async fn webhook_rx_is_subscribed_to_event( + &self, + opctx: &OpContext, + authz_rx: &authz::WebhookReceiver, + authz_event: &authz::WebhookEvent, + ) -> Result { + let conn = self.pool_connection_authorized(opctx).await?; + let event_class = event_dsl::webhook_event + .filter(event_dsl::id.eq(authz_event.id().into_untyped_uuid())) + .select(event_dsl::event_class) + .single_value(); + subscription_dsl::webhook_rx_subscription + .filter( + subscription_dsl::rx_id.eq(authz_rx.id().into_untyped_uuid()), + ) + .filter(subscription_dsl::event_class.nullable().eq(event_class)) + .select(subscription_dsl::rx_id) + .first_async::(&*conn) + .await + .optional() + .map(|x| x.is_some()) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) + } + + async fn rx_subscription_list_on_conn( + &self, + 
rx_id: WebhookReceiverUuid, + conn: &async_bb8_diesel::Connection, + ) -> ListResultVec { + // TODO(eliza): rather than performing two separate queries, this could + // perhaps be expressed using a SQL `union`, with an added "label" + // column to distinguish between globs and exact subscriptions, but this + // is a bit more complex, and would require raw SQL... + + // First, get all the exact subscriptions that aren't from globs. + let exact = subscription_dsl::webhook_rx_subscription + .filter(subscription_dsl::rx_id.eq(rx_id.into_untyped_uuid())) + .filter(subscription_dsl::glob.is_null()) + .select(subscription_dsl::event_class) + .load_async::(conn) + .await + .map_err(|e| { + public_error_from_diesel(e, ErrorHandler::Server) + .internal_context("failed to list exact subscriptions") + })?; + // Then, get the globs + let globs = glob_dsl::webhook_rx_event_glob + .filter(glob_dsl::rx_id.eq(rx_id.into_untyped_uuid())) + .select(WebhookGlob::as_select()) + .load_async::(conn) + .await + .map_err(|e| { + public_error_from_diesel(e, ErrorHandler::Server) + .internal_context("failed to list glob subscriptions") + })?; + let subscriptions = exact + .into_iter() + .map(WebhookSubscriptionKind::Exact) + .chain(globs.into_iter().map(WebhookSubscriptionKind::Glob)) + .collect::>(); + Ok(subscriptions) + } + + async fn rx_add_subscriptions_on_conn( + &self, + opctx: &OpContext, + rx_id: WebhookReceiverUuid, + subscriptions: impl IntoIterator, + conn: &async_bb8_diesel::Connection, + ) -> Result> { + let now = chrono::Utc::now(); + let mut exact = Vec::new(); + let mut n_globs = 0; + let mut n_glob_subscriptions = 0; + for subscription in subscriptions { + match subscription { + WebhookSubscriptionKind::Glob(glob) => { + let glob = WebhookRxEventGlob::new(rx_id, glob.clone()); + n_glob_subscriptions += self + .glob_generate_exact_subs(opctx, &glob, conn) + .await?; + + let created = + diesel::insert_into(glob_dsl::webhook_rx_event_glob) + .values(glob) + 
.on_conflict_do_nothing() + .execute_async(conn) + .await?; + n_globs += created; + } + WebhookSubscriptionKind::Exact(event_class) => { + exact.push(WebhookRxSubscription { + rx_id: rx_id.into(), + event_class: *event_class, + glob: None, + time_created: now, + }); + } + } + } + + let n_exact = + self.add_exact_subscription_batch_on_conn(exact, conn).await?; + slog::info!( + opctx.log, + "inserted new subscriptions for webhook receiver"; + "rx_id" => ?rx_id, + "globs" => ?n_globs, + "glob_subscriptions" => ?n_glob_subscriptions, + "exact_subscriptions" => ?n_exact, + ); + Ok(n_exact + n_globs) + } + + async fn rx_delete_subscriptions_on_conn( + &self, + opctx: &OpContext, + rx_id: WebhookReceiverUuid, + subscriptions: impl IntoIterator, + conn: &async_bb8_diesel::Connection, + ) -> Result { + let mut n_exact = 0; + let mut n_glob_subscriptions = 0; + let mut n_globs = 0; + let rx_id = rx_id.into_untyped_uuid(); + for subscription in subscriptions { + match subscription { + WebhookSubscriptionKind::Glob(glob) => { + n_glob_subscriptions += diesel::delete( + subscription_dsl::webhook_rx_subscription, + ) + .filter(subscription_dsl::rx_id.eq(rx_id)) + .filter(subscription_dsl::glob.eq(glob.glob.clone())) + .execute_async(conn) + .await?; + n_globs += diesel::delete(glob_dsl::webhook_rx_event_glob) + .filter(glob_dsl::rx_id.eq(rx_id)) + .filter(glob_dsl::glob.eq(glob.glob)) + .execute_async(conn) + .await?; + } + WebhookSubscriptionKind::Exact(event_class) => { + n_exact += diesel::delete( + subscription_dsl::webhook_rx_subscription, + ) + .filter(subscription_dsl::rx_id.eq(rx_id)) + .filter(subscription_dsl::event_class.eq(event_class)) + .execute_async(conn) + .await?; + } + } + } + + slog::info!( + opctx.log, + "deleted subscriptions for webhook receiver"; + "rx_id" => ?rx_id, + "globs" => ?n_globs, + "glob_subscriptions" => ?n_glob_subscriptions, + "exact_subscriptions" => ?n_exact, + ); + Ok(n_exact + n_globs) + } + + async fn 
add_exact_subscription_batch_on_conn( + &self, + subscriptions: Vec, + conn: &async_bb8_diesel::Connection, + ) -> Result { + diesel::insert_into(subscription_dsl::webhook_rx_subscription) + .values(subscriptions) + .on_conflict_do_nothing() + .execute_async(conn) + .await + } + + async fn glob_generate_exact_subs( + &self, + opctx: &OpContext, + glob: &WebhookRxEventGlob, + conn: &async_bb8_diesel::Connection, + ) -> Result> { + let regex = match regex::Regex::new(&glob.glob.regex) { + Ok(r) => r, + Err(error) => { + const MSG: &str = + "webhook glob subscription regex was not a valid regex"; + slog::error!( + &opctx.log, + "{MSG}"; + "glob" => ?glob.glob.glob, + "regex" => ?glob.glob.regex, + "error" => %error, + ); + return Err(TransactionError::CustomError( + Error::internal_error(MSG), + )); + } + }; + let subscriptions = WebhookEventClass::ALL_CLASSES + .iter() + .filter_map(|class| { + if regex.is_match(class.as_str()) { + slog::debug!( + &opctx.log, + "webhook glob matches event class"; + "rx_id" => ?glob.rx_id, + "glob" => ?glob.glob.glob, + "regex" => ?regex, + "event_class" => %class, + ); + Some(WebhookRxSubscription::for_glob(&glob, *class)) + } else { + slog::trace!( + &opctx.log, + "webhook glob does not match event class"; + "rx_id" => ?glob.rx_id, + "glob" => ?glob.glob.glob, + "regex" => ?regex, + "event_class" => %class, + ); + None + } + }) + .collect::>(); + let created = self + .add_exact_subscription_batch_on_conn(subscriptions, conn) + .await + .map_err(TransactionError::Database)?; + slog::info!( + &opctx.log, + "created {created} webhook subscriptions for glob"; + "webhook_id" => ?glob.rx_id, + "glob" => ?glob.glob.glob, + "regex" => ?regex, + ); + + Ok(created) + } + + /// List all webhook receivers whose event class subscription globs match + /// the provided `event_class`. 
+ pub async fn webhook_rx_list_subscribed_to_event( + &self, + opctx: &OpContext, + event_class: WebhookEventClass, + ) -> Result, Error> { + let conn = self.pool_connection_authorized(opctx).await?; + Self::rx_list_subscribed_query(event_class) + .load_async::<(WebhookReceiver, WebhookRxSubscription)>(&*conn) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) + } + + fn rx_list_subscribed_query( + event_class: WebhookEventClass, + ) -> impl RunnableQuery<(WebhookReceiver, WebhookRxSubscription)> { + subscription_dsl::webhook_rx_subscription + .filter(subscription_dsl::event_class.eq(event_class)) + .order_by(subscription_dsl::rx_id.asc()) + .inner_join( + rx_dsl::webhook_receiver + .on(subscription_dsl::rx_id.eq(rx_dsl::id)), + ) + .filter(rx_dsl::time_deleted.is_null()) + .select(( + WebhookReceiver::as_select(), + WebhookRxSubscription::as_select(), + )) + } + + // + // Glob reprocessing + // + + pub async fn webhook_glob_list_outdated( + &self, + opctx: &OpContext, + pagparams: &DataPageParams<'_, (Uuid, String)>, + ) -> ListResultVec { + let (current_version, target_version) = + self.database_schema_version().await.map_err(|e| { + e.internal_context("couldn't load db schema version") + })?; + + if let Some(target) = target_version { + return Err(Error::InternalError { + internal_message: format!( + "webhook glob reprocessing must wait until the migration \ + from {current_version} to {target} has completed", + ), + }); + } + if current_version != SCHEMA_VERSION { + return Err(Error::InternalError { + internal_message: format!( + "cannot reprocess webhook globs, as our schema version \ + ({SCHEMA_VERSION}) does not match the current version \ + ({current_version})", + ), + }); + } + + paginated_multicolumn( + glob_dsl::webhook_rx_event_glob, + (glob_dsl::rx_id, glob_dsl::glob), + pagparams, + ) + .filter( + glob_dsl::schema_version.ne(SemverVersion::from(SCHEMA_VERSION)), + ) + .select(WebhookRxEventGlob::as_select()) + 
.load_async(&*self.pool_connection_authorized(&opctx).await?) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) + } + + pub async fn webhook_glob_reprocess( + &self, + opctx: &OpContext, + glob: &WebhookRxEventGlob, + ) -> Result { + slog::trace!( + opctx.log, + "reprocessing outdated webhook glob"; + "rx_id" => ?glob.rx_id, + "glob" => ?glob.glob.glob, + "prior_version" => %glob.schema_version.0, + "current_version" => %SCHEMA_VERSION, + ); + let conn = self.pool_connection_authorized(opctx).await?; + let err = OptionalError::new(); + let status = self + .transaction_retry_wrapper("webhook_glob_reprocess") + .transaction(&conn, |conn| { + let glob = glob.clone(); + let err = err.clone(); + async move { + let deleted = diesel::delete( + subscription_dsl::webhook_rx_subscription, + ) + .filter(subscription_dsl::glob.eq(glob.glob.glob.clone())) + .filter(subscription_dsl::rx_id.eq(glob.rx_id)) + .execute_async(&conn) + .await?; + let created = self + .glob_generate_exact_subs(opctx, &glob, &conn) + .await + .map_err(|e| match e { + TransactionError::CustomError(e) => { + err.bail(Err(e)) + } + TransactionError::Database(e) => e, + })?; + let did_update = + diesel::update(glob_dsl::webhook_rx_event_glob) + .filter( + glob_dsl::rx_id + .eq(glob.rx_id.into_untyped_uuid()), + ) + .filter(glob_dsl::glob.eq(glob.glob.glob.clone())) + .filter( + glob_dsl::schema_version + .eq(glob.schema_version.clone()), + ) + .set( + glob_dsl::schema_version + .eq(SemverVersion::from(SCHEMA_VERSION)), + ) + .execute_async(&conn) + .await; + match did_update { + // Either the glob has been reprocessed by someone else, or + // it has been deleted. 
+ Err(diesel::result::Error::NotFound) | Ok(0) => { + return Err(err.bail(Ok( + WebhookGlobStatus::AlreadyReprocessed, + ))); + } + Err(e) => return Err(e), + Ok(updated) => { + debug_assert_eq!(updated, 1); + } + } + + Ok(WebhookGlobStatus::Reprocessed { + created, + deleted, + prev_version: glob.schema_version.clone().into(), + }) + } + }) + .await + .or_else(|e| { + if let Some(err) = err.take() { + err + } else { + Err(public_error_from_diesel(e, ErrorHandler::Server)) + } + })?; + + match status { + WebhookGlobStatus::Reprocessed { + created, + deleted, + ref prev_version, + } => { + slog::debug!( + opctx.log, + "reprocessed outdated webhook glob"; + "rx_id" => ?glob.rx_id, + "glob" => ?glob.glob.glob, + "prev_version" => %prev_version, + "current_version" => %SCHEMA_VERSION, + "subscriptions_created" => ?created, + "subscriptions_deleted" => ?deleted, + ); + } + WebhookGlobStatus::AlreadyReprocessed => { + slog::trace!( + opctx.log, + "outdated webhook glob was either already reprocessed or deleted"; + "rx_id" => ?glob.rx_id, + "glob" => ?glob.glob.glob, + "prev_version" => %glob.schema_version.0, + "current_version" => %SCHEMA_VERSION, + ); + } + } + + Ok(status) + } + + // + // Secrets + // + + pub async fn webhook_rx_secret_list( + &self, + opctx: &OpContext, + authz_rx: &authz::WebhookReceiver, + ) -> ListResultVec { + opctx.authorize(authz::Action::ListChildren, authz_rx).await?; + let conn = self.pool_connection_authorized(&opctx).await?; + self.rx_secret_list_on_conn(authz_rx.id(), &conn).await + } + + async fn rx_secret_list_on_conn( + &self, + rx_id: WebhookReceiverUuid, + conn: &async_bb8_diesel::Connection, + ) -> ListResultVec { + secret_dsl::webhook_secret + .filter(secret_dsl::rx_id.eq(rx_id.into_untyped_uuid())) + .filter(secret_dsl::time_deleted.is_null()) + .select(WebhookSecret::as_select()) + .load_async(conn) + .await + .map_err(|e| { + public_error_from_diesel(e, ErrorHandler::Server) + .internal_context("failed to list webhook receiver 
secrets") + }) + } + + pub async fn webhook_rx_secret_create( + &self, + opctx: &OpContext, + authz_rx: &authz::WebhookReceiver, + secret: WebhookSecret, + ) -> CreateResult { + opctx.authorize(authz::Action::CreateChild, authz_rx).await?; + let conn = self.pool_connection_authorized(&opctx).await?; + let secret = self.add_secret_on_conn(secret, &conn).await.map_err( + |e| match e { + TransactionError::CustomError(e) => e, + TransactionError::Database(e) => public_error_from_diesel( + e, + ErrorHandler::NotFoundByResource(authz_rx), + ), + }, + )?; + Ok(secret) + } + + pub async fn webhook_rx_secret_delete( + &self, + opctx: &OpContext, + authz_rx: &authz::WebhookReceiver, + authz_secret: &authz::WebhookSecret, + ) -> DeleteResult { + opctx.authorize(authz::Action::Delete, authz_secret).await?; + diesel::delete(secret_dsl::webhook_secret) + .filter(secret_dsl::id.eq(authz_secret.id().into_untyped_uuid())) + .filter(secret_dsl::rx_id.eq(authz_rx.id().into_untyped_uuid())) + .execute_async(&*self.pool_connection_authorized(&opctx).await?) 
+ .await + .map_err(|e| { + public_error_from_diesel( + e, + ErrorHandler::NotFoundByResource(authz_secret), + ) + })?; + Ok(()) + } + + async fn add_secret_on_conn( + &self, + secret: WebhookSecret, + conn: &async_bb8_diesel::Connection, + ) -> Result> { + let rx_id = secret.webhook_receiver_id; + let secret: WebhookSecret = WebhookReceiver::insert_resource( + rx_id.into_untyped_uuid(), + diesel::insert_into(secret_dsl::webhook_secret).values(secret), + ) + .insert_and_get_result_async(conn) + .await + .map_err(async_insert_error_to_txn(rx_id.into()))?; + Ok(secret) + } +} + +fn async_insert_error_to_txn( + rx_id: WebhookReceiverUuid, +) -> impl FnOnce(AsyncInsertError) -> TransactionError { + move |e| match e { + AsyncInsertError::CollectionNotFound => { + TransactionError::CustomError(Error::not_found_by_id( + ResourceType::WebhookReceiver, + &rx_id.into_untyped_uuid(), + )) + } + AsyncInsertError::DatabaseError(e) => TransactionError::Database(e), + } +} + +#[cfg(test)] +mod test { + use super::*; + use crate::authz; + use crate::db::explain::ExplainableAsync; + use crate::db::lookup::LookupPath; + use crate::db::pub_test_utils::TestDatabase; + use omicron_common::api::external::IdentityMetadataCreateParams; + use omicron_test_utils::dev; + use omicron_uuid_kinds::WebhookEventUuid; + + async fn create_receiver( + datastore: &DataStore, + opctx: &OpContext, + name: &str, + events: Vec, + ) -> WebhookReceiverConfig { + datastore + .webhook_rx_create( + opctx, + params::WebhookCreate { + identity: IdentityMetadataCreateParams { + name: name.parse().unwrap(), + description: "it'sa webhook".to_string(), + }, + endpoint: format!("http://{name}").parse().unwrap(), + secrets: vec![name.to_string()], + events, + }, + ) + .await + .expect("cant create ye webhook receiver!!!!") + } + + async fn create_event( + datastore: &DataStore, + opctx: &OpContext, + event_class: WebhookEventClass, + ) -> (authz::WebhookEvent, crate::db::model::WebhookEvent) { + let id = 
WebhookEventUuid::new_v4(); + datastore + .webhook_event_create(opctx, id, event_class, serde_json::json!({})) + .await + .expect("cant create ye event"); + LookupPath::new(opctx, datastore) + .webhook_event_id(id) + .fetch() + .await + .expect( + "cant get ye event (i just created it, so this is extra weird?)", + ) + } + + #[tokio::test] + async fn test_event_class_globs() { + // Test setup + let logctx = dev::test_setup_log("test_event_class_globs"); + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); + let mut all_rxs: Vec = Vec::new(); + async fn create_rx( + datastore: &DataStore, + opctx: &OpContext, + all_rxs: &mut Vec, + name: &str, + subscription: &str, + ) -> WebhookReceiverConfig { + let rx = create_receiver( + datastore, + opctx, + name, + vec![subscription.to_string()], + ) + .await; + all_rxs.push(rx.clone()); + rx + } + + let test_star = + create_rx(&datastore, &opctx, &mut all_rxs, "test-star", "test.*") + .await; + let test_starstar = create_rx( + &datastore, + &opctx, + &mut all_rxs, + "test-starstar", + "test.**", + ) + .await; + let test_foo_star = create_rx( + &datastore, + &opctx, + &mut all_rxs, + "test-foo-star", + "test.foo.*", + ) + .await; + let test_star_baz = create_rx( + &datastore, + &opctx, + &mut all_rxs, + "test-star-baz", + "test.*.baz", + ) + .await; + let test_starstar_baz = create_rx( + &datastore, + &opctx, + &mut all_rxs, + "test-starstar-baz", + "test.**.baz", + ) + .await; + let test_quux_star = create_rx( + &datastore, + &opctx, + &mut all_rxs, + "test-quux-star", + "test.quux.*", + ) + .await; + let test_quux_starstar = create_rx( + &datastore, + &opctx, + &mut all_rxs, + "test-quux-starstar", + "test.quux.**", + ) + .await; + + async fn check_event( + datastore: &DataStore, + opctx: &OpContext, + all_rxs: &Vec, + event_class: WebhookEventClass, + matches: &[&WebhookReceiverConfig], + ) { + let subscribed = datastore + 
.webhook_rx_list_subscribed_to_event(opctx, event_class) + .await + .unwrap() + .into_iter() + .map(|(rx, subscription)| { + eprintln!( + "receiver is subscribed to event {event_class}:\n\t\ + rx: {} ({})\n\tsubscription: {subscription:?}", + rx.identity.name, rx.identity.id, + ); + rx.identity + }) + .collect::>(); + + for WebhookReceiverConfig { rx, events, .. } in matches { + assert!( + subscribed.contains(&rx.identity), + "expected {rx:?} to be subscribed to {event_class}\n\ + subscriptions: {events:?}" + ); + } + + let not_matches = all_rxs.iter().filter( + |WebhookReceiverConfig { rx, .. }| { + matches + .iter() + .all(|match_rx| rx.identity != match_rx.rx.identity) + }, + ); + for WebhookReceiverConfig { rx, events, .. } in not_matches { + assert!( + !subscribed.contains(&rx.identity), + "expected {rx:?} to not be subscribed to {event_class}\n\ + subscriptions: {events:?}" + ); + } + } + + check_event( + datastore, + opctx, + &all_rxs, + WebhookEventClass::TestFoo, + &[&test_star, &test_starstar], + ) + .await; + check_event( + datastore, + opctx, + &all_rxs, + WebhookEventClass::TestFooBar, + &[&test_starstar, &test_foo_star], + ) + .await; + check_event( + datastore, + opctx, + &all_rxs, + WebhookEventClass::TestFooBaz, + &[ + &test_starstar, + &test_foo_star, + &test_star_baz, + &test_starstar_baz, + ], + ) + .await; + check_event( + datastore, + opctx, + &all_rxs, + WebhookEventClass::TestQuuxBar, + &[&test_starstar, &test_quux_star, &test_quux_starstar], + ) + .await; + check_event( + datastore, + opctx, + &all_rxs, + WebhookEventClass::TestQuuxBarBaz, + &[&test_starstar, &test_quux_starstar, &test_starstar_baz], + ) + .await; + + // Clean up. 
+ db.terminate().await; + logctx.cleanup_successful(); + } + + #[tokio::test] + async fn explain_event_class_glob() { + let logctx = dev::test_setup_log("explain_event_class_glob"); + let db = TestDatabase::new_with_pool(&logctx.log).await; + let pool = db.pool(); + let conn = pool.claim().await.unwrap(); + + let query = + DataStore::rx_list_subscribed_query(WebhookEventClass::TestFooBar); + let explanation = query + .explain_async(&conn) + .await + .expect("Failed to explain query - is it valid SQL?"); + println!("{explanation}"); + + db.terminate().await; + logctx.cleanup_successful(); + } + + #[tokio::test] + async fn test_rx_is_subscribed_to_event() { + // Test setup + let logctx = dev::test_setup_log("test_rx_is_subscribed_to_event"); + let db = TestDatabase::new_with_datastore(&logctx.log).await; + let (opctx, datastore) = (db.opctx(), db.datastore()); + let rx = create_receiver( + datastore, + opctx, + "webhooked-on-phonics", + vec!["test.*.bar".to_string()], + ) + .await; + + let (authz_rx, _) = LookupPath::new(opctx, datastore) + .webhook_receiver_id(rx.rx.id()) + .fetch() + .await + .expect("cant get ye receiver"); + + let (authz_foo, _) = + create_event(datastore, opctx, WebhookEventClass::TestFoo).await; + let (authz_foo_bar, _) = + create_event(datastore, opctx, WebhookEventClass::TestFooBar).await; + let (authz_quux_bar, _) = + create_event(datastore, opctx, WebhookEventClass::TestQuuxBar) + .await; + + let is_subscribed_foo = datastore + .webhook_rx_is_subscribed_to_event(opctx, &authz_rx, &authz_foo) + .await; + assert_eq!(is_subscribed_foo, Ok(false)); + + let is_subscribed_foo_bar = datastore + .webhook_rx_is_subscribed_to_event(opctx, &authz_rx, &authz_foo_bar) + .await; + assert_eq!(is_subscribed_foo_bar, Ok(true)); + + let is_subscribed_quux_bar = datastore + .webhook_rx_is_subscribed_to_event( + opctx, + &authz_rx, + &authz_quux_bar, + ) + .await; + assert_eq!(is_subscribed_quux_bar, Ok(true)); + + db.terminate().await; + 
logctx.cleanup_successful(); + } +} diff --git a/nexus/db-queries/src/db/lookup.rs b/nexus/db-queries/src/db/lookup.rs index 4ced0d608eb..9d79a7e8b08 100644 --- a/nexus/db-queries/src/db/lookup.rs +++ b/nexus/db-queries/src/db/lookup.rs @@ -26,6 +26,9 @@ use omicron_uuid_kinds::SupportBundleUuid; use omicron_uuid_kinds::TufArtifactKind; use omicron_uuid_kinds::TufRepoKind; use omicron_uuid_kinds::TypedUuid; +use omicron_uuid_kinds::WebhookEventUuid; +use omicron_uuid_kinds::WebhookReceiverUuid; +use omicron_uuid_kinds::WebhookSecretUuid; use uuid::Uuid; /// Look up an API resource in the database @@ -553,6 +556,59 @@ impl<'a> LookupPath<'a> { { SamlIdentityProvider::PrimaryKey(Root { lookup_root: self }, id) } + + pub fn webhook_receiver_id<'b>( + self, + id: WebhookReceiverUuid, + ) -> WebhookReceiver<'b> + where + 'a: 'b, + { + WebhookReceiver::PrimaryKey(Root { lookup_root: self }, id) + } + + /// Select a resource of type [`WebhookReceiver`], identified by its name + pub fn webhook_receiver_name<'b, 'c>( + self, + name: &'b Name, + ) -> WebhookReceiver<'c> + where + 'a: 'c, + 'b: 'c, + { + WebhookReceiver::Name(Root { lookup_root: self }, name) + } + + /// Select a resource of type [`WebhookReceiver`], identified by its owned name + pub fn webhook_receiver_name_owned<'b, 'c>( + self, + name: Name, + ) -> WebhookReceiver<'c> + where + 'a: 'c, + 'b: 'c, + { + WebhookReceiver::OwnedName(Root { lookup_root: self }, name) + } + + /// Select a resource of type [`WebhookSecret`], identified by its UUID. + pub fn webhook_secret_id<'b>( + self, + id: WebhookSecretUuid, + ) -> WebhookSecret<'b> + where + 'a: 'b, + { + WebhookSecret::PrimaryKey(Root { lookup_root: self }, id) + } + + /// Select a resource of type [`WebhookEvent`], identified by its UUID. 
+ pub fn webhook_event_id<'b>(self, id: WebhookEventUuid) -> WebhookEvent<'b> + where + 'a: 'b, + { + WebhookEvent::PrimaryKey(Root { lookup_root: self }, id) + } } /// Represents the head of the selection path for a resource @@ -933,6 +989,36 @@ lookup_resource! { ] } +lookup_resource! { + name = "WebhookReceiver", + ancestors = [], + lookup_by_name = true, + soft_deletes = true, + primary_key_columns = [ + { column_name = "id", uuid_kind = WebhookReceiverKind } + ] +} + +lookup_resource! { + name = "WebhookSecret", + ancestors = ["WebhookReceiver"], + lookup_by_name = false, + soft_deletes = false, + primary_key_columns = [ + { column_name = "id", uuid_kind = WebhookSecretKind } + ] +} + +lookup_resource! { + name = "WebhookEvent", + ancestors = [], + lookup_by_name = false, + soft_deletes = false, + primary_key_columns = [ + { column_name = "id", uuid_kind = WebhookEventKind } + ] +} + // Helpers for unifying the interfaces around images pub enum ImageLookup<'a> { diff --git a/nexus/db-queries/src/policy_test/resource_builder.rs b/nexus/db-queries/src/policy_test/resource_builder.rs index 3d5ea068ca6..88e7b34d7a6 100644 --- a/nexus/db-queries/src/policy_test/resource_builder.rs +++ b/nexus/db-queries/src/policy_test/resource_builder.rs @@ -278,6 +278,9 @@ impl_dyn_authorized_resource_for_resource!(authz::TufArtifact); impl_dyn_authorized_resource_for_resource!(authz::TufRepo); impl_dyn_authorized_resource_for_resource!(authz::Vpc); impl_dyn_authorized_resource_for_resource!(authz::VpcSubnet); +impl_dyn_authorized_resource_for_resource!(authz::WebhookEvent); +impl_dyn_authorized_resource_for_resource!(authz::WebhookReceiver); +impl_dyn_authorized_resource_for_resource!(authz::WebhookSecret); impl_dyn_authorized_resource_for_resource!(authz::Zpool); impl_dyn_authorized_resource_for_global!(authz::Database); @@ -288,6 +291,7 @@ impl_dyn_authorized_resource_for_global!(authz::DnsConfig); impl_dyn_authorized_resource_for_global!(authz::IpPoolList); 
impl_dyn_authorized_resource_for_global!(authz::Inventory); impl_dyn_authorized_resource_for_global!(authz::TargetReleaseConfig); +impl_dyn_authorized_resource_for_global!(authz::WebhookEventClassList); impl DynAuthorizedResource for authz::SiloCertificateList { fn do_authorize<'a, 'b>( diff --git a/nexus/db-queries/src/policy_test/resources.rs b/nexus/db-queries/src/policy_test/resources.rs index b069d1df2a5..6853288ff09 100644 --- a/nexus/db-queries/src/policy_test/resources.rs +++ b/nexus/db-queries/src/policy_test/resources.rs @@ -74,6 +74,7 @@ pub async fn make_resources( builder.new_resource(authz::INVENTORY); builder.new_resource(authz::IP_POOL_LIST); builder.new_resource(authz::TARGET_RELEASE_CONFIG); + builder.new_resource(authz::WEBHOOK_EVENT_CLASS_LIST); // Silo/organization/project hierarchy make_silo(&mut builder, "silo1", main_silo_id, true).await; @@ -171,6 +172,16 @@ pub async fn make_resources( LookupType::ById(loopback_address_id.into_untyped_uuid()), )); + let webhook_event_id = + "31cb17da-4164-4cbf-b9a3-b3e4a687c08b".parse().unwrap(); + builder.new_resource(authz::WebhookEvent::new( + authz::FLEET, + webhook_event_id, + LookupType::ById(webhook_event_id.into_untyped_uuid()), + )); + + make_webhook_rx(&mut builder).await; + builder.build() } @@ -388,6 +399,26 @@ async fn make_project( )); } +/// Helper for `make_resources()` that constructs a webhook receiver and its +/// very miniscule hierarchy (a secret). 
+async fn make_webhook_rx(builder: &mut ResourceBuilder<'_>) { + let rx_name = "webhooked-on-phonics"; + let webhook_rx = authz::WebhookReceiver::new( + authz::FLEET, + omicron_uuid_kinds::WebhookReceiverUuid::new_v4(), + LookupType::ByName(rx_name.to_string()), + ); + builder.new_resource(webhook_rx.clone()); + + let webhook_secret_id = + "0c3e55cb-fcee-46e9-a2e3-0901dbd3b997".parse().unwrap(); + builder.new_resource(authz::WebhookSecret::new( + webhook_rx, + webhook_secret_id, + LookupType::ById(webhook_secret_id.into_untyped_uuid()), + )); +} + /// Returns the set of authz classes exempted from the coverage test pub fn exempted_authz_classes() -> BTreeSet { // Exemption list for the coverage test diff --git a/nexus/db-queries/tests/output/authz-roles.out b/nexus/db-queries/tests/output/authz-roles.out index 6ff26853690..e83cacbe3a9 100644 --- a/nexus/db-queries/tests/output/authz-roles.out +++ b/nexus/db-queries/tests/output/authz-roles.out @@ -124,6 +124,20 @@ resource: authz::TargetReleaseConfig silo1-proj1-viewer ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ unauthenticated ! ! ! ! ! ! ! ! +resource: authz::WebhookEventClassList + + USER Q R LC RP M MP CC D + fleet-admin ✘ ✘ ✔ ✘ ✘ ✘ ✘ ✘ + fleet-collaborator ✘ ✘ ✔ ✘ ✘ ✘ ✘ ✘ + fleet-viewer ✘ ✘ ✔ ✘ ✘ ✘ ✘ ✘ + silo1-admin ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ + silo1-collaborator ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ + silo1-viewer ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ + silo1-proj1-admin ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ + silo1-proj1-collaborator ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ + silo1-proj1-viewer ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ + unauthenticated ! ! ! ! ! ! ! ! + resource: Silo "silo1" USER Q R LC RP M MP CC D @@ -1244,6 +1258,48 @@ resource: LoopbackAddress id "9efbf1b1-16f9-45ab-864a-f7ebe501ae5b" silo1-proj1-viewer ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ unauthenticated ! ! ! ! ! ! ! ! 
+resource: WebhookEvent id "31cb17da-4164-4cbf-b9a3-b3e4a687c08b" + + USER Q R LC RP M MP CC D + fleet-admin ✘ ✔ ✔ ✔ ✔ ✔ ✔ ✔ + fleet-collaborator ✘ ✔ ✔ ✔ ✘ ✘ ✘ ✘ + fleet-viewer ✘ ✔ ✔ ✔ ✘ ✘ ✘ ✘ + silo1-admin ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ + silo1-collaborator ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ + silo1-viewer ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ + silo1-proj1-admin ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ + silo1-proj1-collaborator ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ + silo1-proj1-viewer ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ + unauthenticated ! ! ! ! ! ! ! ! + +resource: WebhookReceiver "webhooked-on-phonics" + + USER Q R LC RP M MP CC D + fleet-admin ✘ ✔ ✔ ✔ ✔ ✔ ✔ ✔ + fleet-collaborator ✘ ✔ ✔ ✔ ✘ ✘ ✘ ✘ + fleet-viewer ✘ ✔ ✔ ✔ ✘ ✘ ✘ ✘ + silo1-admin ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ + silo1-collaborator ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ + silo1-viewer ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ + silo1-proj1-admin ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ + silo1-proj1-collaborator ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ + silo1-proj1-viewer ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ + unauthenticated ! ! ! ! ! ! ! ! + +resource: WebhookSecret id "0c3e55cb-fcee-46e9-a2e3-0901dbd3b997" + + USER Q R LC RP M MP CC D + fleet-admin ✘ ✔ ✘ ✔ ✔ ✔ ✘ ✔ + fleet-collaborator ✘ ✔ ✘ ✔ ✘ ✘ ✘ ✘ + fleet-viewer ✘ ✔ ✘ ✔ ✘ ✘ ✘ ✘ + silo1-admin ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ + silo1-collaborator ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ + silo1-viewer ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ + silo1-proj1-admin ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ + silo1-proj1-collaborator ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ + silo1-proj1-viewer ✘ ✘ ✘ ✘ ✘ ✘ ✘ ✘ + unauthenticated ! ! ! ! ! ! ! ! 
+ ACTIONS: Q = Query diff --git a/nexus/db-queries/tests/output/webhook_rx_list_resendable_events.sql b/nexus/db-queries/tests/output/webhook_rx_list_resendable_events.sql new file mode 100644 index 00000000000..f39e1afeb09 --- /dev/null +++ b/nexus/db-queries/tests/output/webhook_rx_list_resendable_events.sql @@ -0,0 +1,25 @@ +SELECT + DISTINCT + webhook_event.id, + webhook_event.time_created, + webhook_event.time_modified, + webhook_event.time_dispatched, + webhook_event.event_class, + webhook_event.event, + webhook_event.num_dispatched +FROM + webhook_event INNER JOIN webhook_delivery AS delivery ON delivery.event_id = webhook_event.id +WHERE + (webhook_event.event_class != $1 AND delivery.rx_id = $2) + AND NOT + ( + EXISTS( + SELECT + also_delivey.id + FROM + webhook_delivery AS also_delivey + WHERE + (also_delivey.event_id = webhook_event.id AND also_delivey.state != $3) + AND also_delivey.triggered_by != $4 + ) + ) diff --git a/nexus/examples/config-second.toml b/nexus/examples/config-second.toml index 9087c06c0c1..5387793ec81 100644 --- a/nexus/examples/config-second.toml +++ b/nexus/examples/config-second.toml @@ -143,6 +143,10 @@ region_snapshot_replacement_step.period_secs = 30 region_snapshot_replacement_finish.period_secs = 30 tuf_artifact_replication.period_secs = 300 tuf_artifact_replication.min_sled_replication = 1 +# In general, the webhook dispatcher will be activated when events are queued, +# so we don't need to periodically activate it *that* frequently. 
+webhook_dispatcher.period_secs = 60 +webhook_deliverator.period_secs = 60 read_only_region_replacement_start.period_secs = 30 [default_region_allocation_strategy] diff --git a/nexus/examples/config.toml b/nexus/examples/config.toml index 155e61d58d9..e5b4d564f55 100644 --- a/nexus/examples/config.toml +++ b/nexus/examples/config.toml @@ -129,6 +129,10 @@ region_snapshot_replacement_step.period_secs = 30 region_snapshot_replacement_finish.period_secs = 30 tuf_artifact_replication.period_secs = 300 tuf_artifact_replication.min_sled_replication = 1 +# In general, the webhook dispatcher will be activated when events are queued, +# so we don't need to periodically activate it *that* frequently. +webhook_dispatcher.period_secs = 60 +webhook_deliverator.period_secs = 60 read_only_region_replacement_start.period_secs = 30 [default_region_allocation_strategy] diff --git a/nexus/external-api/output/nexus_tags.txt b/nexus/external-api/output/nexus_tags.txt index a94d31314d3..51f752cf687 100644 --- a/nexus/external-api/output/nexus_tags.txt +++ b/nexus/external-api/output/nexus_tags.txt @@ -266,6 +266,21 @@ API operations found with tag "system/status" OPERATION ID METHOD URL PATH ping GET /v1/ping +API operations found with tag "system/webhooks" +OPERATION ID METHOD URL PATH +webhook_delivery_list GET /v1/webhooks/deliveries +webhook_delivery_resend POST /v1/webhooks/deliveries/{event_id}/resend +webhook_event_class_list GET /v1/webhooks/event-classes +webhook_receiver_create POST /v1/webhooks/receivers +webhook_receiver_delete DELETE /v1/webhooks/receivers/{receiver} +webhook_receiver_list GET /v1/webhooks/receivers +webhook_receiver_probe POST /v1/webhooks/receivers/{receiver}/probe +webhook_receiver_update PUT /v1/webhooks/receivers/{receiver} +webhook_receiver_view GET /v1/webhooks/receivers/{receiver} +webhook_secrets_add POST /v1/webhooks/secrets +webhook_secrets_delete DELETE /v1/webhooks/secrets/{secret_id} +webhook_secrets_list GET /v1/webhooks/secrets + API 
operations found with tag "vpcs" OPERATION ID METHOD URL PATH internet_gateway_create POST /v1/internet-gateways diff --git a/nexus/external-api/src/lib.rs b/nexus/external-api/src/lib.rs index abcaa0a1122..7c1eb9addd5 100644 --- a/nexus/external-api/src/lib.rs +++ b/nexus/external-api/src/lib.rs @@ -17,7 +17,10 @@ use nexus_types::{ external_api::{params, shared, views}, }; use omicron_common::api::external::{ - http_pagination::{PaginatedById, PaginatedByName, PaginatedByNameOrId}, + http_pagination::{ + PaginatedById, PaginatedByName, PaginatedByNameOrId, + PaginatedByTimeAndId, + }, *, }; use openapi_manager_types::ValidationContext; @@ -165,6 +168,12 @@ const PUT_UPDATE_REPOSITORY_MAX_BYTES: usize = 4 * GIB; url = "http://docs.oxide.computer/api/vpcs" } }, + "system/webhooks" = { + description = "Webhooks deliver notifications for audit log events and fault management alerts.", + external_docs = { + url = "http://docs.oxide.computer/api/webhooks" + } + }, "system/probes" = { description = "Probes for testing network connectivity", external_docs = { @@ -3498,6 +3507,175 @@ pub trait NexusExternalApi { rqctx: RequestContext, params: TypedBody, ) -> Result, HttpError>; + + // Webhooks + + /// List webhook event classes + #[endpoint { + method = GET, + path = "/v1/webhooks/event-classes", + tags = ["system/webhooks"], + }] + async fn webhook_event_class_list( + rqctx: RequestContext, + pag_params: Query< + PaginationParams, + >, + filter: Query, + ) -> Result>, HttpError>; + + /// List webhook receivers. 
+ #[endpoint { + method = GET, + path = "/v1/webhooks/receivers", + tags = ["system/webhooks"], + }] + async fn webhook_receiver_list( + rqctx: RequestContext, + query_params: Query, + ) -> Result>, HttpError>; + + /// Fetch webhook receiver + #[endpoint { + method = GET, + path = "/v1/webhooks/receivers/{receiver}", + tags = ["system/webhooks"], + }] + async fn webhook_receiver_view( + rqctx: RequestContext, + path_params: Path, + ) -> Result, HttpError>; + + /// Create webhook receiver. + #[endpoint { + method = POST, + path = "/v1/webhooks/receivers", + tags = ["system/webhooks"], + }] + async fn webhook_receiver_create( + rqctx: RequestContext, + params: TypedBody, + ) -> Result, HttpError>; + + /// Update webhook receiver + /// + /// Note that receiver secrets are NOT added or removed using this endpoint. + /// Instead, use the `/v1/webhooks/secrets?receiver={receiver}` endpoint + /// to add and remove secrets. + #[endpoint { + method = PUT, + path = "/v1/webhooks/receivers/{receiver}", + tags = ["system/webhooks"], + }] + async fn webhook_receiver_update( + rqctx: RequestContext, + path_params: Path, + params: TypedBody, + ) -> Result; + + /// Delete webhook receiver. + #[endpoint { + method = DELETE, + path = "/v1/webhooks/receivers/{receiver}", + tags = ["system/webhooks"], + }] + async fn webhook_receiver_delete( + rqctx: RequestContext, + path_params: Path, + ) -> Result; + + /// Send liveness probe to webhook receiver + /// + /// This endpoint synchronously sends a liveness probe request to the + /// selected webhook receiver. The response message describes the outcome of + /// the probe request: either the response from the receiver endpoint, or an + /// indication of why the probe failed. + /// + /// Note that the response status is `200 OK` as long as a probe request was + /// able to be sent to the receiver endpoint.
If the receiver responds with + /// another status code, including an error, this will be indicated by the + /// response body, *not* the status of the response. + /// + /// The `resend` query parameter can be used to request re-delivery of + /// failed events if the liveness probe succeeds. If it is set to true and + /// the webhook receiver responds to the probe request with a `2xx` status + /// code, any events for which delivery to this receiver has failed will be + /// queued for re-delivery. + #[endpoint { + method = POST, + path = "/v1/webhooks/receivers/{receiver}/probe", + tags = ["system/webhooks"], + }] + async fn webhook_receiver_probe( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result, HttpError>; + + /// List webhook receiver secret IDs + #[endpoint { + method = GET, + path = "/v1/webhooks/secrets", + tags = ["system/webhooks"], + }] + async fn webhook_secrets_list( + rqctx: RequestContext, + query_params: Query, + ) -> Result, HttpError>; + + /// Add secret to webhook receiver + #[endpoint { + method = POST, + path = "/v1/webhooks/secrets", + tags = ["system/webhooks"], + }] + async fn webhook_secrets_add( + rqctx: RequestContext, + query_params: Query, + params: TypedBody, + ) -> Result, HttpError>; + + /// Remove secret from webhook receiver + #[endpoint { + method = DELETE, + path = "/v1/webhooks/secrets/{secret_id}", + tags = ["system/webhooks"], + }] + async fn webhook_secrets_delete( + rqctx: RequestContext, + path_params: Path, + ) -> Result; + + /// List delivery attempts to a webhook receiver + /// + /// Optional query parameters to this endpoint may be used to filter + /// deliveries by state. If none of the `failed`, `pending` or `delivered` + /// query parameters are present, all deliveries are returned. If one or + /// more of these parameters are provided, only those which are set to + /// "true" are included in the response. 
+ #[endpoint { + method = GET, + path = "/v1/webhooks/deliveries", + tags = ["system/webhooks"], + }] + async fn webhook_delivery_list( + rqctx: RequestContext, + receiver: Query, + state_filter: Query, + pagination: Query, + ) -> Result>, HttpError>; + + /// Request re-delivery of webhook event + #[endpoint { + method = POST, + path = "/v1/webhooks/deliveries/{event_id}/resend", + tags = ["system/webhooks"], + }] + async fn webhook_delivery_resend( + rqctx: RequestContext, + path_params: Path, + receiver: Query, + ) -> Result, HttpError>; } /// Perform extra validations on the OpenAPI spec. diff --git a/nexus/src/app/background/init.rs b/nexus/src/app/background/init.rs index e6feebd7a4a..d99b4811d7f 100644 --- a/nexus/src/app/background/init.rs +++ b/nexus/src/app/background/init.rs @@ -125,6 +125,8 @@ use super::tasks::sync_switch_configuration::SwitchPortSettingsManager; use super::tasks::tuf_artifact_replication; use super::tasks::v2p_mappings::V2PManager; use super::tasks::vpc_routes; +use super::tasks::webhook_deliverator; +use super::tasks::webhook_dispatcher::WebhookDispatcher; use crate::Nexus; use crate::app::oximeter::PRODUCER_LEASE_DURATION; use crate::app::saga::StartSaga; @@ -182,6 +184,8 @@ pub struct BackgroundTasks { pub task_region_snapshot_replacement_finish: Activator, pub task_tuf_artifact_replication: Activator, pub task_read_only_region_replacement_start: Activator, + pub task_webhook_dispatcher: Activator, + pub task_webhook_deliverator: Activator, // Handles to activate background tasks that do not get used by Nexus // at-large. 
These background tasks are implementation details as far as @@ -273,6 +277,8 @@ impl BackgroundTasksInitializer { task_region_snapshot_replacement_finish: Activator::new(), task_tuf_artifact_replication: Activator::new(), task_read_only_region_replacement_start: Activator::new(), + task_webhook_dispatcher: Activator::new(), + task_webhook_deliverator: Activator::new(), task_internal_dns_propagation: Activator::new(), task_external_dns_propagation: Activator::new(), @@ -343,6 +349,8 @@ impl BackgroundTasksInitializer { task_region_snapshot_replacement_finish, task_tuf_artifact_replication, task_read_only_region_replacement_start, + task_webhook_dispatcher, + task_webhook_deliverator, // Add new background tasks here. Be sure to use this binding in a // call to `Driver::register()` below. That's what actually wires // up the Activator to the corresponding background task. @@ -909,13 +917,68 @@ impl BackgroundTasksInitializer { process", period: config.read_only_region_replacement_start.period_secs, task_impl: Box::new(ReadOnlyRegionReplacementDetector::new( - datastore, + datastore.clone(), )), opctx: opctx.child(BTreeMap::new()), watchers: vec![], activator: task_read_only_region_replacement_start, }); + driver.register(TaskDefinition { + name: "webhook_dispatcher", + description: "dispatches queued webhook events to receivers", + period: config.webhook_dispatcher.period_secs, + task_impl: Box::new(WebhookDispatcher::new( + datastore.clone(), + task_webhook_deliverator.clone(), + )), + opctx: opctx.child(BTreeMap::new()), + watchers: vec![], + activator: task_webhook_dispatcher, + }); + + driver.register({ + let nexus_config::WebhookDeliveratorConfig { + lease_timeout_secs, + period_secs, + first_retry_backoff_secs, + second_retry_backoff_secs, + } = config.webhook_deliverator; + let cfg = webhook_deliverator::DeliveryConfig { + lease_timeout: chrono::TimeDelta::seconds( + lease_timeout_secs.try_into().expect( + "invalid webhook_deliverator.lease_timeout_secs", + ), 
+ ), + first_retry_backoff: chrono::TimeDelta::seconds( + first_retry_backoff_secs.try_into().expect( + "invalid webhook_deliverator.first_retry_backoff_secs", + ), + ), + second_retry_backoff: chrono::TimeDelta::seconds( + second_retry_backoff_secs.try_into().expect( + "invalid webhook_deliverator.second_retry_backoff_secs", + ), + ), + }; + TaskDefinition { + name: "webhook_deliverator", + description: "sends webhook delivery requests", + period: period_secs, + task_impl: Box::new( + webhook_deliverator::WebhookDeliverator::new( + datastore, + cfg, + nexus_id, + args.webhook_delivery_client, + ), + ), + opctx: opctx.child(BTreeMap::new()), + watchers: vec![], + activator: task_webhook_deliverator, + } + }); + driver } } @@ -942,6 +1005,11 @@ pub struct BackgroundTasksData { pub saga_recovery: saga_recovery::SagaRecoveryHelpers>, /// Channel for TUF repository artifacts to be replicated out to sleds pub tuf_artifact_replication_rx: mpsc::Receiver, + /// `reqwest::Client` for webhook delivery requests. + /// + /// This is shared with the external API as it's also used when sending + /// webhook liveness probe requests from the API.
+ pub webhook_delivery_client: reqwest::Client, } /// Starts the three DNS-propagation-related background tasks for either diff --git a/nexus/src/app/background/tasks/mod.rs b/nexus/src/app/background/tasks/mod.rs index 62339a52e70..0e06f5e99c2 100644 --- a/nexus/src/app/background/tasks/mod.rs +++ b/nexus/src/app/background/tasks/mod.rs @@ -40,3 +40,5 @@ pub mod sync_switch_configuration; pub mod tuf_artifact_replication; pub mod v2p_mappings; pub mod vpc_routes; +pub mod webhook_deliverator; +pub mod webhook_dispatcher; diff --git a/nexus/src/app/background/tasks/webhook_deliverator.rs b/nexus/src/app/background/tasks/webhook_deliverator.rs new file mode 100644 index 00000000000..76b3e565417 --- /dev/null +++ b/nexus/src/app/background/tasks/webhook_deliverator.rs @@ -0,0 +1,322 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. +use crate::app::background::BackgroundTask; +use crate::app::webhook::ReceiverClient; +use futures::future::BoxFuture; +use nexus_db_queries::context::OpContext; +use nexus_db_queries::db::DataStore; +use nexus_db_queries::db::datastore::webhook_delivery::DeliveryAttemptState; +pub use nexus_db_queries::db::datastore::webhook_delivery::DeliveryConfig; +use nexus_db_queries::db::model::WebhookDeliveryAttemptResult; +use nexus_db_queries::db::model::WebhookReceiverConfig; +use nexus_db_queries::db::pagination::Paginator; +use nexus_types::identity::Resource; +use nexus_types::internal_api::background::WebhookDeliveratorStatus; +use nexus_types::internal_api::background::WebhookDeliveryFailure; +use nexus_types::internal_api::background::WebhookRxDeliveryStatus; +use omicron_common::api::external::Error; +use omicron_common::api::external::http_pagination::PaginatedBy; +use omicron_uuid_kinds::{GenericUuid, OmicronZoneUuid, WebhookDeliveryUuid}; +use std::num::NonZeroU32; +use 
std::sync::Arc; +use tokio::task::JoinSet; + +// The Deliverator belongs to an elite order, a hallowed sub-category. He's got +// esprit up to here. Right now he is preparing to carry out his third mission +// of the night. His uniform is black as activated charcoal, filtering the very +// light out of the air. A bullet will bounce off its arachno-fiber weave like a +// wren hitting a patio door, but excess perspiration wafts through it like a +// breeze through a freshly napalmed forest. Where his body has bony +// extremities, the suit has sintered armorgel: feels like gritty jello, +// protects like a stack of telephone books. +// +// When they gave him the job, they gave him a gun. The Deliverator never deals +// in cash, but someone might come after him anyway–might want his car, or his +// cargo. The gun is tiny, aero-styled, lightweight, the kind of a gun a +// fashion designer would carry; it fires teensy darts that fly at five times +// the velocity of an SR-71 spy plane, and when you get done using it, you have +// to plug it in to the cigarette lighter, because it runs on electricity. +// +// The Deliverator never pulled that gun in anger, or in fear. He pulled it once +// in Gila Highlands. Some punks in Gila Highlands, a fancy Burbclave, wanted +// themselves a delivery, and they didn't want to pay for it. Thought they would +// impress the Deliverator with a baseball bat. The Deliverator took out his +// gun, centered its laser doo-hickey on that poised Louisville Slugger, fired +// it. The recoil was immense, as though the weapon had blown up in his hand. +// The middle third of the baseball bat turned into a column of burning sawdust +// accelerating in all directions like a bursting star. Punk ended up holding +// this bat handle with milky smoke pouring out the end. Stupid look on his +// face. Didn't get nothing but trouble from the Deliverator. 
+// +// Since then the Deliverator has kept the gun in the glove compartment and +// relied, instead, on a matched set of samurai swords, which have always been +// his weapon of choice anyhow. The punks in Gila Highlands weren't afraid of +// the gun, so the Deliverator was forced to use it. But swords need no +// demonstration. +// +// The Deliverator's car has enough potential energy packed into its batteries +// to fire a pound of bacon into the Asteroid Belt. Unlike a bimbo box or a Burb +// beater, the Deliverator's car unloads that power through gaping, gleaming, +// polished sphincters. When the Deliverator puts the hammer down, shit happens. +// You want to talk contact patches? Your car's tires have tiny contact patches, +// talk to the asphalt in four places the size of your tongue. The Deliverator's +// car has big sticky tires with contact patches the size of a fat lady's +// thighs. The Deliverator is in touch with the road, starts like a bad day, +// stops on a peseta. +// +// Why is the Deliverator so equipped? Because people rely on him. He is a role +// model. 
+// +// --- Neal Stephenson, _Snow Crash_ +#[derive(Clone)] +pub struct WebhookDeliverator { + datastore: Arc, + nexus_id: OmicronZoneUuid, + client: reqwest::Client, + cfg: DeliveryConfig, +} + +impl BackgroundTask for WebhookDeliverator { + fn activate<'a>( + &'a mut self, + opctx: &'a OpContext, + ) -> BoxFuture<'a, serde_json::Value> { + Box::pin(async move { + let mut status = WebhookDeliveratorStatus { + by_rx: Default::default(), + error: None, + }; + if let Err(e) = self.actually_activate(opctx, &mut status).await { + slog::error!(&opctx.log, "webhook delivery failed"; "error" => %e); + status.error = Some(e.to_string()); + } + + serde_json::json!(status) + }) + } +} + +impl WebhookDeliverator { + pub fn new( + datastore: Arc, + cfg: DeliveryConfig, + nexus_id: OmicronZoneUuid, + client: reqwest::Client, + ) -> Self { + Self { datastore, nexus_id, cfg, client } + } + + const MAX_CONCURRENT_RXS: NonZeroU32 = { + match NonZeroU32::new(8) { + Some(nz) => nz, + None => unreachable!(), + } + }; + + async fn actually_activate( + &mut self, + opctx: &OpContext, + status: &mut WebhookDeliveratorStatus, + ) -> Result<(), Error> { + let mut tasks = JoinSet::new(); + let mut paginator = Paginator::new(Self::MAX_CONCURRENT_RXS); + while let Some(p) = paginator.next() { + let rxs = self + .datastore + .webhook_rx_list( + &opctx, + &PaginatedBy::Id(p.current_pagparams()), + ) + .await?; + paginator = p + .found_batch(&rxs, &|WebhookReceiverConfig { rx, .. }| { + rx.id().into_untyped_uuid() + }); + + for rx in rxs { + let rx_id = rx.rx.id(); + let opctx = opctx.child(maplit::btreemap! 
{ + "receiver_id".to_string() => rx_id.to_string(), + "receiver_name".to_string() => rx.rx.name().to_string(), + }); + let deliverator = self.clone(); + tasks.spawn(async move { + let status = match deliverator.rx_deliver(&opctx, rx).await { + Ok(status) => status, + Err(e) => { + slog::error!( + &opctx.log, + "failed to deliver webhook events to a receiver"; + "rx_id" => ?rx_id, + "error" => %e, + ); + WebhookRxDeliveryStatus { + error: Some(e.to_string()), + ..Default::default() + } + } + }; + (rx_id, status) + }); + } + + while let Some(result) = tasks.join_next().await { + let (rx_id, rx_status) = result.expect( + "delivery tasks should not be canceled, and nexus is compiled \ + with `panic=\"abort\"`, so they will not have panicked", + ); + status.by_rx.insert(rx_id, rx_status); + } + } + + Ok(()) + } + + async fn rx_deliver( + &self, + opctx: &OpContext, + WebhookReceiverConfig { rx, secrets, .. }: WebhookReceiverConfig, + ) -> Result { + let mut client = + ReceiverClient::new(&self.client, secrets, &rx, self.nexus_id)?; + + let deliveries = self + .datastore + .webhook_rx_delivery_list_ready(&opctx, &rx.id(), &self.cfg) + .await + .map_err(|e| { + anyhow::anyhow!("could not list ready deliveries: {e}") + })?; + + // Okay, we got everything we need in order to deliver events to this + // receiver. Now, let's actually...do that. 
+ let mut delivery_status = WebhookRxDeliveryStatus { + ready: deliveries.len(), + ..Default::default() + }; + + for (delivery, event_class) in deliveries { + let attempt = (*delivery.attempts) + 1; + let delivery_id = WebhookDeliveryUuid::from(delivery.id); + match self + .datastore + .webhook_delivery_start_attempt( + opctx, + &delivery, + &self.nexus_id, + self.cfg.lease_timeout, + ) + .await + { + Ok(DeliveryAttemptState::Started) => { + slog::trace!(&opctx.log, + "webhook event delivery attempt started"; + "event_id" => %delivery.event_id, + "event_class" => %event_class, + "delivery_id" => %delivery_id, + "attempt" => ?attempt, + ); + } + Ok(DeliveryAttemptState::AlreadyCompleted(time)) => { + slog::debug!( + &opctx.log, + "delivery of this webhook event was already completed at {time:?}"; + "event_id" => %delivery.event_id, + "event_class" => %event_class, + "delivery_id" => %delivery_id, + "time_completed" => ?time, + ); + delivery_status.already_delivered += 1; + continue; + } + Ok(DeliveryAttemptState::InProgress { nexus_id, started }) => { + slog::debug!( + &opctx.log, + "delivery of this webhook event is in progress by another Nexus"; + "event_id" => %delivery.event_id, + "event_class" => %event_class, + "delivery_id" => %delivery_id, + "nexus_id" => %nexus_id, + "time_started" => ?started, + ); + delivery_status.in_progress += 1; + continue; + } + Err(error) => { + slog::error!( + &opctx.log, + "unexpected database error starting webhook delivery attempt"; + "event_id" => %delivery.event_id, + "event_class" => %event_class, + "delivery_id" => %delivery_id, + "error" => %error, + ); + delivery_status + .delivery_errors + .insert(delivery_id, error.to_string()); + continue; + } + } + + // okay, actually do the thing...
+ let delivery_attempt = match client + .send_delivery_request(opctx, &delivery, event_class) + .await + { + Ok(delivery) => delivery, + Err(error) => { + delivery_status + .delivery_errors + .insert(delivery_id, format!("{error:?}")); + continue; + } + }; + + if let Err(e) = self + .datastore + .webhook_delivery_finish_attempt( + opctx, + &delivery, + &self.nexus_id, + &delivery_attempt, + ) + .await + { + const MSG: &str = "failed to mark webhook delivery as finished"; + slog::error!( + &opctx.log, + "{MSG}"; + "event_id" => %delivery.event_id, + "event_class" => %event_class, + "delivery_id" => %delivery_id, + "error" => %e, + ); + delivery_status + .delivery_errors + .insert(delivery_id, format!("{MSG}: {e}")); + } + + if delivery_attempt.result + == WebhookDeliveryAttemptResult::Succeeded + { + delivery_status.delivered_ok += 1; + } else { + delivery_status.failed_deliveries.push( + WebhookDeliveryFailure { + delivery_id, + event_id: delivery.event_id.into(), + attempt: delivery_attempt.attempt.0 as usize, + result: delivery_attempt.result.into(), + response_status: delivery_attempt + .response_status + .map(|status| status as u16), + response_duration: delivery_attempt.response_duration, + }, + ); + } + } + + Ok(delivery_status) + } +} diff --git a/nexus/src/app/background/tasks/webhook_dispatcher.rs b/nexus/src/app/background/tasks/webhook_dispatcher.rs new file mode 100644 index 00000000000..7b717a8206f --- /dev/null +++ b/nexus/src/app/background/tasks/webhook_dispatcher.rs @@ -0,0 +1,498 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Background task that dispatches queued webhook events to receivers. 
+ +use crate::app::background::Activator; +use crate::app::background::BackgroundTask; +use futures::future::BoxFuture; +use nexus_db_model::SCHEMA_VERSION; +use nexus_db_model::WebhookDelivery; +use nexus_db_model::WebhookDeliveryTrigger; +use nexus_db_queries::context::OpContext; +use nexus_db_queries::db::DataStore; +use nexus_db_queries::db::datastore::SQL_BATCH_SIZE; +use nexus_db_queries::db::pagination::Paginator; +use nexus_types::identity::Asset; +use nexus_types::identity::Resource; +use nexus_types::internal_api::background::{ + WebhookDispatched, WebhookDispatcherStatus, WebhookGlobStatus, +}; +use omicron_common::api::external::Error; +use omicron_uuid_kinds::GenericUuid; +use std::sync::Arc; + +pub struct WebhookDispatcher { + datastore: Arc, + deliverator: Activator, +} + +impl BackgroundTask for WebhookDispatcher { + fn activate<'a>( + &'a mut self, + opctx: &'a OpContext, + ) -> BoxFuture<'a, serde_json::Value> { + Box::pin(async move { + let mut status = WebhookDispatcherStatus { + globs_reprocessed: Default::default(), + glob_version: SCHEMA_VERSION, + dispatched: Vec::new(), + errors: Vec::new(), + no_receivers: Vec::new(), + }; + match self.actually_activate(&opctx, &mut status).await { + Ok(_) if status.errors.is_empty() => { + const MSG: &str = + "webhook dispatching completed successfully"; + if !status.dispatched.is_empty() { + slog::info!( + &opctx.log, + "{MSG}"; + "events_dispatched" => status.dispatched.len(), + "events_without_receivers" => status.no_receivers.len(), + ); + } else { + // no sense cluttering up the logs if we didn't do + // anything interesting today + slog::trace!( + &opctx.log, + "{MSG}"; + "events_dispatched" => status.dispatched.len(), + "events_without_receivers" => status.no_receivers.len(), + ); + }; + } + Ok(_) => { + slog::warn!( + &opctx.log, + "webhook dispatching completed with errors"; + "events_dispatched" => status.dispatched.len(), + "events_without_receivers" => status.no_receivers.len(), + 
"events_failed" => status.errors.len(), + ); + } + Err(error) => { + slog::error!( + &opctx.log, + "webhook dispatching failed"; + "events_dispatched" => status.dispatched.len(), + "events_without_receivers" => status.no_receivers.len(), + "events_failed" => status.errors.len(), + "error" => &error, + ); + status.errors.push(error.to_string()); + } + }; + + // If any new deliveries were dispatched, call the deliverator! + if !status.dispatched.is_empty() { + self.deliverator.activate(); + } + + serde_json::json!(status) + }) + } +} + +impl WebhookDispatcher { + pub fn new(datastore: Arc, deliverator: Activator) -> Self { + Self { datastore, deliverator } + } + + async fn actually_activate( + &mut self, + opctx: &OpContext, + status: &mut WebhookDispatcherStatus, + ) -> Result<(), Error> { + // Before dispatching any events, ensure that all webhook globs are up + // to date with the current schema version. + let mut paginator = Paginator::new(SQL_BATCH_SIZE); + let mut globs_reprocessed = 0; + let mut globs_failed = 0; + let mut globs_already_reprocessed = 0; + while let Some(p) = paginator.next() { + let batch = self + .datastore + .webhook_glob_list_outdated(opctx, &p.current_pagparams()) + .await + .map_err(|e| { + e.internal_context("failed to list outdated webhook globs") + })?; + paginator = p.found_batch(&batch, &|glob| { + (glob.rx_id.into_untyped_uuid(), glob.glob.glob.clone()) + }); + for glob in batch { + let result = self + .datastore + .webhook_glob_reprocess(opctx, &glob) + .await + .map_err(|e| { + globs_failed += 1; + slog::warn!( + &opctx.log, + "failed to reprocess webhook glob"; + "rx_id" => ?glob.rx_id, + "glob" => ?glob.glob.glob, + "glob_version" => %glob.schema_version.0, + "error" => %e, + ); + e.to_string() + }) + .inspect(|status| match status { + WebhookGlobStatus::Reprocessed { .. 
} => { + globs_reprocessed += 1 + } + WebhookGlobStatus::AlreadyReprocessed => { + globs_already_reprocessed += 1 + } + }); + let rx_statuses = status + .globs_reprocessed + .entry(glob.rx_id.into()) + .or_default(); + rx_statuses.insert(glob.glob.glob, result); + } + } + if globs_failed > 0 { + slog::warn!( + &opctx.log, + "webhook glob reprocessing completed with failures"; + "globs_failed" => ?globs_failed, + "globs_reprocessed" => ?globs_reprocessed, + "globs_already_reprocessed" => ?globs_already_reprocessed, + ); + } else if globs_reprocessed > 0 { + slog::info!( + &opctx.log, + "webhook glob reprocessed"; + "globs_reprocessed" => ?globs_reprocessed, + "globs_already_reprocessed" => ?globs_already_reprocessed, + ); + } + + // Select the next event that has yet to be dispatched in order of + // creation, until there are none left in need of dispatching. + while let Some(event) = + self.datastore.webhook_event_select_next_for_dispatch(opctx).await? + { + slog::trace!( + &opctx.log, + "dispatching webhook event..."; + "event_id" => ?event.id(), + "event_class" => %event.event_class, + ); + + // Okay, we found an event that needs to be dispatched. Next, get + // list the webhook receivers subscribed to this event class and + // create delivery records for them. + let rxs = match self + .datastore + .webhook_rx_list_subscribed_to_event(&opctx, event.event_class) + .await + { + Ok(rxs) => rxs, + Err(error) => { + const MSG: &str = + "failed to list webhook receivers subscribed to event"; + slog::error!( + &opctx.log, + "{MSG}"; + "event_id" => ?event.id(), + "event_class" => %event.event_class, + "error" => &error, + ); + status.errors.push(format!( + "{MSG} {} ({}): {error}", + event.id(), + event.event_class + )); + // We weren't able to find receivers for this event, so + // *don't* mark it as dispatched --- it's someone else's + // problem now. 
+ continue; + } + }; + + let deliveries: Vec = rxs.into_iter().map(|(rx, sub)| { + slog::trace!(&opctx.log, "webhook receiver is subscribed to event"; + "rx_name" => %rx.name(), + "rx_id" => ?rx.id(), + "event_id" => ?event.id(), + "event_class" => %event.event_class, + "glob" => ?sub.glob, + ); + WebhookDelivery::new(&event, &rx.id(), WebhookDeliveryTrigger::Event) + }).collect(); + + let subscribed = if !deliveries.is_empty() { + let subscribed = deliveries.len(); + let dispatched = match self + .datastore + .webhook_delivery_create_batch(&opctx, deliveries) + .await + { + Ok(created) => created, + Err(error) => { + slog::error!(&opctx.log, "failed to insert webhook deliveries"; + "event_id" => ?event.id(), + "event_class" => %event.event_class, + "error" => %error, + "num_subscribed" => ?subscribed, + ); + status.errors.push(format!("failed to insert {subscribed} webhook deliveries for event {} ({}): {error}", event.id(), event.event_class)); + // We weren't able to create deliveries for this event, so + // *don't* mark it as dispatched. 
+ continue; + } + }; + status.dispatched.push(WebhookDispatched { + event_id: event.id(), + subscribed, + dispatched, + }); + slog::debug!( + &opctx.log, + "dispatched webhook event"; + "event_id" => ?event.id(), + "event_class" => %event.event_class, + "num_subscribed" => subscribed, + "num_dispatched" => dispatched, + ); + subscribed + } else { + slog::debug!( + &opctx.log, + "no webhook receivers subscribed to event"; + "event_id" => ?event.id(), + "event_class" => %event.event_class, + ); + status.no_receivers.push(event.id()); + 0 + }; + + if let Err(error) = self + .datastore + .webhook_event_mark_dispatched(&opctx, &event.id(), subscribed) + .await + { + slog::error!(&opctx.log, "failed to mark webhook event as dispatched"; + "event_id" => ?event.id(), + "event_class" => %event.event_class, + "error" => %error, + "num_subscribed" => subscribed, + ); + status.errors.push(format!("failed to mark webhook event {} ({}) as dispatched: {error}", event.id(), event.event_class)); + } + } + Ok(()) + } +} + +#[cfg(test)] +mod test { + use super::*; + use async_bb8_diesel::AsyncRunQueryDsl; + use diesel::prelude::*; + use nexus_db_queries::db; + use nexus_test_utils_macros::nexus_test; + use omicron_common::api::external::IdentityMetadataCreateParams; + use omicron_uuid_kinds::WebhookEventUuid; + use omicron_uuid_kinds::WebhookReceiverUuid; + + type ControlPlaneTestContext = + nexus_test_utils::ControlPlaneTestContext; + + // Tests that stale webhook event class globs are reprocessed prior to event + // dispatching. 
+ #[nexus_test(server = crate::Server)] + async fn test_glob_reprocessing(cptestctx: &ControlPlaneTestContext) { + use nexus_db_model::schema::webhook_receiver::dsl as rx_dsl; + use nexus_db_model::schema::webhook_rx_event_glob::dsl as glob_dsl; + use nexus_db_model::schema::webhook_rx_subscription::dsl as subscription_dsl; + + let nexus = &cptestctx.server.server_context().nexus; + let datastore = nexus.datastore(); + let opctx = OpContext::for_tests( + cptestctx.logctx.log.clone(), + datastore.clone(), + ); + let rx_id = WebhookReceiverUuid::new_v4(); + let conn = datastore + .pool_connection_for_tests() + .await + .expect("can't get ye pool_connection_for_tests"); + + // Unfortunately, we've gotta hand-create the receiver and its + // subscriptions, so that we can create a set of globs that differs from + // those generated by the currrent schema. + diesel::insert_into(rx_dsl::webhook_receiver) + .values(db::model::WebhookReceiver { + identity: db::model::WebhookReceiverIdentity::new( + rx_id, + IdentityMetadataCreateParams { + name: "my-cool-webhook".parse().unwrap(), + description: "it's my cool webhook".to_string(), + }, + ), + + endpoint: "http://webhooks.elizas.website".parse().unwrap(), + secret_gen: db::model::Generation::new(), + subscription_gen: db::model::Generation::new(), + }) + .execute_async(&*conn) + .await + .expect("receiver entry should create"); + + const GLOB_PATTERN: &str = "test.*.bar"; + let glob = GLOB_PATTERN + .parse::() + .expect("'test.*.bar should be an acceptable glob"); + let mut glob = db::model::WebhookRxEventGlob::new(rx_id, glob); + // Just make something up that's obviously outdated... 
+ glob.schema_version = db::model::SemverVersion::new(100, 0, 0); + diesel::insert_into(glob_dsl::webhook_rx_event_glob) + .values(glob.clone()) + .execute_async(&*conn) + .await + .expect("should insert glob entry"); + diesel::insert_into(subscription_dsl::webhook_rx_subscription) + .values( + // Pretend `test.quux.bar` doesn't exist yet + db::model::WebhookRxSubscription::for_glob( + &glob, + db::model::WebhookEventClass::TestFooBar, + ), + ) + .execute_async(&*conn) + .await + .expect("should insert glob entry"); + // Also give the webhook receiver a secret just so everything + // looks normalish. + let (authz_rx, _) = db::lookup::LookupPath::new(&opctx, datastore) + .webhook_receiver_id(rx_id) + .fetch() + .await + .expect("webhook rx should be there"); + datastore + .webhook_rx_secret_create( + &opctx, + &authz_rx, + db::model::WebhookSecret::new(rx_id, "TRUSTNO1".to_string()), + ) + .await + .expect("cant insert ye secret???"); + + // OKAY GREAT NOW THAT WE DID ALL THAT STUFF let's see if it actually + // works... + + // N.B. that we are using the `DataStore::webhook_event_create` method + // rather than `Nexus::webhook_event_publish` (the expected entrypoint + // to publishing a webhook event) because `webhook_event_publish` also + // activates the dispatcher task, and for this test, we would like to be + // responsible for activating it. 
+ let event_id = WebhookEventUuid::new_v4(); + datastore + .webhook_event_create( + &opctx, + event_id, + db::model::WebhookEventClass::TestQuuxBar, + serde_json::json!({"msg": "help im trapped in a webhook event factory"}), + ) + .await + .expect("creating the event should work"); + + // okay now do the thing + let mut status = WebhookDispatcherStatus { + globs_reprocessed: Default::default(), + glob_version: SCHEMA_VERSION, + dispatched: Vec::new(), + errors: Vec::new(), + no_receivers: Vec::new(), + }; + + let mut task = WebhookDispatcher::new( + datastore.clone(), + nexus.background_tasks.task_webhook_deliverator.clone(), + ); + task.actually_activate(&opctx, &mut status) + .await + .expect("activation should succeed"); + + // The globs should have been reprocessed, creating a subscription to + // `test.quux.bar`. + let subscriptions = subscription_dsl::webhook_rx_subscription + .filter(subscription_dsl::rx_id.eq(rx_id.into_untyped_uuid())) + .load_async::(&*conn) + .await + .expect("should be able to get subscriptions") + .into_iter() + .map(|sub| { + // throw away the "time_created" fields so that assertions are + // easier... 
+ assert_eq!( + sub.glob.as_deref(), + Some(GLOB_PATTERN), + "found a subscription to {} that was not from our glob: {sub:?}", + sub.event_class, + ); + sub.event_class + }).collect::>(); + assert_eq!(subscriptions.len(), 2); + assert!( + subscriptions.contains(&db::model::WebhookEventClass::TestFooBar), + "subscription to test.foo.bar should exist; subscriptions: \ + {subscriptions:?}", + ); + assert!( + subscriptions.contains(&db::model::WebhookEventClass::TestQuuxBar), + "subscription to test.quux.bar should exist; subscriptions: \ + {subscriptions:?}", + ); + let rx_reprocessed_globs = status.globs_reprocessed.get(&rx_id).expect( + "expected there to be an entry in status.globs_reprocessed \ + for our glob", + ); + let reprocessed_entry = dbg!(rx_reprocessed_globs).get(GLOB_PATTERN); + assert!( + matches!( + reprocessed_entry, + Some(Ok(WebhookGlobStatus::Reprocessed { .. })) + ), + "glob status should be 'reprocessed'" + ); + + // There should now be a delivery entry for the event we published. + // + // Use `webhook_rx_delivery_list` rather than + // `webhook_rx_delivery_list_ready`, even though it's a bit more + // complex due to requiring pagination. This is because the + // webhook_deliverator background task may have activated and might + // attempt to deliver the event, making it no longer show up in the + // "ready" query. 
+ let mut paginator = Paginator::new(db::datastore::SQL_BATCH_SIZE); + let mut deliveries = Vec::new(); + while let Some(p) = paginator.next() { + let batch = datastore + .webhook_rx_delivery_list( + &opctx, + &rx_id, + &[WebhookDeliveryTrigger::Event], + Vec::new(), + &p.current_pagparams(), + ) + .await + .unwrap(); + paginator = p.found_batch(&batch, &|(d, _, _)| { + (d.time_created, d.id.into_untyped_uuid()) + }); + deliveries.extend(batch); + } + let event = + deliveries.iter().find(|(d, _, _)| d.event_id == event_id.into()); + assert!( + dbg!(event).is_some(), + "delivery entry for dispatched event must exist" + ); + } +} diff --git a/nexus/src/app/mod.rs b/nexus/src/app/mod.rs index 03f41cc4261..ac69cc9f68f 100644 --- a/nexus/src/app/mod.rs +++ b/nexus/src/app/mod.rs @@ -92,6 +92,7 @@ mod volume; mod vpc; mod vpc_router; mod vpc_subnet; +mod webhook; // Sagas are not part of the "Nexus" implementation, but they are // application logic. @@ -171,6 +172,13 @@ pub struct Nexus { /// Client to the timeseries database. timeseries_client: oximeter_db::Client, + /// `reqwest` client used for webhook delivery requests. + /// + /// This lives on the Nexus struct as we would like to use the same client + /// pool for the webhook deliverator background task and the webhook probe + /// API. + webhook_delivery_client: reqwest::Client, + /// Contents of the trusted root role for the TUF repository. 
#[allow(dead_code)] updates_config: Option, @@ -470,6 +478,11 @@ impl Nexus { )) }; + let webhook_delivery_client = + webhook::delivery_client(&external_resolver).map_err(|e| { + format!("failed to build webhook delivery client: {e}") + })?; + let nexus = Nexus { id: config.deployment.id, rack_id, @@ -484,6 +497,7 @@ impl Nexus { populate_status, reqwest_client, timeseries_client, + webhook_delivery_client, updates_config: config.pkg.updates.clone(), tunables: config.pkg.tunables.clone(), opctx_alloc: OpContext::for_background( @@ -564,6 +578,9 @@ impl Nexus { resolver, saga_starter: task_nexus.sagas.clone(), producer_registry: task_registry, + webhook_delivery_client: task_nexus + .webhook_delivery_client + .clone(), saga_recovery: SagaRecoveryHelpers { recovery_opctx: saga_recovery_opctx, diff --git a/nexus/src/app/webhook.rs b/nexus/src/app/webhook.rs new file mode 100644 index 00000000000..f0fcb25012b --- /dev/null +++ b/nexus/src/app/webhook.rs @@ -0,0 +1,1030 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! Webhooks +//! +//! # Webhooks: Theory and Practice +//! +//! [RFD 538] describes the user-facing interface for Oxide rack webhooks. +//! However, that RFD does not describe internal implementation details of the +//! webhook implementation, the key players, their roles, and interactions. +//! Instead, the implementation of webhooks is discussed here. +//! +//! ## Dramatis Personae +//! +//! There are two key elements in our webhook design: +//! +//! + **Webhook receivers** are the endpoints external to the rack to which +//! webhook requests are sent. In the context of the control plane, the term +//! "webhook receiver" refers to the configuration and state associated with +//! such an endpoint. Most other entities in the webhook API are child +//! 
resources of the [`WebhookReceiver`] API resource. +//! +//! + **Webhook events** represent events in the system for which webhook +//! notifications are generated and sent to receivers. The control plane +//! calls the [`Nexus::webhook_event_publish`] method to record a new event +//! and publish it to receivers. +//! +//! Events are categorized into [event classes], as described in RFD +//! 538. Receivers *subscribe* to these classes, indicating that they wish to be +//! notified when an event with a particular class occurs. +//! +//! Two background tasks implement the reliable persistent workflow of +//! determining what events should be sent to what receiver, and performing the +//! actual HTTP requests to send the event to the receiver: +//! +//! + The `webhook_dispatcher` task is responsible for *dispatching* events to +//! receivers. For each event that has not yet been dispatched, the task +//! queries the database for webhook receivers that have subscribed to that +//! event, and creates a *delivery record* in the `webhook_delivery` table, +//! indicating that the event should be sent to that receiver. +//! +//! + The `webhook_deliverator`[^1] task reads these delivery records and sends +//! HTTP requests to the receiver endpoint for each delivery that is +//! currently in flight. The deliverator is responsible for recording the +//! status of each *delivery attempt*. Retries and retry backoff are +//! the responsibility of the deliverator. +//! +//! ## Event Subscriptions +//! +//! A receiver's subscriptions take one of two forms: +//! +//! + **Exact** subscriptions are when a receiver subscribes to a specific event +//! class string. These are represented by entries in the +//! `webhook_rx_event_subscription` table in CockroachDB. +//! +//! + **Glob** subscriptions include wildcard segments that may match multiple +//! values. The globbing syntax is discussed in greater detail in RFD 538. +//! +//! 
We implement glob subscriptions by evaluating the glob against the list of +//! known webhook event classes when the glob is *created*, and creating +//! corresponding exact subscriptions for each event class that matches the +//! glob. This way, we need not perform complex pattern matching in the +//! database when dispatching an event, and can instead simply query for the +//! existence of a record in the `webhook_rx_event_subscription` table. Each +//! exact subscription entry generated by a glob records which glob it came +//! from, which is used when a receiver's subscriptions change. +//! +//! Because the generation of exact subscriptions from globs occurs when the +//! subscription is created, globs must be *reprocessed* when new event classes +//! are added to the system, generating new exact subscriptions for any +//! newly-added event classes that match the glob, and potentially removing +//! subscriptions to any defunct event classes. This could occur in any software +//! release where new kinds of events are implemented. Therefore, when glob +//! subscriptions are created, we record the database schema version as part of +//! that glob subscription. Because event classes are represented as a SQL +//! `enum` type, we know that any change to the event classes should change the +//! database schema version as well. This way, we can detect whether a glob's +//! list of subscriptions is up to date. The `webhook_dispatcher` background +//! task will query the database for any globs which were last reprocessed at +//! earlier database schema versions and reprocess those globs prior to +//! attempting to dispatch events to receivers. +//! +//! ## Deliveries, Delivery Attempts, and Liveness Probes +//! +//! A *delivery* represents the process of sending HTTP request(s) representing +//! a webhook event to a receiver. Failed HTTP requests are retried up to two +//! times, so a delivery may consist of up to three *delivery attempts*. +//! 
Each time the `webhook_deliverator` background task is activated, it +//! searches for deliveries which have not yet succeeded or permanently failed, +//! which are not presently being delivered by another Nexus, and for which the +//! backoff period for any prior failed delivery attempts has elapsed. It then +//! sends an HTTP request to the webhook receiver, and records the result, +//! creating a new `webhook_delivery_attempt` record and updating the +//! `webhook_delivery` record. +//! +//! Multiple Nexii use an advisory lease mechanism to avoid attempting to +//! deliver the same event simultaneously, by setting their UUID and a timestamp +//! on the `webhook_delivery` record. Because webhook delivery is +//! at-least-once, this lease mechanism is NOT REQUIRED FOR CORRECTNESS IN ANY +//! WAY, Andrew. :) Instead, it serves only to reduce duplicate work. +//! Therefore, should a Nexus acquire a lease on a delivery and fail to either +//! complete the delivery attempt within a period of time, another Nexus is +//! permitted to clobber its lease. +//! +//! Deliveries are created either because an event occurred and a webhook +//! receiver is subscribed to it, or because we were asked to resend a previous +//! delivery that failed permanently by exhausting its retry budget. Initial +//! deliveries are created by activations of the webhook dispatcher background +//! task. When creating a delivery, the data associated with the event record +//! in the `webhook_event` table is processed to produce the data payload that +//! will actually be sent to the receiver. Data which the receiver's service +//! account is not authorized to read is filtered out of the payload.[^2] +//! +//! Re-delivery of an event can be requested either via the event resend API +//! endpoint, or by a *liveness probe* succeeding. Liveness probes are +//! synthetic delivery requests sent to a webhook receiver to check whether it's +//! actually able to receive an event. 
They are triggered via the +//! [`Nexus::webhook_receiver_probe`] API endpoint. A probe may optionally +//! request that any events for which all past deliveries have failed be resent +//! if it succeeds. Delivery records are also created to represent the outcome +//! of a probe. +//! +//! [RFD 538]: https://rfd.shared.oxide.computer/538 +//! [event classes]: https://rfd.shared.oxide.computer/rfd/538#_event_classes +//! +//! [^1]: Read _Snow Crash_, if you haven't already. +//! [^2]: Presently, all webhook receivers have the fleet.viewer role, so +//! this "filtering" doesn't actually do anything. When webhook receivers +//! with more restrictive permissions are implemented, please remember to +//! delete this footnote. + +use crate::Nexus; +use crate::app::external_dns; +use anyhow::Context; +use chrono::DateTime; +use chrono::TimeDelta; +use chrono::Utc; +use hmac::{Hmac, Mac}; +use http::HeaderName; +use http::HeaderValue; +use nexus_db_queries::authz; +use nexus_db_queries::context::OpContext; +use nexus_db_queries::db; +use nexus_db_queries::db::lookup; +use nexus_db_queries::db::lookup::LookupPath; +use nexus_db_queries::db::model::SqlU8; +use nexus_db_queries::db::model::WebhookDelivery; +use nexus_db_queries::db::model::WebhookDeliveryAttempt; +use nexus_db_queries::db::model::WebhookDeliveryAttemptResult; +use nexus_db_queries::db::model::WebhookDeliveryState; +use nexus_db_queries::db::model::WebhookDeliveryTrigger; +use nexus_db_queries::db::model::WebhookEvent; +use nexus_db_queries::db::model::WebhookEventClass; +use nexus_db_queries::db::model::WebhookReceiver; +use nexus_db_queries::db::model::WebhookReceiverConfig; +use nexus_db_queries::db::model::WebhookSecret; +use nexus_types::external_api::params; +use nexus_types::external_api::views; +use nexus_types::identity::Resource; +use omicron_common::api::external::CreateResult; +use omicron_common::api::external::DataPageParams; +use omicron_common::api::external::DeleteResult; +use 
omicron_common::api::external::Error; +use omicron_common::api::external::ListResultVec; +use omicron_common::api::external::LookupResult; +use omicron_common::api::external::NameOrId; +use omicron_common::api::external::UpdateResult; +use omicron_common::api::external::http_pagination::PaginatedBy; +use omicron_uuid_kinds::GenericUuid; +use omicron_uuid_kinds::OmicronZoneUuid; +use omicron_uuid_kinds::WebhookDeliveryUuid; +use omicron_uuid_kinds::WebhookEventUuid; +use omicron_uuid_kinds::WebhookReceiverUuid; +use omicron_uuid_kinds::WebhookSecretUuid; +use sha2::Sha256; +use std::sync::Arc; +use std::time::Duration; +use std::time::Instant; +use uuid::Uuid; + +impl Nexus { + /// Publish a new webhook event, with the provided `id`, `event_class`, and + /// JSON data payload. + /// + /// If this method returns `Ok`, the event has been durably recorded in + /// CockroachDB. Once the new event record is inserted into the database, + /// the webhook dispatcher background task is activated to dispatch the + /// event to receivers. However, if (for whatever reason) this Nexus fails + /// to do that, the event remains durably in the database to be dispatched + /// and delivered by someone else. + pub async fn webhook_event_publish( + &self, + opctx: &OpContext, + id: WebhookEventUuid, + event_class: WebhookEventClass, + event: serde_json::Value, + ) -> Result { + let event = self + .datastore() + .webhook_event_create(opctx, id, event_class, event) + .await?; + slog::debug!( + &opctx.log, + "enqueued webhook event"; + "event_id" => ?id, + "event_class" => %event.event_class, + "time_created" => ?event.identity.time_created, + ); + + // Once the event has been inserted, activate the dispatcher task to + // ensure it's propagated to receivers. 
+ self.background_tasks.task_webhook_dispatcher.activate(); + + Ok(event) + } + + // + // Lookups + // + + pub fn webhook_receiver_lookup<'a>( + &'a self, + opctx: &'a OpContext, + webhook_selector: params::WebhookReceiverSelector, + ) -> LookupResult> { + match webhook_selector.receiver { + NameOrId::Id(id) => { + let webhook = LookupPath::new(opctx, &self.db_datastore) + .webhook_receiver_id( + WebhookReceiverUuid::from_untyped_uuid(id), + ); + Ok(webhook) + } + NameOrId::Name(name) => { + let webhook = LookupPath::new(opctx, &self.db_datastore) + .webhook_receiver_name_owned(name.into()); + Ok(webhook) + } + } + } + + pub fn webhook_secret_lookup<'a>( + &'a self, + opctx: &'a OpContext, + secret_selector: params::WebhookSecretSelector, + ) -> LookupResult> { + let lookup = LookupPath::new(&opctx, self.datastore()) + .webhook_secret_id(WebhookSecretUuid::from_untyped_uuid( + secret_selector.secret_id, + )); + Ok(lookup) + } + + pub fn webhook_event_lookup<'a>( + &'a self, + opctx: &'a OpContext, + params::WebhookEventSelector { event_id }: params::WebhookEventSelector, + ) -> LookupResult> { + let event = LookupPath::new(opctx, &self.db_datastore) + .webhook_event_id(WebhookEventUuid::from_untyped_uuid(event_id)); + Ok(event) + } + + // + // Event class API + // + pub async fn webhook_event_class_list( + &self, + opctx: &OpContext, + filter: params::EventClassFilter, + pagparams: DataPageParams<'_, params::EventClassPage>, + ) -> ListResultVec { + opctx + .authorize( + authz::Action::ListChildren, + &authz::WEBHOOK_EVENT_CLASS_LIST, + ) + .await?; + Self::actually_list_event_classes(filter, pagparams) + } + + // This is factored out to avoid having to make a whole Nexus to test it. 
+ fn actually_list_event_classes( + params::EventClassFilter { filter }: params::EventClassFilter, + pagparams: DataPageParams<'_, params::EventClassPage>, + ) -> ListResultVec { + let regex = if let Some(glob) = filter { + let glob = db::model::WebhookGlob::try_from(glob)?; + let re = regex::Regex::new(&glob.regex).map_err(|e| { + // This oughtn't happen, provided the code for producing the + // regex for a glob is correct. + Error::InternalError { + internal_message: format!( + "valid event class globs ({glob:?}) should always \ + produce a valid regex, and yet: {e:?}" + ), + } + })?; + Some(re) + } else { + None + }; + + // If we're resuming a previous scan, figure out where to start. + let start = if let Some(params::EventClassPage { last_seen }) = + pagparams.marker + { + let start = WebhookEventClass::ALL_CLASSES + .iter() + .enumerate() + .find_map(|(idx, class)| { + if class.as_str() == last_seen { Some(idx) } else { None } + }); + match start { + Some(start) => start + 1, + None => return Ok(Vec::new()), + } + } else { + 0 + }; + + // This shouldn't ever happen, but...don't panic I guess. + if start > WebhookEventClass::ALL_CLASSES.len() { + return Ok(Vec::new()); + } + + let result = WebhookEventClass::ALL_CLASSES[start..] + .iter() + .filter_map(|&class| { + // Skip test classes, as they should not be used in the public + // API, except in test builds, where we need them + // for, you know... testing... 
+ if !cfg!(test) && class.is_test() { + return None; + } + if let Some(ref regex) = regex { + if !regex.is_match(class.as_str()) { + return None; + } + } + Some(class.into()) + }) + .take(pagparams.limit.get() as usize) + .collect::>(); + Ok(result) + } + + // + // Receiver configuration API methods + // + + pub async fn webhook_receiver_list( + &self, + opctx: &OpContext, + pagparams: &PaginatedBy<'_>, + ) -> ListResultVec { + opctx.authorize(authz::Action::ListChildren, &authz::FLEET).await?; + self.datastore().webhook_rx_list(opctx, pagparams).await + } + + pub async fn webhook_receiver_config_fetch( + &self, + opctx: &OpContext, + rx: lookup::WebhookReceiver<'_>, + ) -> LookupResult { + let (authz_rx, rx) = rx.fetch().await?; + let (events, secrets) = + self.datastore().webhook_rx_config_fetch(opctx, &authz_rx).await?; + Ok(WebhookReceiverConfig { rx, secrets, events }) + } + + pub async fn webhook_receiver_create( + &self, + opctx: &OpContext, + params: params::WebhookCreate, + ) -> CreateResult { + // TODO(eliza): validate endpoint URI; reject underlay network IPs for + // SSRF prevention... 
+ self.datastore().webhook_rx_create(&opctx, params).await + } + + pub async fn webhook_receiver_update( + &self, + opctx: &OpContext, + rx: lookup::WebhookReceiver<'_>, + params: params::WebhookReceiverUpdate, + ) -> UpdateResult<()> { + let (authz_rx, rx) = rx.fetch_for(authz::Action::Modify).await?; + let _ = self + .datastore() + .webhook_rx_update(opctx, &authz_rx, &rx, params) + .await?; + Ok(()) + } + + pub async fn webhook_receiver_delete( + &self, + opctx: &OpContext, + rx: lookup::WebhookReceiver<'_>, + ) -> DeleteResult { + let (authz_rx, db_rx) = rx.fetch_for(authz::Action::Delete).await?; + self.datastore().webhook_rx_delete(&opctx, &authz_rx, &db_rx).await + } + + // + // Receiver secret API methods + // + + pub async fn webhook_receiver_secrets_list( + &self, + opctx: &OpContext, + rx: lookup::WebhookReceiver<'_>, + ) -> ListResultVec { + let (authz_rx,) = rx.lookup_for(authz::Action::ListChildren).await?; + self.datastore().webhook_rx_secret_list(opctx, &authz_rx).await + } + + pub async fn webhook_receiver_secret_add( + &self, + opctx: &OpContext, + rx: lookup::WebhookReceiver<'_>, + secret: String, + ) -> Result { + let (authz_rx,) = rx.lookup_for(authz::Action::CreateChild).await?; + let secret = WebhookSecret::new(authz_rx.id(), secret); + let WebhookSecret { identity, .. 
} = self + .datastore() + .webhook_rx_secret_create(opctx, &authz_rx, secret) + .await?; + let secret_id = identity.id; + slog::info!( + &opctx.log, + "added secret to webhook receiver"; + "rx_id" => ?authz_rx.id(), + "secret_id" => ?secret_id, + ); + Ok(views::WebhookSecretId { id: secret_id.into_untyped_uuid() }) + } + + pub async fn webhook_receiver_secret_delete( + &self, + opctx: &OpContext, + secret: lookup::WebhookSecret<'_>, + ) -> DeleteResult { + let (authz_rx, authz_secret) = + secret.lookup_for(authz::Action::Delete).await?; + self.datastore() + .webhook_rx_secret_delete(&opctx, &authz_rx, &authz_secret) + .await?; + slog::info!( + &opctx.log, + "deleted secret from webhook receiver"; + "rx_id" => ?authz_rx.id(), + "secret_id" => ?authz_secret.id(), + ); + Ok(()) + } + + // + // Receiver event delivery API methods + // + + pub async fn webhook_receiver_probe( + &self, + opctx: &OpContext, + rx: lookup::WebhookReceiver<'_>, + params: params::WebhookProbe, + ) -> Result { + let (authz_rx, rx) = rx.fetch_for(authz::Action::ListChildren).await?; + let rx_id = authz_rx.id(); + let datastore = self.datastore(); + let secrets = + datastore.webhook_rx_secret_list(opctx, &authz_rx).await?; + let mut client = ReceiverClient::new( + &self.webhook_delivery_client, + secrets, + &rx, + self.id, + )?; + let mut delivery = WebhookDelivery::new_probe(&rx_id, &self.id); + + const CLASS: WebhookEventClass = WebhookEventClass::Probe; + + let attempt = + match client.send_delivery_request(opctx, &delivery, CLASS).await { + Ok(attempt) => attempt, + Err(e) => { + slog::error!( + &opctx.log, + "failed to probe webhook receiver"; + "rx_id" => %authz_rx.id(), + "rx_name" => %rx.name(), + "delivery_id" => %delivery.id, + "error" => %e, + ); + return Err(Error::InternalError { + internal_message: e.to_string(), + }); + } + }; + + // Update the delivery state based on the result of the probe attempt. + // Otherwise, it will still appear "pending", which is obviously wrong. 
+ delivery.state = if attempt.result.is_failed() { + WebhookDeliveryState::Failed + } else { + WebhookDeliveryState::Delivered + }; + + let resends_started = if params.resend + && attempt.result == WebhookDeliveryAttemptResult::Succeeded + { + slog::debug!( + &opctx.log, + "webhook liveness probe succeeded, resending failed deliveries..."; + "rx_id" => %authz_rx.id(), + "rx_name" => %rx.name(), + "delivery_id" => %delivery.id, + ); + + let deliveries = datastore + .webhook_rx_list_resendable_events(opctx, &rx_id) + .await + .map_err(|e| { + e.internal_context("error listing events to resend") + })? + .into_iter() + .map(|event| { + WebhookDelivery::new( + &event, + &rx_id, + WebhookDeliveryTrigger::Resend, + ) + }) + .collect::>(); + slog::trace!( + &opctx.log, + "found {} failed events to resend", deliveries.len(); + "rx_id" => %authz_rx.id(), + "rx_name" => %rx.name(), + "delivery_id" => %delivery.id, + ); + let started = datastore + .webhook_delivery_create_batch(&opctx, deliveries) + .await + .map_err(|e| { + e.internal_context( + "error creating deliveries to resend failed events", + ) + })?; + + if started > 0 { + slog::info!( + &opctx.log, + "webhook liveness probe succeeded, created {started} re-deliveries"; + "rx_id" => %authz_rx.id(), + "rx_name" => %rx.name(), + "delivery_id" => %delivery.id, + ); + // If new deliveries were created, activate the webhook + // deliverator background task to start actually delivering + // them. 
+ self.background_tasks.task_webhook_deliverator.activate(); + } + Some(started) + } else { + None + }; + + Ok(views::WebhookProbeResult { + probe: delivery.to_api_delivery(CLASS, &[attempt]), + resends_started, + }) + } + + pub async fn webhook_receiver_event_resend( + &self, + opctx: &OpContext, + rx: lookup::WebhookReceiver<'_>, + event: lookup::WebhookEvent<'_>, + ) -> CreateResult { + let (authz_rx,) = rx.lookup_for(authz::Action::CreateChild).await?; + let (authz_event, event) = event.fetch().await?; + let datastore = self.datastore(); + + let is_subscribed = datastore + .webhook_rx_is_subscribed_to_event(opctx, &authz_rx, &authz_event) + .await?; + if !is_subscribed { + return Err(Error::invalid_request(format!( + "cannot resend event: receiver is not subscribed to the '{}' \ + event class", + event.event_class, + ))); + } + + let delivery = WebhookDelivery::new( + &event, + &authz_rx.id(), + WebhookDeliveryTrigger::Resend, + ); + let delivery_id = delivery.id.into(); + + if let Err(e) = + datastore.webhook_delivery_create_batch(opctx, vec![delivery]).await + { + slog::error!( + &opctx.log, + "failed to create new delivery to resend webhook event"; + "rx_id" => ?authz_rx.id(), + "event_id" => ?authz_event.id(), + "event_class" => %event.event_class, + "delivery_id" => ?delivery_id, + "error" => %e, + ); + return Err(e); + } + + slog::info!( + &opctx.log, + "resending webhook event"; + "rx_id" => ?authz_rx.id(), + "event_id" => ?authz_event.id(), + "event_class" => %event.event_class, + "delivery_id" => ?delivery_id, + ); + + self.background_tasks.task_webhook_deliverator.activate(); + Ok(delivery_id) + } + + pub async fn webhook_receiver_delivery_list( + &self, + opctx: &OpContext, + rx: lookup::WebhookReceiver<'_>, + filter: params::WebhookDeliveryStateFilter, + pagparams: &DataPageParams<'_, (DateTime, Uuid)>, + ) -> ListResultVec { + let (authz_rx,) = rx.lookup_for(authz::Action::ListChildren).await?; + let only_states = if filter.include_all() { + 
Vec::new() + } else { + let mut states = Vec::with_capacity(3); + if filter.include_failed() { + states.push(WebhookDeliveryState::Failed); + } + if filter.include_pending() { + states.push(WebhookDeliveryState::Pending); + } + if filter.include_delivered() { + states.push(WebhookDeliveryState::Delivered); + } + states + }; + let deliveries = self + .datastore() + .webhook_rx_delivery_list( + opctx, + &authz_rx.id(), + // No probes; they could have their own list endpoint later... + &[ + WebhookDeliveryTrigger::Event, + WebhookDeliveryTrigger::Resend, + ], + only_states, + pagparams, + ) + .await? + .into_iter() + .map(|(delivery, class, attempts)| { + delivery.to_api_delivery(class, &attempts) + }) + .collect(); + Ok(deliveries) + } +} + +/// Construct a [`reqwest::Client`] configured for webhook delivery requests. +pub(super) fn delivery_client( + external_dns: &Arc, +) -> Result { + reqwest::Client::builder() + // Per [RFD 538 § 4.3.1][1], webhook delivery does *not* follow + // redirects. + // + // [1]: https://rfd.shared.oxide.computer/rfd/538#_success + .redirect(reqwest::redirect::Policy::none()) + // Per [RFD 538 § 4.3.2][1], the client must be able to connect to a + // webhook receiver endpoint within 10 seconds, or the delivery is + // considered failed. + // + // [1]: https://rfd.shared.oxide.computer/rfd/538#delivery-failure + .connect_timeout(Duration::from_secs(10)) + // Per [RFD 538 § 4.3.2][1], a 30-second timeout is applied to + // each webhook delivery request. + // + // [1]: https://rfd.shared.oxide.computer/rfd/538#delivery-failure + .timeout(Duration::from_secs(30)) + .dns_resolver(external_dns.clone()) + .build() +} + +/// Everything necessary to send a delivery request to a webhook receiver. +/// +/// This is its' own thing, rather than part of the `webhook_deliverator` +/// background task, as it is used both by the deliverator RPW and by the Nexus +/// API in the liveness probe endpoint. 
+pub(crate) struct ReceiverClient<'a> { + client: &'a reqwest::Client, + rx: &'a WebhookReceiver, + secrets: Vec<(WebhookSecretUuid, Hmac)>, + hdr_rx_id: http::HeaderValue, + nexus_id: OmicronZoneUuid, +} + +impl<'a> ReceiverClient<'a> { + pub(crate) fn new( + client: &'a reqwest::Client, + secrets: impl IntoIterator, + rx: &'a WebhookReceiver, + nexus_id: OmicronZoneUuid, + ) -> Result { + let secrets = secrets + .into_iter() + .map(|WebhookSecret { identity, secret, .. }| { + let mac = Hmac::::new_from_slice(secret.as_bytes()) + .expect("HMAC key can be any size; this should never fail"); + (identity.id.into(), mac) + }) + .collect::>(); + if secrets.is_empty() { + return Err(Error::invalid_request( + "receiver has no secrets, so delivery requests cannot be sent", + )); + } + let hdr_rx_id = HeaderValue::try_from(rx.id().to_string()) + .expect("UUIDs should always be a valid header value"); + Ok(Self { client, secrets, hdr_rx_id, rx, nexus_id }) + } + + pub(crate) async fn send_delivery_request( + &mut self, + opctx: &OpContext, + delivery: &WebhookDelivery, + event_class: WebhookEventClass, + ) -> Result { + const HDR_DELIVERY_ID: HeaderName = + HeaderName::from_static("x-oxide-delivery-id"); + const HDR_RX_ID: HeaderName = + HeaderName::from_static("x-oxide-webhook-id"); + const HDR_EVENT_ID: HeaderName = + HeaderName::from_static("x-oxide-event-id"); + const HDR_EVENT_CLASS: HeaderName = + HeaderName::from_static("x-oxide-event-class"); + const HDR_SIG: HeaderName = + HeaderName::from_static("x-oxide-signature"); + + #[derive(serde::Serialize, Debug)] + struct Payload<'a> { + event_class: WebhookEventClass, + event_id: WebhookEventUuid, + data: &'a serde_json::Value, + delivery: DeliveryMetadata<'a>, + } + + #[derive(serde::Serialize, Debug)] + struct DeliveryMetadata<'a> { + id: WebhookDeliveryUuid, + webhook_id: WebhookReceiverUuid, + sent_at: &'a str, + trigger: views::WebhookDeliveryTrigger, + } + + // okay, actually do the thing... 
+ let time_attempted = Utc::now(); + let sent_at = time_attempted.to_rfc3339(); + let payload = Payload { + event_class, + event_id: delivery.event_id.into(), + data: &delivery.payload, + delivery: DeliveryMetadata { + id: delivery.id.into(), + webhook_id: self.rx.id(), + sent_at: &sent_at, + trigger: delivery.triggered_by.into(), + }, + }; + // N.B. that we serialize the body "ourselves" rather than just + // passing it to `RequestBuilder::json` because we must access + // the serialized body in order to calculate HMAC signatures. + // This means we have to add the `Content-Type` ourselves below. + let body = match serde_json::to_vec(&payload) { + Ok(body) => body, + Err(e) => { + const MSG: &'static str = + "event payload could not be serialized"; + slog::error!( + &opctx.log, + "webhook {MSG}"; + "event_id" => %delivery.event_id, + "event_class" => %event_class, + "delivery_id" => %delivery.id, + "delivery_trigger" => %delivery.triggered_by, + "error" => %e, + ); + + // This really shouldn't happen --- we expect the event + // payload will always be valid JSON. We could *probably* + // just panic here unconditionally, but it seems nicer to + // try and do the other events. But, if there's ever a bug + // that breaks serialization for *all* webhook payloads, + // I'd like the tests to fail in a more obvious way than + // eventually timing out waiting for the event to be + // delivered ... + if cfg!(debug_assertions) { + panic!("{MSG}: {e}\npayload: {payload:#?}"); + } + return Err(e).context(MSG); + } + }; + let mut request = self + .client + .post(&self.rx.endpoint) + .header(HDR_RX_ID, self.hdr_rx_id.clone()) + .header(HDR_DELIVERY_ID, delivery.id.to_string()) + .header(HDR_EVENT_ID, delivery.event_id.to_string()) + .header(HDR_EVENT_CLASS, event_class.to_string()) + .header(http::header::CONTENT_TYPE, "application/json"); + + // For each secret assigned to this webhook, calculate the HMAC and add a signature header. 
+ for (secret_id, mac) in &mut self.secrets { + mac.update(&body); + let sig_bytes = mac.finalize_reset().into_bytes(); + let sig = hex::encode(&sig_bytes[..]); + request = request + .header(HDR_SIG, format!("a=sha256&id={secret_id}&s={sig}")); + } + let request = request.body(body).build(); + + let request = match request { + // We couldn't construct a request for some reason! This one's + // our fault, so don't penalize the receiver for it. + Err(e) => { + const MSG: &str = "failed to construct webhook request"; + slog::error!( + &opctx.log, + "{MSG}"; + "event_id" => %delivery.event_id, + "event_class" => %event_class, + "delivery_id" => %delivery.id, + "delivery_trigger" => %delivery.triggered_by, + "error" => %e, + "payload" => ?payload, + ); + return Err(e).context(MSG); + } + Ok(r) => r, + }; + let t0 = Instant::now(); + let result = self.client.execute(request).await; + let duration = t0.elapsed(); + let (delivery_result, status) = match result { + // Builder errors are our fault, that's weird! 
+ Err(e) if e.is_builder() => { + const MSG: &str = + "internal error constructing webhook delivery request"; + slog::error!( + &opctx.log, + "{MSG}"; + "event_id" => %delivery.event_id, + "event_class" => %event_class, + "delivery_id" => %delivery.id, + "delivery_trigger" => %delivery.triggered_by, + "error" => %e, + ); + return Err(e).context(MSG); + } + Err(e) => { + if let Some(status) = e.status() { + slog::warn!( + &opctx.log, + "webhook receiver endpoint returned an HTTP error"; + "event_id" => %delivery.event_id, + "event_class" => %event_class, + "delivery_id" => %delivery.id, + "delivery_trigger" => %delivery.triggered_by, + "response_status" => ?status, + "response_duration" => ?duration, + ); + ( + WebhookDeliveryAttemptResult::FailedHttpError, + Some(status), + ) + } else { + let result = if e.is_connect() { + WebhookDeliveryAttemptResult::FailedUnreachable + } else if e.is_timeout() { + WebhookDeliveryAttemptResult::FailedTimeout + } else if e.is_redirect() { + WebhookDeliveryAttemptResult::FailedHttpError + } else { + WebhookDeliveryAttemptResult::FailedUnreachable + }; + slog::warn!( + &opctx.log, + "webhook delivery request failed"; + "event_id" => %delivery.event_id, + "event_class" => %event_class, + "delivery_id" => %delivery.id, + "delivery_trigger" => %delivery.triggered_by, + "error" => %e, + ); + (result, None) + } + } + Ok(rsp) => { + let status = rsp.status(); + if status.is_success() { + slog::debug!( + &opctx.log, + "webhook event delivered successfully"; + "event_id" => %delivery.event_id, + "event_class" => %event_class, + "delivery_id" => %delivery.id, + "delivery_trigger" => %delivery.triggered_by, + "response_status" => ?status, + "response_duration" => ?duration, + ); + (WebhookDeliveryAttemptResult::Succeeded, Some(status)) + } else { + slog::warn!( + &opctx.log, + "webhook receiver endpoint returned an HTTP error"; + "event_id" => %delivery.event_id, + "event_class" => %event_class, + "delivery_id" => %delivery.id, + 
"delivery_trigger" => %delivery.triggered_by, + "response_status" => ?status, + "response_duration" => ?duration, + ); + ( + WebhookDeliveryAttemptResult::FailedHttpError, + Some(status), + ) + } + } + }; + // only include a response duration if we actually got a response back + let response_duration = status.map(|_| { + TimeDelta::from_std(duration).expect( + "because we set a 30-second response timeout, there is no \ + way a response duration could ever exceed the max \ + representable TimeDelta of `i64::MAX` milliseconds", + ) + }); + + Ok(WebhookDeliveryAttempt { + delivery_id: delivery.id, + rx_id: delivery.rx_id, + attempt: SqlU8::new(delivery.attempts.0 + 1), + result: delivery_result, + response_status: status.map(|s| s.as_u16() as i16), + response_duration, + time_created: chrono::Utc::now(), + deliverator_id: self.nexus_id.into(), + }) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::Nexus; + use std::num::NonZeroU32; + + #[test] + fn test_event_class_list() { + #[track_caller] + fn list( + filter: Option<&str>, + last_seen: Option<&str>, + limit: u32, + ) -> Vec { + let filter = params::EventClassFilter { + filter: dbg!(filter).map(ToString::to_string), + }; + let marker = dbg!(last_seen).map(|last_seen| { + params::EventClassPage { last_seen: last_seen.to_string() } + }); + let result = Nexus::actually_list_event_classes( + filter, + DataPageParams { + marker: marker.as_ref(), + direction: dropshot::PaginationOrder::Ascending, + limit: NonZeroU32::new(dbg!(limit)).unwrap(), + }, + ); + + // Throw away the description fields + dbg!(result) + .unwrap() + .into_iter() + .map(|view| view.name) + .collect::>() + } + + // Paginated class list, without a glob filter. 
+ let classes = list(None, None, 3); + assert_eq!(classes, &["probe", "test.foo", "test.foo.bar"]); + let classes = list(None, Some("test.foo.bar"), 3); + assert_eq!( + classes, + &["test.foo.baz", "test.quux.bar", "test.quux.bar.baz"] + ); + // Don't assert that a third list will return no more results, since + // more events may be added in the future, and we don't have a filter. + + // Try a filter for only `test.**` events. + let filter = Some("test.**"); + let classes = list(filter, None, 2); + assert_eq!(classes, &["test.foo", "test.foo.bar"]); + let classes = list(filter, Some("test.foo.bar"), 2); + assert_eq!(classes, &["test.foo.baz", "test.quux.bar"]); + let classes = list(filter, Some("test.quux.bar"), 2); + assert_eq!(classes, &["test.quux.bar.baz"]); + let classes = list(filter, Some("test.quux.bar.baz"), 2); + assert_eq!(classes, Vec::::new()); + } +} diff --git a/nexus/src/external_api/http_entrypoints.rs b/nexus/src/external_api/http_entrypoints.rs index 40da27505e7..20587d1d065 100644 --- a/nexus/src/external_api/http_entrypoints.rs +++ b/nexus/src/external_api/http_entrypoints.rs @@ -89,9 +89,11 @@ use omicron_common::api::external::http_pagination::PaginatedBy; use omicron_common::api::external::http_pagination::PaginatedById; use omicron_common::api::external::http_pagination::PaginatedByName; use omicron_common::api::external::http_pagination::PaginatedByNameOrId; +use omicron_common::api::external::http_pagination::PaginatedByTimeAndId; use omicron_common::api::external::http_pagination::ScanById; use omicron_common::api::external::http_pagination::ScanByName; use omicron_common::api::external::http_pagination::ScanByNameOrId; +use omicron_common::api::external::http_pagination::ScanByTimeAndId; use omicron_common::api::external::http_pagination::ScanParams; use omicron_common::api::external::http_pagination::data_page_params_for; use omicron_common::api::external::http_pagination::id_pagination; @@ -7667,4 +7669,351 @@ impl NexusExternalApi 
for NexusExternalApiImpl { .instrument_dropshot_handler(&rqctx, handler) .await } + + async fn webhook_event_class_list( + rqctx: RequestContext, + pag_params: Query< + PaginationParams, + >, + filter: Query, + ) -> Result>, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + + let query = pag_params.into_inner(); + let filter = filter.into_inner(); + let marker = match query.page { + WhichPage::First(_) => None, + WhichPage::Next(ref addr) => Some(addr), + }; + let pag_params = DataPageParams { + limit: rqctx.page_limit(&query)?, + direction: PaginationOrder::Ascending, + marker, + }; + let event_classes = nexus + .webhook_event_class_list(&opctx, filter, pag_params) + .await?; + Ok(HttpResponseOk(ResultsPage::new( + event_classes, + &EmptyScanParams {}, + |class: &views::EventClass, _| class.name.clone(), + )?)) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } + + async fn webhook_receiver_list( + rqctx: RequestContext, + query_params: Query, + ) -> Result>, HttpError> + { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + + let query = query_params.into_inner(); + let pagparams = data_page_params_for(&rqctx, &query)?; + let scan_params = ScanByNameOrId::from_query(&query)?; + let paginated_by = name_or_id_pagination(&pagparams, scan_params)?; + + let rxs = nexus + .webhook_receiver_list(&opctx, &paginated_by) + .await? 
+ .into_iter() + .map(views::WebhookReceiver::try_from) + .collect::, _>>()?; + + Ok(HttpResponseOk(ScanByNameOrId::results_page( + &query, + rxs, + &marker_for_name_or_id, + )?)) + }; + + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } + + async fn webhook_receiver_view( + rqctx: RequestContext, + path_params: Path, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let webhook_selector = path_params.into_inner(); + let rx = nexus.webhook_receiver_lookup(&opctx, webhook_selector)?; + let webhook = + nexus.webhook_receiver_config_fetch(&opctx, rx).await?; + Ok(HttpResponseOk(views::WebhookReceiver::try_from(webhook)?)) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } + + async fn webhook_receiver_create( + rqctx: RequestContext, + params: TypedBody, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let params = params.into_inner(); + + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + let receiver = + nexus.webhook_receiver_create(&opctx, params).await?; + Ok(HttpResponseCreated(views::WebhookReceiver::try_from(receiver)?)) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } + + async fn webhook_receiver_update( + rqctx: RequestContext, + path_params: Path, + params: TypedBody, + ) -> Result { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + + let webhook_selector = path_params.into_inner(); + let params = params.into_inner(); + let rx = nexus.webhook_receiver_lookup(&opctx, webhook_selector)?; + nexus.webhook_receiver_update(&opctx, 
rx, params).await?; + + Ok(HttpResponseUpdatedNoContent()) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } + + async fn webhook_receiver_delete( + rqctx: RequestContext, + path_params: Path, + ) -> Result { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + + let webhook_selector = path_params.into_inner(); + let rx = nexus.webhook_receiver_lookup(&opctx, webhook_selector)?; + nexus.webhook_receiver_delete(&opctx, rx).await?; + + Ok(HttpResponseDeleted()) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } + + async fn webhook_receiver_probe( + rqctx: RequestContext, + path_params: Path, + query_params: Query, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + + let webhook_selector = path_params.into_inner(); + let probe_params = query_params.into_inner(); + let rx = nexus.webhook_receiver_lookup(&opctx, webhook_selector)?; + let result = + nexus.webhook_receiver_probe(&opctx, rx, probe_params).await?; + // TODO(eliza): send the status code that came back from the probe req... + Ok(HttpResponseOk(result)) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } + + async fn webhook_secrets_list( + rqctx: RequestContext, + query_params: Query, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + + let webhook_selector = query_params.into_inner(); + let rx = nexus.webhook_receiver_lookup(&opctx, webhook_selector)?; + let secrets = nexus + .webhook_receiver_secrets_list(&opctx, rx) + .await? 
+ .into_iter() + .map(Into::into) + .collect(); + + Ok(HttpResponseOk(views::WebhookSecrets { secrets })) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } + + /// Add a secret to a webhook. + async fn webhook_secrets_add( + rqctx: RequestContext, + query_params: Query, + params: TypedBody, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + + let params::WebhookSecretCreate { secret } = params.into_inner(); + let webhook_selector = query_params.into_inner(); + let rx = nexus.webhook_receiver_lookup(&opctx, webhook_selector)?; + let secret = + nexus.webhook_receiver_secret_add(&opctx, rx, secret).await?; + Ok(HttpResponseCreated(secret)) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } + + /// Delete a secret from a webhook receiver. 
+ async fn webhook_secrets_delete( + rqctx: RequestContext, + path_params: Path, + ) -> Result { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + + let secret_selector = path_params.into_inner(); + let secret = + nexus.webhook_secret_lookup(&opctx, secret_selector)?; + nexus.webhook_receiver_secret_delete(&opctx, secret).await?; + + Ok(HttpResponseDeleted()) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } + + async fn webhook_delivery_list( + rqctx: RequestContext, + receiver: Query, + filter: Query, + query: Query, + ) -> Result>, HttpError> + { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + + let webhook_selector = receiver.into_inner(); + let filter = filter.into_inner(); + let query = query.into_inner(); + let pag_params = data_page_params_for(&rqctx, &query)?; + let rx = nexus.webhook_receiver_lookup(&opctx, webhook_selector)?; + let deliveries = nexus + .webhook_receiver_delivery_list(&opctx, rx, filter, &pag_params) + .await?; + + Ok(HttpResponseOk(ScanByTimeAndId::results_page( + &query, + deliveries, + &|_, d| (d.time_started, d.id), + )?)) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } + + async fn webhook_delivery_resend( + rqctx: RequestContext, + path_params: Path, + receiver: Query, + ) -> Result, HttpError> { + let apictx = rqctx.context(); + let handler = async { + let nexus = &apictx.context.nexus; + + let opctx = + crate::context::op_context_for_external_api(&rqctx).await?; + + let event_selector = path_params.into_inner(); + let webhook_selector = receiver.into_inner(); + let event = nexus.webhook_event_lookup(&opctx, event_selector)?; + let rx = 
nexus.webhook_receiver_lookup(&opctx, webhook_selector)?; + let delivery_id = + nexus.webhook_receiver_event_resend(&opctx, rx, event).await?; + + Ok(HttpResponseCreated(views::WebhookDeliveryId { + delivery_id: delivery_id.into_untyped_uuid(), + })) + }; + apictx + .context + .external_latencies + .instrument_dropshot_handler(&rqctx, handler) + .await + } } diff --git a/nexus/tests/config.test.toml b/nexus/tests/config.test.toml index 1aa20e4b3d1..ab3cbcd9af9 100644 --- a/nexus/tests/config.test.toml +++ b/nexus/tests/config.test.toml @@ -161,6 +161,15 @@ region_snapshot_replacement_finish.period_secs = 60 tuf_artifact_replication.period_secs = 3600 # Update integration tests are started with 4 sled agents. tuf_artifact_replication.min_sled_replication = 3 +# In general, the webhook dispatcher will be activated when events are queued, +# so we don't need to periodically activate it *that* frequently. +webhook_dispatcher.period_secs = 60 +webhook_deliverator.period_secs = 60 +# In order to test webhook delivery retry behavior without waiting for a long +# time, turn these backoff periods down from multiple minutes to just a couple +# seconds. 
+webhook_deliverator.first_retry_backoff_secs = 10 +webhook_deliverator.second_retry_backoff_secs = 20 read_only_region_replacement_start.period_secs = 60 [default_region_allocation_strategy] diff --git a/nexus/tests/integration_tests/endpoints.rs b/nexus/tests/integration_tests/endpoints.rs index a606a0c94c2..714da64b981 100644 --- a/nexus/tests/integration_tests/endpoints.rs +++ b/nexus/tests/integration_tests/endpoints.rs @@ -1144,6 +1144,67 @@ pub static DEMO_TARGET_RELEASE: LazyLock = system_version: Version::new(0, 0, 0), }); +// Webhooks +pub static WEBHOOK_RECEIVERS_URL: &'static str = "/v1/webhooks/receivers"; +pub static WEBHOOK_EVENT_CLASSES_URL: &'static str = + "/v1/webhooks/event-classes"; + +pub static DEMO_WEBHOOK_RECEIVER_NAME: LazyLock = + LazyLock::new(|| "my-great-webhook".parse().unwrap()); +pub static DEMO_WEBHOOK_RECEIVER_CREATE: LazyLock = + LazyLock::new(|| params::WebhookCreate { + identity: IdentityMetadataCreateParams { + name: DEMO_WEBHOOK_RECEIVER_NAME.clone(), + description: "webhook, line, and sinker".to_string(), + }, + endpoint: "https://example.com/my-great-webhook".parse().unwrap(), + secrets: vec!["my cool secret".to_string()], + events: vec!["test.foo.bar".to_string(), "test.*".to_string()], + }); + +pub static DEMO_WEBHOOK_RECEIVER_UPDATE: LazyLock< + params::WebhookReceiverUpdate, +> = LazyLock::new(|| params::WebhookReceiverUpdate { + identity: IdentityMetadataUpdateParams { + name: None, + description: Some("webhooked on phonics".to_string()), + }, + endpoint: Some("https://example.com/my-cool-webhook".parse().unwrap()), + events: Some(vec![ + "test.foo.bar".to_string(), + "test.*".to_string(), + "test.**.baz".to_string(), + ]), +}); + +pub static DEMO_WEBHOOK_RECEIVER_URL: LazyLock = LazyLock::new(|| { + format!("{WEBHOOK_RECEIVERS_URL}/{}", *DEMO_WEBHOOK_RECEIVER_NAME) +}); +pub static DEMO_WEBHOOK_RECEIVER_PROBE_URL: LazyLock = + LazyLock::new(|| { + format!("{WEBHOOK_RECEIVERS_URL}/{}/probe", 
*DEMO_WEBHOOK_RECEIVER_NAME) + }); +pub static DEMO_WEBHOOK_DELIVERY_URL: LazyLock = LazyLock::new(|| { + format!("/v1/webhooks/deliveries?receiver={}", *DEMO_WEBHOOK_RECEIVER_NAME) +}); + +pub static DEMO_WEBHOOK_SECRETS_URL: LazyLock = LazyLock::new(|| { + format!("/v1/webhooks/secrets?receiver={}", *DEMO_WEBHOOK_RECEIVER_NAME) +}); + +pub static DEMO_WEBHOOK_SECRET_DELETE_URL: LazyLock = + LazyLock::new(|| { + format!( + "/v1/webhooks/secrets/{{id}}?receiver={}", + *DEMO_WEBHOOK_RECEIVER_NAME + ) + }); + +pub static DEMO_WEBHOOK_SECRET_CREATE: LazyLock = + LazyLock::new(|| params::WebhookSecretCreate { + secret: "TRUSTNO1".to_string(), + }); + /// Describes an API endpoint to be verified by the "unauthorized" test /// /// These structs are also used to check whether we're covering all endpoints in @@ -2713,5 +2774,69 @@ pub static VERIFY_ENDPOINTS: LazyLock> = ), ], }, + // Webhooks + VerifyEndpoint { + url: &WEBHOOK_RECEIVERS_URL, + visibility: Visibility::Public, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![ + AllowedMethod::Get, + AllowedMethod::Post( + serde_json::to_value(&*DEMO_WEBHOOK_RECEIVER_CREATE) + .unwrap(), + ), + ], + }, + VerifyEndpoint { + url: &DEMO_WEBHOOK_RECEIVER_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![ + AllowedMethod::Get, + AllowedMethod::Put( + serde_json::to_value(&*DEMO_WEBHOOK_RECEIVER_UPDATE) + .unwrap(), + ), + AllowedMethod::Delete, + ], + }, + VerifyEndpoint { + url: &DEMO_WEBHOOK_RECEIVER_PROBE_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![AllowedMethod::Post( + serde_json::to_value(()).unwrap(), + )], + }, + VerifyEndpoint { + url: &DEMO_WEBHOOK_SECRETS_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![ + AllowedMethod::Get, + AllowedMethod::Post( + 
serde_json::to_value(&*DEMO_WEBHOOK_SECRET_CREATE) + .unwrap(), + ), + ], + }, + VerifyEndpoint { + url: &DEMO_WEBHOOK_SECRET_DELETE_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![AllowedMethod::Delete], + }, + VerifyEndpoint { + url: &DEMO_WEBHOOK_DELIVERY_URL, + visibility: Visibility::Protected, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![AllowedMethod::Get], + }, + VerifyEndpoint { + url: &WEBHOOK_EVENT_CLASSES_URL, + visibility: Visibility::Public, + unprivileged_access: UnprivilegedAccess::None, + allowed_methods: vec![AllowedMethod::Get], + }, ] }); diff --git a/nexus/tests/integration_tests/mod.rs b/nexus/tests/integration_tests/mod.rs index 28ed9cc4c9f..8aabc169f5c 100644 --- a/nexus/tests/integration_tests/mod.rs +++ b/nexus/tests/integration_tests/mod.rs @@ -59,6 +59,7 @@ mod vpc_firewall; mod vpc_routers; mod vpc_subnets; mod vpcs; +mod webhooks; // This module is used only for shared data, not test cases. 
mod endpoints; diff --git a/nexus/tests/integration_tests/unauthorized.rs b/nexus/tests/integration_tests/unauthorized.rs index 8df0bffdb4e..5c9b534d9c1 100644 --- a/nexus/tests/integration_tests/unauthorized.rs +++ b/nexus/tests/integration_tests/unauthorized.rs @@ -370,6 +370,18 @@ static SETUP_REQUESTS: LazyLock> = LazyLock::new(|| { body: serde_json::to_value(()).unwrap(), id_routes: vec!["/experimental/v1/system/support-bundles/{id}"], }, + // Create a webhook receiver + SetupReq::Post { + url: &WEBHOOK_RECEIVERS_URL, + body: serde_json::to_value(&*DEMO_WEBHOOK_RECEIVER_CREATE).unwrap(), + id_routes: vec![], + }, + // Create a secret for that receiver + SetupReq::Post { + url: &DEMO_WEBHOOK_SECRETS_URL, + body: serde_json::to_value(&*DEMO_WEBHOOK_SECRET_CREATE).unwrap(), + id_routes: vec![&*DEMO_WEBHOOK_SECRET_DELETE_URL], + }, ] }); diff --git a/nexus/tests/integration_tests/webhooks.rs b/nexus/tests/integration_tests/webhooks.rs new file mode 100644 index 00000000000..3fd1b554889 --- /dev/null +++ b/nexus/tests/integration_tests/webhooks.rs @@ -0,0 +1,1295 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! 
Webhooks + +use dropshot::test_util::ClientTestContext; +use hmac::{Hmac, Mac}; +use httpmock::prelude::*; +use nexus_db_model::WebhookEventClass; +use nexus_db_queries::context::OpContext; +use nexus_test_utils::background::activate_background_task; +use nexus_test_utils::http_testing::AuthnMode; +use nexus_test_utils::http_testing::NexusRequest; +use nexus_test_utils::http_testing::RequestBuilder; +use nexus_test_utils::resource_helpers; +use nexus_test_utils_macros::nexus_test; +use nexus_types::external_api::{params, views}; +use omicron_common::api::external::IdentityMetadataCreateParams; +use omicron_common::api::external::NameOrId; +use omicron_uuid_kinds::GenericUuid; +use omicron_uuid_kinds::WebhookEventUuid; +use omicron_uuid_kinds::WebhookReceiverUuid; +use sha2::Sha256; +use std::time::Duration; +use uuid::Uuid; + +type ControlPlaneTestContext = + nexus_test_utils::ControlPlaneTestContext; + +const RECEIVERS_BASE_PATH: &str = "/v1/webhooks/receivers"; +const SECRETS_BASE_PATH: &str = "/v1/webhooks/secrets"; +const DELIVERIES_BASE_PATH: &str = "/v1/webhooks/deliveries"; + +async fn webhook_create( + ctx: &ControlPlaneTestContext, + params: ¶ms::WebhookCreate, +) -> views::WebhookReceiver { + resource_helpers::object_create::< + params::WebhookCreate, + views::WebhookReceiver, + >(&ctx.external_client, RECEIVERS_BASE_PATH, params) + .await +} + +fn get_webhooks_url(name_or_id: impl Into) -> String { + let name_or_id = name_or_id.into(); + format!("{RECEIVERS_BASE_PATH}/{name_or_id}") +} + +async fn webhook_get( + client: &ClientTestContext, + webhook_url: &str, +) -> views::WebhookReceiver { + webhook_get_as(client, webhook_url, AuthnMode::PrivilegedUser).await +} + +async fn webhook_get_as( + client: &ClientTestContext, + webhook_url: &str, + authn_as: AuthnMode, +) -> views::WebhookReceiver { + NexusRequest::object_get(client, &webhook_url) + .authn_as(authn_as) + .execute() + .await + .unwrap() + .parsed_body() + .unwrap() +} + +async fn 
webhook_rx_list( + client: &ClientTestContext, +) -> Vec { + resource_helpers::objects_list_page_authz::( + client, + RECEIVERS_BASE_PATH, + ) + .await + .items +} + +async fn webhook_secrets_get( + client: &ClientTestContext, + webhook_name_or_id: impl Into, +) -> views::WebhookSecrets { + let name_or_id = webhook_name_or_id.into(); + NexusRequest::object_get( + client, + &format!("{SECRETS_BASE_PATH}/?receiver={name_or_id}"), + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .unwrap() + .parsed_body() + .unwrap() +} + +fn resend_url( + webhook_name_or_id: impl Into, + event_id: WebhookEventUuid, +) -> String { + let rx = webhook_name_or_id.into(); + format!("{DELIVERIES_BASE_PATH}/{event_id}/resend?receiver={rx}") +} +async fn webhook_delivery_resend( + client: &ClientTestContext, + webhook_name_or_id: impl Into, + event_id: WebhookEventUuid, +) -> views::WebhookDeliveryId { + let req = RequestBuilder::new( + client, + http::Method::POST, + &resend_url(webhook_name_or_id, event_id), + ) + .body::(None) + .expect_status(Some(http::StatusCode::CREATED)); + NexusRequest::new(req) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .unwrap() + .parsed_body() + .unwrap() +} + +async fn webhook_delivery_resend_error( + client: &ClientTestContext, + webhook_name_or_id: impl Into, + event_id: WebhookEventUuid, + status: http::StatusCode, +) -> dropshot::HttpErrorResponseBody { + let req = RequestBuilder::new( + client, + http::Method::POST, + &resend_url(webhook_name_or_id, event_id), + ) + .body::(None) + .expect_status(Some(status)); + NexusRequest::new(req) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .unwrap() + .parsed_body() + .unwrap() +} + +fn my_great_webhook_params( + mock: &httpmock::MockServer, +) -> params::WebhookCreate { + params::WebhookCreate { + identity: IdentityMetadataCreateParams { + name: "my-great-webhook".parse().unwrap(), + description: String::from("my great webhook"), + }, + endpoint: mock + 
.url("/webhooks") + .parse() + .expect("this should be a valid URL"), + secrets: vec![MY_COOL_SECRET.to_string()], + events: vec!["test.foo".to_string()], + } +} + +const MY_COOL_SECRET: &str = "my cool secret"; + +async fn secret_add( + ctx: &ControlPlaneTestContext, + webhook_id: WebhookReceiverUuid, + params: &params::WebhookSecretCreate, +) -> views::WebhookSecretId { + resource_helpers::object_create::< + params::WebhookSecretCreate, + views::WebhookSecretId, + >( + &ctx.external_client, + &format!("{SECRETS_BASE_PATH}/?receiver={webhook_id}"), + params, + ) + .await +} + +async fn webhook_send_probe( + ctx: &ControlPlaneTestContext, + webhook_id: &WebhookReceiverUuid, + resend: bool, + status: http::StatusCode, +) -> views::WebhookProbeResult { + let pathparams = if resend { "?resend=true" } else { "" }; + let path = format!("{RECEIVERS_BASE_PATH}/{webhook_id}/probe{pathparams}"); + NexusRequest::new( + RequestBuilder::new(&ctx.external_client, http::Method::POST, &path) + .expect_status(Some(status)), + ) + .authn_as(AuthnMode::PrivilegedUser) + .execute() + .await + .unwrap_or_else(|e| { + panic!("failed to make \"POST\" request to {path}: {e}") + }) + .parsed_body() + .unwrap() +} + +fn is_valid_for_webhook( + webhook: &views::WebhookReceiver, +) -> impl FnOnce(httpmock::When) -> httpmock::When { + let path = webhook.endpoint.path().to_string(); + let id = webhook.identity.id.to_string(); + move |when| { + when.path(path) + .header("x-oxide-webhook-id", id) + .header_exists("x-oxide-delivery-id") + .header_exists("x-oxide-signature") + .header("content-type", "application/json") + } +} + +fn signature_verifies( + secret_id: Uuid, + secret: Vec<u8>, +) -> impl Fn(&HttpMockRequest) -> bool { + let secret_id = secret_id.to_string(); + move |req| { + // N.B. that `HttpMockRequest::headers_vec()`, which returns a + // `Vec<(String, String)>` is used here, rather than + // `HttpMockRequest::headers()`, which returns a `HeaderMap`. 
This is + // currently necessary because of a `httpmock` bug where, when multiple + // values for the same header are present in the request, the map + // returned by `headers()` will only contain one of those values. See: + // https://github.com/alexliesenfeld/httpmock/issues/119 + let hdrs = req.headers_vec(); + let Some(sig_hdr) = hdrs.iter().find_map(|(name, hdr)| { + if name != "x-oxide-signature" { + return None; + } + // Signature header format: + // a={algorithm}&id={secret_id}&s={signature} + + // Strip the expected algorithm part. Note that we only support + // SHA256 for now. Panic if this is invalid. + let hdr = hdr + .strip_prefix("a=sha256") + .expect("all x-oxide-signature headers should be SHA256"); + // Strip the leading `&id=` for the ID part, panicking if this + // is not found. + let hdr = hdr.strip_prefix("&id=").expect( + "all x-oxide-signature headers should have a secret ID part", + ); + // If the ID isn't the one we're looking for, we want to keep + // going, so just return `None` here + let hdr = hdr.strip_prefix(secret_id.as_str())?; + // Finally, extract the signature part by stripping the &s= + // prefix. + hdr.strip_prefix("&s=") + }) else { + panic!( + "no x-oxide-signature header for secret with ID {secret_id} found" + ); + }; + let sig_bytes = hex::decode(sig_hdr) + .expect("x-oxide-signature signature value should be a hex string"); + let mut mac = Hmac::<Sha256>::new_from_slice(&secret[..]) + .expect("HMAC secrets can be any length"); + mac.update(req.body().as_ref()); + mac.verify_slice(&sig_bytes).is_ok() + } +} + +#[nexus_test] +async fn test_webhook_receiver_get(cptestctx: &ControlPlaneTestContext) { + let client = &cptestctx.external_client; + + let server = httpmock::MockServer::start_async().await; + + // Create a webhook receiver. + let created_webhook = + webhook_create(&cptestctx, &my_great_webhook_params(&server)).await; + dbg!(&created_webhook); + + // Fetch the receiver by ID. 
+ let by_id_url = get_webhooks_url(created_webhook.identity.id); + let webhook_view = webhook_get(client, &by_id_url).await; + assert_eq!(created_webhook, webhook_view); + + // Fetch the receiver by name. + let by_name_url = get_webhooks_url(created_webhook.identity.name.clone()); + let webhook_view = webhook_get(client, &by_name_url).await; + assert_eq!(created_webhook, webhook_view); +} + +#[nexus_test] +async fn test_webhook_receiver_create_delete( + cptestctx: &ControlPlaneTestContext, +) { + let client = &cptestctx.external_client; + + let server = httpmock::MockServer::start_async().await; + + // Create a webhook receiver. + let created_webhook = + webhook_create(&cptestctx, &my_great_webhook_params(&server)).await; + dbg!(&created_webhook); + + resource_helpers::object_delete( + client, + &format!("{RECEIVERS_BASE_PATH}/{}", created_webhook.identity.name), + ) + .await; + + // It should be gone now. + resource_helpers::object_delete_error( + client, + &format!("{RECEIVERS_BASE_PATH}/{}", created_webhook.identity.name), + http::StatusCode::NOT_FOUND, + ) + .await; +} +#[nexus_test] +async fn test_webhook_receiver_names_are_unique( + cptestctx: &ControlPlaneTestContext, +) { + let client = &cptestctx.external_client; + + let server = httpmock::MockServer::start_async().await; + + // Create a webhook receiver. 
+ let created_webhook = + webhook_create(&cptestctx, &my_great_webhook_params(&server)).await; + dbg!(&created_webhook); + + let error = resource_helpers::object_create_error( + &client, + RECEIVERS_BASE_PATH, + &my_great_webhook_params(&server), + http::StatusCode::BAD_REQUEST, + ) + .await; + assert_eq!( + dbg!(&error).message, + "already exists: webhook-receiver \"my-great-webhook\"" + ); +} + +#[nexus_test] +async fn test_event_delivery(cptestctx: &ControlPlaneTestContext) { + let nexus = cptestctx.server.server_context().nexus.clone(); + let internal_client = &cptestctx.internal_client; + + let datastore = nexus.datastore(); + let opctx = + OpContext::for_tests(cptestctx.logctx.log.new(o!()), datastore.clone()); + + let server = httpmock::MockServer::start_async().await; + + let id = WebhookEventUuid::new_v4(); + + // Create a webhook receiver. + let webhook = + webhook_create(&cptestctx, &my_great_webhook_params(&server)).await; + dbg!(&webhook); + + let mock = { + let webhook = webhook.clone(); + server + .mock_async(move |when, then| { + let body = serde_json::json!({ + "event_class": "test.foo", + "event_id": id, + "data": { + "hello_world": true, + } + }) + .to_string(); + when.method(POST) + .header("x-oxide-event-class", "test.foo") + .header("x-oxide-event-id", id.to_string()) + .and(is_valid_for_webhook(&webhook)) + .is_true(signature_verifies( + webhook.secrets[0].id, + MY_COOL_SECRET.as_bytes().to_vec(), + )) + .json_body_includes(body); + then.status(200); + }) + .await + }; + + // Publish an event + let event = nexus + .webhook_event_publish( + &opctx, + id, + WebhookEventClass::TestFoo, + serde_json::json!({"hello_world": true}), + ) + .await + .expect("event should be published successfully"); + dbg!(event); + + dbg!(activate_background_task(internal_client, "webhook_dispatcher").await); + dbg!( + activate_background_task(internal_client, "webhook_deliverator").await + ); + + mock.assert_async().await; +} + +#[nexus_test] +async fn 
test_multiple_secrets(cptestctx: &ControlPlaneTestContext) { + let nexus = cptestctx.server.server_context().nexus.clone(); + let internal_client = &cptestctx.internal_client; + + let datastore = nexus.datastore(); + let opctx = + OpContext::for_tests(cptestctx.logctx.log.new(o!()), datastore.clone()); + + let server = httpmock::MockServer::start_async().await; + + let id = WebhookEventUuid::new_v4(); + let endpoint = + server.url("/webhooks").parse().expect("this should be a valid URL"); + + const SECRET1: &str = "it's an older code, sir, but it checks out"; + const SECRET2: &str = "Joshua"; + const SECRET3: &str = "Setec Astronomy"; + + // Create a webhook receiver. + let webhook = webhook_create( + &cptestctx, + ¶ms::WebhookCreate { + identity: IdentityMetadataCreateParams { + name: "my-great-webhook".parse().unwrap(), + description: String::from("my great webhook"), + }, + endpoint, + secrets: vec![SECRET1.to_string()], + events: vec!["test.foo".to_string()], + }, + ) + .await; + dbg!(&webhook); + let rx_id = WebhookReceiverUuid::from_untyped_uuid(webhook.identity.id); + + let secret1_id = webhook.secrets[0].id; + + let client = &cptestctx.external_client; + let assert_secrets_get = |mut expected: Vec| async move { + let mut actual = webhook_secrets_get(client, rx_id.into_untyped_uuid()) + .await + .secrets + .into_iter() + .map(|secret| secret.id) + .collect::>(); + actual.sort(); + expected.sort(); + assert_eq!(expected, actual); + }; + + assert_secrets_get(vec![secret1_id]).await; + + // Add a second secret to the webhook receiver. + let secret2_id = dbg!( + secret_add( + &cptestctx, + rx_id, + ¶ms::WebhookSecretCreate { secret: SECRET2.to_string() }, + ) + .await + ) + .id; + assert_secrets_get(vec![secret1_id, secret2_id]).await; + + // And a third one, just for kicks. 
+ let secret3_id = dbg!( + secret_add( + &cptestctx, + rx_id, + ¶ms::WebhookSecretCreate { secret: SECRET3.to_string() }, + ) + .await + ) + .id; + assert_secrets_get(vec![secret1_id, secret2_id, secret3_id]).await; + + let mock = server + .mock_async(|when, then| { + when.method(POST) + .header("x-oxide-event-class", "test.foo") + .header("x-oxide-event-id", id.to_string()) + .and(is_valid_for_webhook(&webhook)) + // There should be a signature header present for all three + // secrets, and they should all verify the contents of the + // webhook request. + .is_true(signature_verifies( + secret1_id, + SECRET1.as_bytes().to_vec(), + )) + .is_true(signature_verifies( + secret2_id, + SECRET2.as_bytes().to_vec(), + )) + .is_true(signature_verifies( + secret3_id, + SECRET3.as_bytes().to_vec(), + )); + then.status(200); + }) + .await; + + // Publish an event + let event = nexus + .webhook_event_publish( + &opctx, + id, + WebhookEventClass::TestFoo, + serde_json::json!({"hello_world": true}), + ) + .await + .expect("event should be published successfully"); + dbg!(event); + + dbg!(activate_background_task(internal_client, "webhook_dispatcher").await); + dbg!( + activate_background_task(internal_client, "webhook_deliverator").await + ); + + mock.assert_async().await; +} + +#[nexus_test] +async fn test_multiple_receivers(cptestctx: &ControlPlaneTestContext) { + let nexus = cptestctx.server.server_context().nexus.clone(); + let internal_client = &cptestctx.internal_client; + let client = &cptestctx.external_client; + + let datastore = nexus.datastore(); + let opctx = + OpContext::for_tests(cptestctx.logctx.log.new(o!()), datastore.clone()); + + let bar_event_id = WebhookEventUuid::new_v4(); + let baz_event_id = WebhookEventUuid::new_v4(); + + let assert_webhook_rx_list_matches = + |mut expected: Vec| async move { + let mut actual = webhook_rx_list(client).await; + actual.sort_by_key(|rx| rx.identity.id); + expected.sort_by_key(|rx| rx.identity.id); + assert_eq!(expected, 
actual); + }; + + // Create three webhook receivers + let srv_bar = httpmock::MockServer::start_async().await; + const BAR_SECRET: &str = "this is bar's secret"; + let rx_bar = webhook_create( + &cptestctx, + ¶ms::WebhookCreate { + identity: IdentityMetadataCreateParams { + name: "webhooked-on-phonics".parse().unwrap(), + description: String::from("webhooked on phonics"), + }, + endpoint: srv_bar + .url("/webhooks") + .parse() + .expect("this should be a valid URL"), + secrets: vec![BAR_SECRET.to_string()], + events: vec!["test.foo.bar".to_string()], + }, + ) + .await; + dbg!(&rx_bar); + assert_webhook_rx_list_matches(vec![rx_bar.clone()]).await; + let mock_bar = { + let webhook = rx_bar.clone(); + srv_bar + .mock_async(move |when, then| { + when.method(POST) + .header("x-oxide-event-class", "test.foo.bar") + .header("x-oxide-event-id", bar_event_id.to_string()) + .and(is_valid_for_webhook(&webhook)) + .is_true(signature_verifies( + webhook.secrets[0].id, + BAR_SECRET.as_bytes().to_vec(), + )); + then.status(200); + }) + .await + }; + + let srv_baz = httpmock::MockServer::start_async().await; + const BAZ_SECRET: &str = "this is baz's secret"; + let rx_baz = webhook_create( + &cptestctx, + ¶ms::WebhookCreate { + identity: IdentityMetadataCreateParams { + name: "webhook-line-and-sinker".parse().unwrap(), + description: String::from("webhook, line, and sinker"), + }, + endpoint: srv_baz + .url("/webhooks") + .parse() + .expect("this should be a valid URL"), + secrets: vec![BAZ_SECRET.to_string()], + events: vec!["test.foo.baz".to_string()], + }, + ) + .await; + dbg!(&rx_baz); + assert_webhook_rx_list_matches(vec![rx_bar.clone(), rx_baz.clone()]).await; + let mock_baz = { + let webhook = rx_baz.clone(); + srv_baz + .mock_async(move |when, then| { + when.method(POST) + .header("x-oxide-event-class", "test.foo.baz") + .header("x-oxide-event-id", baz_event_id.to_string()) + .and(is_valid_for_webhook(&webhook)) + .is_true(signature_verifies( + webhook.secrets[0].id, + 
BAZ_SECRET.as_bytes().to_vec(), + )); + then.status(200); + }) + .await + }; + + let srv_star = httpmock::MockServer::start_async().await; + const STAR_SECRET: &str = "this is star's secret"; + let rx_star = webhook_create( + &cptestctx, + ¶ms::WebhookCreate { + identity: IdentityMetadataCreateParams { + name: "globulated".parse().unwrap(), + description: String::from("this one has globs"), + }, + endpoint: srv_star + .url("/webhooks") + .parse() + .expect("this should be a valid URL"), + secrets: vec![STAR_SECRET.to_string()], + events: vec!["test.foo.*".to_string()], + }, + ) + .await; + dbg!(&rx_star); + assert_webhook_rx_list_matches(vec![ + rx_bar.clone(), + rx_baz.clone(), + rx_star.clone(), + ]) + .await; + let mock_star = { + let webhook = rx_star.clone(); + srv_star + .mock_async(move |when, then| { + when.method(POST) + .header_matches( + "x-oxide-event-class", + "test\\.foo\\.ba[rz]", + ) + .header_exists("x-oxide-event-id") + .and(is_valid_for_webhook(&webhook)) + .is_true(signature_verifies( + webhook.secrets[0].id, + STAR_SECRET.as_bytes().to_vec(), + )); + then.status(200); + }) + .await + }; + + // Publish a test.foo.bar event + let event = nexus + .webhook_event_publish( + &opctx, + bar_event_id, + WebhookEventClass::TestFooBar, + serde_json::json!({"lol": "webhooked on phonics"}), + ) + .await + .expect("event should be published successfully"); + dbg!(event); + // Publish a test.foo.baz event + let event = nexus + .webhook_event_publish( + &opctx, + baz_event_id, + WebhookEventClass::TestFooBaz, + serde_json::json!({"lol": "webhook, line, and sinker"}), + ) + .await + .expect("event should be published successfully"); + dbg!(event); + + dbg!(activate_background_task(internal_client, "webhook_dispatcher").await); + dbg!( + activate_background_task(internal_client, "webhook_deliverator").await + ); + + // The `test.foo.bar` receiver should have received 1 event. 
+ mock_bar.assert_calls_async(1).await; + + // The `test.foo.baz` receiver should have received 1 event. + mock_baz.assert_calls_async(1).await; + + // The `test.foo.*` receiver should have received both events. + mock_star.assert_calls_async(2).await; +} + +#[nexus_test] +async fn test_retry_backoff(cptestctx: &ControlPlaneTestContext) { + let nexus = cptestctx.server.server_context().nexus.clone(); + let internal_client = &cptestctx.internal_client; + + let datastore = nexus.datastore(); + let opctx = + OpContext::for_tests(cptestctx.logctx.log.new(o!()), datastore.clone()); + + let server = httpmock::MockServer::start_async().await; + + let id = WebhookEventUuid::new_v4(); + + // Create a webhook receiver. + let webhook = + webhook_create(&cptestctx, &my_great_webhook_params(&server)).await; + dbg!(&webhook); + + let mock = { + let webhook = webhook.clone(); + server + .mock_async(move |when, then| { + let body = serde_json::json!({ + "event_class": "test.foo", + "event_id": id, + "data": { + "hello_world": true, + } + }) + .to_string(); + when.method(POST) + .header("x-oxide-event-class", "test.foo") + .header("x-oxide-event-id", id.to_string()) + .and(is_valid_for_webhook(&webhook)) + .is_true(signature_verifies( + webhook.secrets[0].id, + MY_COOL_SECRET.as_bytes().to_vec(), + )) + .json_body_includes(body); + then.status(500); + }) + .await + }; + + // Publish an event + let event = nexus + .webhook_event_publish( + &opctx, + id, + WebhookEventClass::TestFoo, + serde_json::json!({"hello_world": true}), + ) + .await + .expect("event should be published successfully"); + dbg!(event); + + dbg!(activate_background_task(internal_client, "webhook_dispatcher").await); + dbg!( + activate_background_task(internal_client, "webhook_deliverator").await + ); + + mock.assert_calls_async(1).await; + + // Okay, we are now in backoff. Activate the deliverator again --- no new + // event should be delivered. 
+ dbg!( + activate_background_task(internal_client, "webhook_deliverator").await + ); + // Activating the deliverator whilst in backoff should not send another + // request. + mock.assert_calls_async(1).await; + mock.delete_async().await; + + // Okay, now let's return a different 5xx status. + let mock = { + let webhook = webhook.clone(); + server + .mock_async(move |when, then| { + let body = serde_json::json!({ + "event_class": "test.foo", + "event_id": id, + "data": { + "hello_world": true, + } + }) + .to_string(); + when.method(POST) + .header("x-oxide-event-class", "test.foo") + .header("x-oxide-event-id", id.to_string()) + .and(is_valid_for_webhook(&webhook)) + .is_true(signature_verifies( + webhook.secrets[0].id, + MY_COOL_SECRET.as_bytes().to_vec(), + )) + .json_body_includes(body); + then.status(503); + }) + .await + }; + + // Wait out the backoff period for the first request. + tokio::time::sleep(std::time::Duration::from_secs(15)).await; + dbg!( + activate_background_task(internal_client, "webhook_deliverator").await + ); + mock.assert_calls_async(1).await; + + // Again, we should be in backoff, so no request will be sent. + dbg!( + activate_background_task(internal_client, "webhook_deliverator").await + ); + mock.assert_calls_async(1).await; + mock.delete_async().await; + + // Finally, allow the request to succeed. 
+ let mock = { + let webhook = webhook.clone(); + server + .mock_async(move |when, then| { + let body = serde_json::json!({ + "event_class": "test.foo", + "event_id": id, + "data": { + "hello_world": true, + } + }) + .to_string(); + when.method(POST) + .header("x-oxide-event-class", "test.foo") + .header("x-oxide-event-id", id.to_string()) + .and(is_valid_for_webhook(&webhook)) + .is_true(signature_verifies( + webhook.secrets[0].id, + MY_COOL_SECRET.as_bytes().to_vec(), + )) + .json_body_includes(body); + then.status(200); + }) + .await + }; + + // + tokio::time::sleep(std::time::Duration::from_secs(15)).await; + dbg!( + activate_background_task(internal_client, "webhook_deliverator").await + ); + mock.assert_calls_async(0).await; + + tokio::time::sleep(std::time::Duration::from_secs(5)).await; + dbg!( + activate_background_task(internal_client, "webhook_deliverator").await + ); + mock.assert_async().await; +} + +#[nexus_test] +async fn test_probe(cptestctx: &ControlPlaneTestContext) { + let server = httpmock::MockServer::start_async().await; + + // Create a webhook receiver. + let webhook = + webhook_create(&cptestctx, &my_great_webhook_params(&server)).await; + dbg!(&webhook); + let rx_id = WebhookReceiverUuid::from_untyped_uuid(webhook.identity.id); + + let body = serde_json::json!({ + "event_class": "probe", + "data": {} + }) + .to_string(); + + // First, configure the receiver server to return a successful response but + // only after the delivery timeout has elapsed. + let mock = { + let webhook = webhook.clone(); + let body = body.clone(); + server + .mock_async(move |when, then| { + when.method(POST) + .header("x-oxide-event-class", "probe") + .and(is_valid_for_webhook(&webhook)) + .is_true(signature_verifies( + webhook.secrets[0].id, + MY_COOL_SECRET.as_bytes().to_vec(), + )) + .json_body_includes(body); + then + // Delivery timeout is 30 seconds. + // TODO(eliza): it would be really nice if this test didn't + // have to wait 30 seconds... 
+ .delay(Duration::from_secs(35)) + // After the timeout, return something that would be considered + // a success. + .status(200); + }) + .await + }; + + // Send a probe. The probe should fail due to a timeout. + let probe1 = + webhook_send_probe(&cptestctx, &rx_id, false, http::StatusCode::OK) + .await; + dbg!(&probe1); + + mock.assert_async().await; + + assert_eq!( + probe1.probe.attempts[0].result, + views::WebhookDeliveryAttemptResult::FailedTimeout + ); + assert_eq!(probe1.probe.event_class, "probe"); + assert_eq!(probe1.probe.trigger, views::WebhookDeliveryTrigger::Probe); + assert_eq!(probe1.probe.state, views::WebhookDeliveryState::Failed); + assert_eq!( + probe1.resends_started, None, + "we did not request events be resent" + ); + + // Next, configure the receiver server to return a 5xx error + mock.delete_async().await; + let mock = { + let webhook = webhook.clone(); + let body = body.clone(); + server + .mock_async(move |when, then| { + when.method(POST) + .header("x-oxide-event-class", "probe") + .and(is_valid_for_webhook(&webhook)) + .is_true(signature_verifies( + webhook.secrets[0].id, + MY_COOL_SECRET.as_bytes().to_vec(), + )) + .json_body_includes(body); + then.status(503); + }) + .await + }; + + let probe2 = + webhook_send_probe(&cptestctx, &rx_id, false, http::StatusCode::OK) + .await; + dbg!(&probe2); + + mock.assert_async().await; + assert_eq!( + probe2.probe.attempts[0].result, + views::WebhookDeliveryAttemptResult::FailedHttpError + ); + assert_eq!(probe2.probe.event_class, "probe"); + assert_eq!(probe2.probe.trigger, views::WebhookDeliveryTrigger::Probe); + assert_eq!(probe2.probe.state, views::WebhookDeliveryState::Failed); + assert_ne!( + probe2.probe.id, probe1.probe.id, + "a new delivery ID should be assigned to each probe" + ); + assert_eq!( + probe2.resends_started, None, + "we did not request events be resent" + ); + + mock.delete_async().await; + // Finally, configure the receiver server to return a success. 
+ let mock = { + let webhook = webhook.clone(); + let body = body.clone(); + server + .mock_async(move |when, then| { + when.method(POST) + .header("x-oxide-event-class", "probe") + .and(is_valid_for_webhook(&webhook)) + .is_true(signature_verifies( + webhook.secrets[0].id, + MY_COOL_SECRET.as_bytes().to_vec(), + )) + .json_body_includes(body); + then.status(200); + }) + .await + }; + + let probe3 = + webhook_send_probe(&cptestctx, &rx_id, false, http::StatusCode::OK) + .await; + dbg!(&probe3); + mock.assert_async().await; + assert_eq!( + probe3.probe.attempts[0].result, + views::WebhookDeliveryAttemptResult::Succeeded + ); + assert_eq!(probe3.probe.event_class, "probe"); + assert_eq!(probe3.probe.trigger, views::WebhookDeliveryTrigger::Probe); + assert_eq!(probe3.probe.state, views::WebhookDeliveryState::Delivered); + assert_ne!( + probe3.probe.id, probe1.probe.id, + "a new delivery ID should be assigned to each probe" + ); + assert_ne!( + probe3.probe.id, probe2.probe.id, + "a new delivery ID should be assigned to each probe" + ); + assert_eq!( + probe3.resends_started, None, + "we did not request events be resent" + ); +} + +#[nexus_test] +async fn test_probe_resends_failed_deliveries( + cptestctx: &ControlPlaneTestContext, +) { + let nexus = cptestctx.server.server_context().nexus.clone(); + let internal_client = &cptestctx.internal_client; + let server = httpmock::MockServer::start_async().await; + + let datastore = nexus.datastore(); + let opctx = + OpContext::for_tests(cptestctx.logctx.log.new(o!()), datastore.clone()); + + // Create a webhook receiver. 
+ let webhook = + webhook_create(&cptestctx, &my_great_webhook_params(&server)).await; + dbg!(&webhook); + + let event1_id = WebhookEventUuid::new_v4(); + let event2_id = WebhookEventUuid::new_v4(); + let mock = { + let webhook = webhook.clone(); + server + .mock_async(move |when, then| { + when.method(POST) + .header("x-oxide-event-class", "test.foo") + // either event + .header_matches( + "x-oxide-event-id", + format!("({event1_id})|({event2_id})"), + ) + .and(is_valid_for_webhook(&webhook)) + .is_true(signature_verifies( + webhook.secrets[0].id, + MY_COOL_SECRET.as_bytes().to_vec(), + )); + then.status(500); + }) + .await + }; + + // Publish both events + dbg!( + nexus + .webhook_event_publish( + &opctx, + event1_id, + WebhookEventClass::TestFoo, + serde_json::json!({"hello": "world"}), + ) + .await + .expect("event1 should be published successfully") + ); + dbg!( + nexus + .webhook_event_publish( + &opctx, + event2_id, + WebhookEventClass::TestFoo, + serde_json::json!({"hello": "emeryville"}), + ) + .await + .expect("event2 should be published successfully") + ); + + dbg!(activate_background_task(internal_client, "webhook_dispatcher").await); + dbg!( + activate_background_task(internal_client, "webhook_deliverator").await + ); + mock.assert_calls_async(2).await; + + // Backoff 1 + tokio::time::sleep(std::time::Duration::from_secs(11)).await; + dbg!( + activate_background_task(internal_client, "webhook_deliverator").await + ); + mock.assert_calls_async(4).await; + + // Backoff 2 + tokio::time::sleep(std::time::Duration::from_secs(22)).await; + dbg!( + activate_background_task(internal_client, "webhook_deliverator").await + ); + mock.assert_calls_async(6).await; + + mock.delete_async().await; + + // Allow a probe to succeed + let probe_mock = { + let webhook = webhook.clone(); + server + .mock_async(move |when, then| { + let body = serde_json::json!({ + "event_class": "probe", + "data": { + } + }) + .to_string(); + when.method(POST) + 
.header("x-oxide-event-class", "probe") + .and(is_valid_for_webhook(&webhook)) + .is_true(signature_verifies( + webhook.secrets[0].id, + MY_COOL_SECRET.as_bytes().to_vec(), + )) + .json_body_includes(body); + then.status(200); + }) + .await + }; + + // Allow events to succeed. + let mock = { + let webhook = webhook.clone(); + server + .mock_async(move |when, then| { + when.method(POST) + .header("x-oxide-event-class", "test.foo") + // either event + .header_matches( + "x-oxide-event-id", + format!("({event1_id})|({event2_id})"), + ) + .and(is_valid_for_webhook(&webhook)) + .is_true(signature_verifies( + webhook.secrets[0].id, + MY_COOL_SECRET.as_bytes().to_vec(), + )); + then.status(200); + }) + .await + }; + + // Send a probe with ?resend=true + let rx_id = WebhookReceiverUuid::from_untyped_uuid(webhook.identity.id); + let probe = + webhook_send_probe(&cptestctx, &rx_id, true, http::StatusCode::OK) + .await; + dbg!(&probe); + probe_mock.assert_async().await; + probe_mock.delete_async().await; + assert_eq!(probe.probe.state, views::WebhookDeliveryState::Delivered); + assert_eq!(probe.resends_started, Some(2)); + + // Both events should be resent. + dbg!( + activate_background_task(internal_client, "webhook_deliverator").await + ); + mock.assert_calls_async(2).await; +} + +#[nexus_test] +async fn test_api_resends_failed_deliveries( + cptestctx: &ControlPlaneTestContext, +) { + let nexus = cptestctx.server.server_context().nexus.clone(); + let internal_client = &cptestctx.internal_client; + let client = &cptestctx.external_client; + let server = httpmock::MockServer::start_async().await; + + let datastore = nexus.datastore(); + let opctx = + OpContext::for_tests(cptestctx.logctx.log.new(o!()), datastore.clone()); + + // Create a webhook receiver. 
+ let webhook = + webhook_create(&cptestctx, &my_great_webhook_params(&server)).await; + dbg!(&webhook); + + let event1_id = WebhookEventUuid::new_v4(); + let event2_id = WebhookEventUuid::new_v4(); + let body = serde_json::json!({ + "event_class": "test.foo", + "event_id": event1_id, + "data": { + "hello_world": true, + } + }) + .to_string(); + let mock = { + let webhook = webhook.clone(); + let body = body.clone(); + server + .mock_async(move |when, then| { + when.method(POST) + .header("x-oxide-event-class", "test.foo") + .header("x-oxide-event-id", event1_id.to_string()) + .and(is_valid_for_webhook(&webhook)) + .is_true(signature_verifies( + webhook.secrets[0].id, + MY_COOL_SECRET.as_bytes().to_vec(), + )) + .json_body_includes(body); + then.status(500); + }) + .await + }; + + // Publish an event + let event1 = nexus + .webhook_event_publish( + &opctx, + event1_id, + WebhookEventClass::TestFoo, + serde_json::json!({"hello_world": true}), + ) + .await + .expect("event should be published successfully"); + dbg!(event1); + + // Publish another event that our receiver is not subscribed to. 
+ let event2 = nexus + .webhook_event_publish( + &opctx, + event2_id, + WebhookEventClass::TestQuuxBar, + serde_json::json!({"hello_world": true}), + ) + .await + .expect("event should be published successfully"); + dbg!(event2); + + dbg!(activate_background_task(internal_client, "webhook_dispatcher").await); + dbg!( + activate_background_task(internal_client, "webhook_deliverator").await + ); + + tokio::time::sleep(std::time::Duration::from_secs(11)).await; + dbg!( + activate_background_task(internal_client, "webhook_deliverator").await + ); + tokio::time::sleep(std::time::Duration::from_secs(22)).await; + dbg!( + activate_background_task(internal_client, "webhook_deliverator").await + ); + + mock.assert_calls_async(3).await; + mock.delete_async().await; + + let mock = { + let webhook = webhook.clone(); + let body = body.clone(); + server + .mock_async(move |when, then| { + when.method(POST) + .header("x-oxide-event-class", "test.foo") + .header("x-oxide-event-id", event1_id.to_string()) + .and(is_valid_for_webhook(&webhook)) + .is_true(signature_verifies( + webhook.secrets[0].id, + MY_COOL_SECRET.as_bytes().to_vec(), + )) + .json_body_includes(body); + then.status(200); + }) + .await + }; + + // Try to resend event 1. + let delivery = + webhook_delivery_resend(client, webhook.identity.id, event1_id).await; + dbg!(delivery); + + // Try to resend event 2. This should fail, as the receiver is not + // subscribed to this event class. 
+ let error = webhook_delivery_resend_error( + client, + webhook.identity.id, + event2_id, + http::StatusCode::BAD_REQUEST, + ) + .await; + dbg!(error); + + dbg!( + activate_background_task(internal_client, "webhook_deliverator").await + ); + mock.assert_calls_async(1).await; +} diff --git a/nexus/tests/output/uncovered-authz-endpoints.txt b/nexus/tests/output/uncovered-authz-endpoints.txt index 8a639f1224c..258d9065fe6 100644 --- a/nexus/tests/output/uncovered-authz-endpoints.txt +++ b/nexus/tests/output/uncovered-authz-endpoints.txt @@ -19,3 +19,4 @@ login_saml (post "/login/{silo_name}/saml/{provi login_local (post "/v1/login/{silo_name}/local") logout (post "/v1/logout") networking_switch_port_lldp_config_update (post "/v1/system/hardware/switch-port/{port}/lldp/config") +webhook_delivery_resend (post "/v1/webhooks/deliveries/{event_id}/resend") diff --git a/nexus/types/Cargo.toml b/nexus/types/Cargo.toml index dd01d8a242d..36e1ffe0585 100644 --- a/nexus/types/Cargo.toml +++ b/nexus/types/Cargo.toml @@ -31,7 +31,7 @@ openssl.workspace = true oxql-types.workspace = true oxnet.workspace = true parse-display.workspace = true -schemars = { workspace = true, features = ["chrono", "uuid1"] } +schemars = { workspace = true, features = ["chrono", "uuid1", "url"] } serde.workspace = true serde_json.workspace = true serde_with.workspace = true @@ -43,6 +43,7 @@ thiserror.workspace = true newtype-uuid.workspace = true update-engine.workspace = true uuid.workspace = true +url = { workspace = true, features = ["serde"] } api_identity.workspace = true gateway-client.workspace = true diff --git a/nexus/types/src/external_api/params.rs b/nexus/types/src/external_api/params.rs index 90f5e171d5f..36d7d32a3f1 100644 --- a/nexus/types/src/external_api/params.rs +++ b/nexus/types/src/external_api/params.rs @@ -29,6 +29,7 @@ use std::collections::BTreeMap; use std::collections::BTreeSet; use std::collections::HashMap; use std::{net::IpAddr, str::FromStr}; +use url::Url; use 
uuid::Uuid; macro_rules! path_param { @@ -2373,3 +2374,140 @@ pub struct DeviceAccessTokenRequest { pub device_code: String, pub client_id: Uuid, } + +// Webhooks + +/// Query params for listing webhook event classes. +#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] +pub struct EventClassFilter { + /// An optional glob pattern for filtering event class names. + /// + /// If provided, only event classes which match this glob pattern will be + /// included in the response. + pub filter: Option, +} + +#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] +pub struct EventClassPage { + /// The last webhook event class returned by a previous page. + pub last_seen: String, +} + +#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] +pub struct WebhookReceiverSelector { + /// The name or ID of the webhook receiver. + pub receiver: NameOrId, +} + +#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] +pub struct WebhookCreate { + #[serde(flatten)] + pub identity: IdentityMetadataCreateParams, + + /// The URL that webhook notification requests should be sent to + pub endpoint: Url, + + /// A non-empty list of secret keys used to sign webhook payloads. + pub secrets: Vec, + + /// A list of webhook event classes to subscribe to. + /// + /// If this list is empty or is not included in the request body, the + /// webhook will not be subscribed to any events. + #[serde(default)] + pub events: Vec, +} + +/// Parameters to update a webhook configuration. +#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] +pub struct WebhookReceiverUpdate { + #[serde(flatten)] + pub identity: IdentityMetadataUpdateParams, + + /// The URL that webhook notification requests should be sent to + pub endpoint: Option, + + /// A list of webhook event classes to subscribe to. + /// + /// If this list is empty, the webhook will not be subscribed to any events. 
+ pub events: Option>, +} + +#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] +pub struct WebhookSecretCreate { + /// The value of the shared secret key. + pub secret: String, +} + +#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] +pub struct WebhookSecretSelector { + /// ID of the secret. + pub secret_id: Uuid, +} + +#[derive(Deserialize, JsonSchema)] +pub struct WebhookEventSelector { + /// UUID of the event + pub event_id: Uuid, +} + +#[derive(Copy, Clone, Debug, Deserialize, Serialize, JsonSchema)] +pub struct WebhookDeliveryStateFilter { + /// If true, include only deliveries which are currently in progress. + /// + /// A delivery is considered "pending" if it has not yet been sent at all, + /// or if a delivery attempt has failed but the delivery has retries + /// remaining. + pub pending: Option, + /// If true, include only deliveries which have failed permanently. + /// + /// A delivery fails permanently when the retry limit of three total + /// attempts is reached without a successful delivery. + pub failed: Option, + /// If true, include only deliveries which have succeeded. 
+ pub delivered: Option, +} + +impl Default for WebhookDeliveryStateFilter { + fn default() -> Self { + Self::ALL + } +} + +impl WebhookDeliveryStateFilter { + pub const ALL: Self = + Self { pending: Some(true), failed: Some(true), delivered: Some(true) }; + + pub fn include_pending(&self) -> bool { + self.pending == Some(true) || self.is_all_none() + } + + pub fn include_failed(&self) -> bool { + self.failed == Some(true) || self.is_all_none() + } + + pub fn include_delivered(&self) -> bool { + self.delivered == Some(true) || self.is_all_none() + } + + pub fn include_all(&self) -> bool { + self.is_all_none() + || (self.pending == Some(true) + && self.failed == Some(true) + && self.delivered == Some(true)) + } + + fn is_all_none(&self) -> bool { + self.pending.is_none() + && self.failed.is_none() + && self.delivered.is_none() + } +} + +#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] +pub struct WebhookProbe { + /// If true, resend all events that have not been delivered successfully if + /// the probe request succeeds. + #[serde(default)] + pub resend: bool, +} diff --git a/nexus/types/src/external_api/views.rs b/nexus/types/src/external_api/views.rs index f33036bf8c9..99919e8d0bc 100644 --- a/nexus/types/src/external_api/views.rs +++ b/nexus/types/src/external_api/views.rs @@ -17,6 +17,7 @@ use omicron_common::api::external::{ Digest, Error, FailureDomain, IdentityMetadata, InstanceState, Name, ObjectIdentity, RoleName, SimpleIdentityOrName, }; +use omicron_uuid_kinds::{WebhookEventUuid, WebhookReceiverUuid}; use oxnet::{Ipv4Net, Ipv6Net}; use schemars::JsonSchema; use semver::Version; @@ -26,6 +27,7 @@ use std::collections::BTreeSet; use std::fmt; use std::net::IpAddr; use strum::{EnumIter, IntoEnumIterator}; +use url::Url; use uuid::Uuid; use super::params::PhysicalDiskKind; @@ -1052,6 +1054,246 @@ pub struct OxqlQueryResult { pub tables: Vec, } +// WEBHOOKS + +/// A webhook event class. 
+#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] +pub struct EventClass { + /// The name of the event class. + pub name: String, + + /// A description of what this event class represents. + pub description: String, +} + +/// The configuration for a webhook. +#[derive( + ObjectIdentity, Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq, +)] +pub struct WebhookReceiver { + #[serde(flatten)] + pub identity: IdentityMetadata, + + /// The URL that webhook notification requests are sent to. + pub endpoint: Url, + // A list containing the IDs of the secret keys used to sign payloads sent + // to this receiver. + pub secrets: Vec, + /// The list of event classes to which this receiver is subscribed. + pub events: Vec, +} + +/// A list of the IDs of secrets associated with a webhook. +#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)] +pub struct WebhookSecrets { + pub secrets: Vec, +} + +/// The public ID of a secret key assigned to a webhook. +#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema, PartialEq)] +pub struct WebhookSecretId { + pub id: Uuid, +} + +/// A delivery of a webhook event. +#[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize, JsonSchema)] +pub struct WebhookDelivery { + /// The UUID of this delivery attempt. + pub id: Uuid, + + /// The UUID of the webhook receiver that this event was delivered to. + pub webhook_id: WebhookReceiverUuid, + + /// The event class. + pub event_class: String, + + /// The UUID of the event. + pub event_id: WebhookEventUuid, + + /// The state of this delivery. + pub state: WebhookDeliveryState, + + /// Why this delivery was performed. + pub trigger: WebhookDeliveryTrigger, + + /// Individual attempts to deliver this webhook event, and their outcomes. + pub attempts: Vec, + + /// The time at which this delivery began (i.e. the event was dispatched to + /// the receiver). + pub time_started: DateTime, +} + +/// The state of a webhook delivery attempt. 
+#[derive( + Copy, + Clone, + Debug, + Eq, + PartialEq, + Deserialize, + Serialize, + JsonSchema, + strum::VariantArray, +)] +#[serde(rename_all = "snake_case")] +pub enum WebhookDeliveryState { + /// The webhook event has not yet been delivered successfully. + /// + /// Either no delivery attempts have yet been performed, or the delivery has + /// failed at least once but has retries remaining. + Pending, + /// The webhook event has been delivered successfully. + Delivered, + /// The webhook delivery attempt has failed permanently and will not be + /// retried again. + Failed, +} + +impl WebhookDeliveryState { + pub const ALL: &[Self] = ::VARIANTS; + + pub fn as_str(&self) -> &'static str { + match self { + Self::Pending => "pending", + Self::Delivered => "delivered", + Self::Failed => "failed", + } + } +} + +impl fmt::Display for WebhookDeliveryState { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str(self.as_str()) + } +} + +/// The reason a webhook event was delivered +#[derive(Clone, Debug, Eq, PartialEq, Deserialize, Serialize, JsonSchema)] +#[serde(rename_all = "snake_case")] +pub enum WebhookDeliveryTrigger { + /// Delivery was triggered by the event occurring for the first time. + Event, + /// Delivery was triggered by a request to resend the event. + Resend, + /// This delivery is a liveness probe. + Probe, +} + +impl WebhookDeliveryTrigger { + pub fn as_str(&self) -> &'static str { + match self { + Self::Event => "event", + Self::Resend => "resend", + Self::Probe => "probe", + } + } +} + +impl fmt::Display for WebhookDeliveryTrigger { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str(self.as_str()) + } +} + +/// An individual delivery attempt for a webhook event. +/// +/// This represents a single HTTP request that was sent to the receiver, and its +/// outcome. 
+#[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize, JsonSchema)] +pub struct WebhookDeliveryAttempt { + /// The time at which the webhook delivery was attempted. + pub time_sent: DateTime, + + /// The attempt number. + pub attempt: usize, + + /// The outcome of this delivery attempt: either the event was delivered + /// successfully, or the request failed for one of several reasons. + pub result: WebhookDeliveryAttemptResult, + + pub response: Option, +} + +#[derive( + Clone, + Debug, + PartialEq, + Eq, + Deserialize, + Serialize, + JsonSchema, + strum::VariantArray, +)] +#[serde(rename_all = "snake_case")] +pub enum WebhookDeliveryAttemptResult { + /// The webhook event has been delivered successfully. + Succeeded, + /// A webhook request was sent to the endpoint, and it + /// returned a HTTP error status code indicating an error. + FailedHttpError, + /// The webhook request could not be sent to the receiver endpoint. + FailedUnreachable, + /// A connection to the receiver endpoint was successfully established, but + /// no response was received within the delivery timeout. + FailedTimeout, +} + +impl WebhookDeliveryAttemptResult { + pub const ALL: &[Self] = ::VARIANTS; + pub const ALL_FAILED: &[Self] = + &[Self::FailedHttpError, Self::FailedUnreachable, Self::FailedTimeout]; + + pub fn as_str(&self) -> &'static str { + match self { + Self::Succeeded => "succeeded", + Self::FailedHttpError => "failed_http_error", + Self::FailedTimeout => "failed_timeout", + Self::FailedUnreachable => "failed_unreachable", + } + } + + /// Returns `true` if this `WebhookDeliveryAttemptResult` represents a failure + pub fn is_failed(&self) -> bool { + *self != Self::Succeeded + } +} + +impl fmt::Display for WebhookDeliveryAttemptResult { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str(self.as_str()) + } +} + +/// The response received from a webhook receiver endpoint. 
+#[derive(Clone, Debug, Eq, PartialEq, Deserialize, Serialize, JsonSchema)] +pub struct WebhookDeliveryResponse { + /// The HTTP status code returned from the webhook endpoint. + pub status: u16, + /// The response time of the webhook endpoint, in milliseconds. + pub duration_ms: usize, +} + +#[derive(Clone, Debug, Eq, PartialEq, Deserialize, Serialize, JsonSchema)] +pub struct WebhookDeliveryId { + pub delivery_id: Uuid, +} + +/// Data describing the result of a webhook liveness probe attempt. +#[derive(Clone, Debug, Eq, PartialEq, Deserialize, Serialize, JsonSchema)] +pub struct WebhookProbeResult { + /// The outcome of the probe request. + pub probe: WebhookDelivery, + /// If the probe request succeeded, and resending failed deliveries on + /// success was requested, the number of new delivery attempts started. + /// Otherwise, if the probe did not succeed, or resending failed deliveries + /// was not requested, this is null. + /// + /// Note that this may be 0, if there were no events found which had not + /// been delivered successfully to this receiver. + pub resends_started: Option, +} + // UPDATE /// Source of a system software target release. diff --git a/nexus/types/src/internal_api/background.rs b/nexus/types/src/internal_api/background.rs index 67d0688bd23..5434c28eb3e 100644 --- a/nexus/types/src/internal_api/background.rs +++ b/nexus/types/src/internal_api/background.rs @@ -2,6 +2,7 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. 
+use crate::external_api::views; use chrono::DateTime; use chrono::Utc; use omicron_common::api::external::Generation; @@ -10,6 +11,9 @@ use omicron_uuid_kinds::BlueprintUuid; use omicron_uuid_kinds::CollectionUuid; use omicron_uuid_kinds::SledUuid; use omicron_uuid_kinds::SupportBundleUuid; +use omicron_uuid_kinds::WebhookDeliveryUuid; +use omicron_uuid_kinds::WebhookEventUuid; +use omicron_uuid_kinds::WebhookReceiverUuid; use serde::Deserialize; use serde::Serialize; use std::collections::BTreeMap; @@ -451,6 +455,69 @@ impl slog::KV for DebugDatasetsRendezvousStats { } } +/// The status of a `webhook_dispatcher` background task activation. +#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] +pub struct WebhookDispatcherStatus { + pub globs_reprocessed: BTreeMap, + + pub glob_version: semver::Version, + + /// The webhook events dispatched on this activation. + pub dispatched: Vec, + + /// Webhook events which did not have receivers. + pub no_receivers: Vec, + + /// Any errors that occurred during activation. 
+ pub errors: Vec, +} + +type ReprocessedGlobs = BTreeMap>; + +#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] +pub enum WebhookGlobStatus { + AlreadyReprocessed, + Reprocessed { + created: usize, + deleted: usize, + prev_version: semver::Version, + }, +} + +#[derive(Debug, Copy, Clone, Eq, PartialEq, Serialize, Deserialize)] +pub struct WebhookDispatched { + pub event_id: WebhookEventUuid, + pub subscribed: usize, + pub dispatched: usize, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct WebhookDeliveratorStatus { + pub by_rx: BTreeMap, + pub error: Option, +} + +#[derive(Debug, Clone, Serialize, Deserialize, Default)] +pub struct WebhookRxDeliveryStatus { + pub ready: usize, + pub delivered_ok: usize, + pub already_delivered: usize, + pub in_progress: usize, + pub failed_deliveries: Vec, + pub delivery_errors: BTreeMap, + pub error: Option, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct WebhookDeliveryFailure { + pub delivery_id: WebhookDeliveryUuid, + pub event_id: WebhookEventUuid, + pub attempt: usize, + pub result: views::WebhookDeliveryAttemptResult, + pub response_status: Option, + pub response_duration: Option, +} + /// The status of a `read_only_region_replacement_start` background task /// activation #[derive(Serialize, Deserialize, Default, Debug, PartialEq, Eq)] diff --git a/openapi/nexus.json b/openapi/nexus.json index 43485176b82..f05b6755c77 100644 --- a/openapi/nexus.json +++ b/openapi/nexus.json @@ -11929,6 +11929,575 @@ } } } + }, + "/v1/webhooks/deliveries": { + "get": { + "tags": [ + "system/webhooks" + ], + "summary": "List delivery attempts to a webhook receiver", + "description": "Optional query parameters to this endpoint may be used to filter deliveries by state. If none of the `failed`, `pending` or `delivered` query parameters are present, all deliveries are returned. 
If one or more of these parameters are provided, only those which are set to \"true\" are included in the response.", + "operationId": "webhook_delivery_list", + "parameters": [ + { + "in": "query", + "name": "receiver", + "description": "The name or ID of the webhook receiver.", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "delivered", + "description": "If true, include only deliveries which have succeeded.", + "schema": { + "nullable": true, + "type": "boolean" + } + }, + { + "in": "query", + "name": "failed", + "description": "If true, include only deliveries which have failed permanently.\n\nA delivery fails permanently when the retry limit of three total attempts is reached without a successful delivery.", + "schema": { + "nullable": true, + "type": "boolean" + } + }, + { + "in": "query", + "name": "pending", + "description": "If true, include only deliveries which are currently in progress.\n\nA delivery is considered \"pending\" if it has not yet been sent at all, or if a delivery attempt has failed but the delivery has retries remaining.", + "schema": { + "nullable": true, + "type": "boolean" + } + }, + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/TimeAndIdSortMode" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WebhookDeliveryResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": 
"#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [] + } + } + }, + "/v1/webhooks/deliveries/{event_id}/resend": { + "post": { + "tags": [ + "system/webhooks" + ], + "summary": "Request re-delivery of webhook event", + "operationId": "webhook_delivery_resend", + "parameters": [ + { + "in": "path", + "name": "event_id", + "description": "UUID of the event", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + }, + { + "in": "query", + "name": "receiver", + "description": "The name or ID of the webhook receiver.", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "201": { + "description": "successful creation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WebhookDeliveryId" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/webhooks/event-classes": { + "get": { + "tags": [ + "system/webhooks" + ], + "summary": "List webhook event classes", + "operationId": "webhook_event_class_list", + "parameters": [ + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "filter", + "description": "An optional glob pattern for filtering event class names.\n\nIf provided, only event classes which match this glob pattern will be included in the response.", + "schema": { + "nullable": true, + "type": "string" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": 
"#/components/schemas/EventClassResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [] + } + } + }, + "/v1/webhooks/receivers": { + "get": { + "tags": [ + "system/webhooks" + ], + "summary": "List webhook receivers.", + "operationId": "webhook_receiver_list", + "parameters": [ + { + "in": "query", + "name": "limit", + "description": "Maximum number of items returned by a single call", + "schema": { + "nullable": true, + "type": "integer", + "format": "uint32", + "minimum": 1 + } + }, + { + "in": "query", + "name": "page_token", + "description": "Token returned by previous call to retrieve the subsequent page", + "schema": { + "nullable": true, + "type": "string" + } + }, + { + "in": "query", + "name": "sort_by", + "schema": { + "$ref": "#/components/schemas/NameOrIdSortMode" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WebhookReceiverResultsPage" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + }, + "x-dropshot-pagination": { + "required": [] + } + }, + "post": { + "tags": [ + "system/webhooks" + ], + "summary": "Create webhook receiver.", + "operationId": "webhook_receiver_create", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WebhookCreate" + } + } + }, + "required": true + }, + "responses": { + "201": { + "description": "successful creation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WebhookReceiver" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/webhooks/receivers/{receiver}": { + "get": { + "tags": [ + "system/webhooks" + 
], + "summary": "Fetch webhook receiver", + "operationId": "webhook_receiver_view", + "parameters": [ + { + "in": "path", + "name": "receiver", + "description": "The name or ID of the webhook receiver.", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WebhookReceiver" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "put": { + "tags": [ + "system/webhooks" + ], + "summary": "Update webhook receiver", + "description": "Note that receiver secrets are NOT added or removed using this endpoint. Instead, use the `/v1/webhooks/{secrets}/?receiver={receiver}` endpoint to add and remove secrets.", + "operationId": "webhook_receiver_update", + "parameters": [ + { + "in": "path", + "name": "receiver", + "description": "The name or ID of the webhook receiver.", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WebhookReceiverUpdate" + } + } + }, + "required": true + }, + "responses": { + "204": { + "description": "resource updated" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "delete": { + "tags": [ + "system/webhooks" + ], + "summary": "Delete webhook receiver.", + "operationId": "webhook_receiver_delete", + "parameters": [ + { + "in": "path", + "name": "receiver", + "description": "The name or ID of the webhook receiver.", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "204": { + "description": "successful deletion" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": 
"#/components/responses/Error" + } + } + } + }, + "/v1/webhooks/receivers/{receiver}/probe": { + "post": { + "tags": [ + "system/webhooks" + ], + "summary": "Send liveness probe to webhook receiver", + "description": "This endpoint synchronously sends a liveness probe request to the selected webhook receiver. The response message describes the outcome of the probe request: either the response from the receiver endpoint, or an indication of why the probe failed.\n\nNote that the response status is `200 OK` as long as a probe request was able to be sent to the receiver endpoint. If the receiver responds with another status code, including an error, this will be indicated by the response body, *not* the status of the response.\n\nThe `resend` query parameter can be used to request re-delivery of failed events if the liveness probe succeeds. If it is set to true and the webhook receiver responds to the probe request with a `2xx` status code, any events for which delivery to this receiver has failed will be queued for re-delivery.", + "operationId": "webhook_receiver_probe", + "parameters": [ + { + "in": "path", + "name": "receiver", + "description": "The name or ID of the webhook receiver.", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + }, + { + "in": "query", + "name": "resend", + "description": "If true, resend all events that have not been delivered successfully if the probe request succeeds.", + "schema": { + "type": "boolean" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WebhookProbeResult" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/webhooks/secrets": { + "get": { + "tags": [ + "system/webhooks" + ], + "summary": "List webhook receiver secret IDs", + "operationId": "webhook_secrets_list", + "parameters": 
[ + { + "in": "query", + "name": "receiver", + "description": "The name or ID of the webhook receiver.", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WebhookSecrets" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + }, + "post": { + "tags": [ + "system/webhooks" + ], + "summary": "Add secret to webhook receiver", + "operationId": "webhook_secrets_add", + "parameters": [ + { + "in": "query", + "name": "receiver", + "description": "The name or ID of the webhook receiver.", + "required": true, + "schema": { + "$ref": "#/components/schemas/NameOrId" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WebhookSecretCreate" + } + } + }, + "required": true + }, + "responses": { + "201": { + "description": "successful creation", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/WebhookSecretId" + } + } + } + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } + }, + "/v1/webhooks/secrets/{secret_id}": { + "delete": { + "tags": [ + "system/webhooks" + ], + "summary": "Remove secret from webhook receiver", + "operationId": "webhook_secrets_delete", + "parameters": [ + { + "in": "path", + "name": "secret_id", + "description": "ID of the secret.", + "required": true, + "schema": { + "type": "string", + "format": "uuid" + } + } + ], + "responses": { + "204": { + "description": "successful deletion" + }, + "4XX": { + "$ref": "#/components/responses/Error" + }, + "5XX": { + "$ref": "#/components/responses/Error" + } + } + } } }, "components": { @@ -15815,19 +16384,58 @@ "description": "Error information from a response.", "type": 
"object", "properties": { - "error_code": { + "error_code": { + "type": "string" + }, + "message": { + "type": "string" + }, + "request_id": { + "type": "string" + } + }, + "required": [ + "message", + "request_id" + ] + }, + "EventClass": { + "description": "A webhook event class.", + "type": "object", + "properties": { + "description": { + "description": "A description of what this event class represents.", "type": "string" }, - "message": { + "name": { + "description": "The name of the event class.", "type": "string" + } + }, + "required": [ + "description", + "name" + ] + }, + "EventClassResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/EventClass" + } }, - "request_id": { + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", "type": "string" } }, "required": [ - "message", - "request_id" + "items" ] }, "ExternalIp": { @@ -20267,118 +20875,496 @@ "viewer" ] }, - "ProjectRolePolicy": { - "description": "Policy for a particular resource\n\nNote that the Policy only describes access granted explicitly for this resource. The policies of parent resources can also cause a user to have access to this resource.", + "ProjectRolePolicy": { + "description": "Policy for a particular resource\n\nNote that the Policy only describes access granted explicitly for this resource. 
The policies of parent resources can also cause a user to have access to this resource.", + "type": "object", + "properties": { + "role_assignments": { + "description": "Roles directly assigned on this resource", + "type": "array", + "items": { + "$ref": "#/components/schemas/ProjectRoleRoleAssignment" + } + } + }, + "required": [ + "role_assignments" + ] + }, + "ProjectRoleRoleAssignment": { + "description": "Describes the assignment of a particular role on a particular resource to a particular identity (user, group, etc.)\n\nThe resource is not part of this structure. Rather, `RoleAssignment`s are put into a `Policy` and that Policy is applied to a particular resource.", + "type": "object", + "properties": { + "identity_id": { + "type": "string", + "format": "uuid" + }, + "identity_type": { + "$ref": "#/components/schemas/IdentityType" + }, + "role_name": { + "$ref": "#/components/schemas/ProjectRole" + } + }, + "required": [ + "identity_id", + "identity_type", + "role_name" + ] + }, + "ProjectUpdate": { + "description": "Updateable properties of a `Project`", + "type": "object", + "properties": { + "description": { + "nullable": true, + "type": "string" + }, + "name": { + "nullable": true, + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + } + } + }, + "Quantile": { + "description": "Structure for estimating the p-quantile of a population.\n\nThis is based on the P² algorithm for estimating quantiles using constant space.\n\nThe algorithm consists of maintaining five markers: the minimum, the p/2-, p-, and (1 + p)/2 quantiles, and the maximum.", + "type": "object", + "properties": { + "desired_marker_positions": { + "description": "The desired marker positions.", + "type": "array", + "items": { + "type": "number", + "format": "double" + }, + "minItems": 5, + "maxItems": 5 + }, + "marker_heights": { + "description": "The heights of the markers.", + "type": "array", + "items": { + "type": "number", + "format": "double" + }, + "minItems": 5, + 
"maxItems": 5 + }, + "marker_positions": { + "description": "The positions of the markers.\n\nWe track sample size in the 5th position, as useful observations won't start until we've filled the heights at the 6th sample anyway This does deviate from the paper, but it's a more useful representation that works according to the paper's algorithm.", + "type": "array", + "items": { + "type": "integer", + "format": "uint64", + "minimum": 0 + }, + "minItems": 5, + "maxItems": 5 + }, + "p": { + "description": "The p value for the quantile.", + "type": "number", + "format": "double" + } + }, + "required": [ + "desired_marker_positions", + "marker_heights", + "marker_positions", + "p" + ] + }, + "Rack": { + "description": "View of an Rack", + "type": "object", + "properties": { + "id": { + "description": "unique, immutable, system-controlled identifier for each resource", + "type": "string", + "format": "uuid" + }, + "time_created": { + "description": "timestamp when this resource was created", + "type": "string", + "format": "date-time" + }, + "time_modified": { + "description": "timestamp when this resource was last modified", + "type": "string", + "format": "date-time" + } + }, + "required": [ + "id", + "time_created", + "time_modified" + ] + }, + "RackResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/Rack" + } + }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + }, + "Role": { + "description": "View of a Role", + "type": "object", + "properties": { + "description": { + "type": "string" + }, + "name": { + "$ref": "#/components/schemas/RoleName" + } + }, + "required": [ + "description", + "name" + ] + }, + "RoleName": { + "title": "A name for a built-in role", + 
"description": "Role names consist of two string components separated by dot (\".\").", + "type": "string", + "pattern": "[a-z-]+\\.[a-z-]+", + "maxLength": 63 + }, + "RoleResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/Role" + } + }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + }, + "Route": { + "description": "A route to a destination network through a gateway address.", + "type": "object", + "properties": { + "dst": { + "description": "The route destination.", + "allOf": [ + { + "$ref": "#/components/schemas/IpNet" + } + ] + }, + "gw": { + "description": "The route gateway.", + "type": "string", + "format": "ip" + }, + "rib_priority": { + "nullable": true, + "description": "Local preference for route. 
Higher preference indicates precedence within and across protocols.", + "type": "integer", + "format": "uint8", + "minimum": 0 + }, + "vid": { + "nullable": true, + "description": "VLAN id the gateway is reachable over.", + "type": "integer", + "format": "uint16", + "minimum": 0 + } + }, + "required": [ + "dst", + "gw" + ] + }, + "RouteConfig": { + "description": "Route configuration data associated with a switch port configuration.", "type": "object", "properties": { - "role_assignments": { - "description": "Roles directly assigned on this resource", + "routes": { + "description": "The set of routes assigned to a switch port.", "type": "array", "items": { - "$ref": "#/components/schemas/ProjectRoleRoleAssignment" + "$ref": "#/components/schemas/Route" } } }, "required": [ - "role_assignments" + "routes" ] }, - "ProjectRoleRoleAssignment": { - "description": "Describes the assignment of a particular role on a particular resource to a particular identity (user, group, etc.)\n\nThe resource is not part of this structure.
Rather, `RoleAssignment`s are put into a `Policy` and that Policy is applied to a particular resource.", - "type": "object", - "properties": { - "identity_id": { - "type": "string", - "format": "uuid" + "RouteDestination": { + "description": "A `RouteDestination` is used to match traffic with a routing rule based on the destination of that traffic.\n\nWhen traffic is to be sent to a destination that is within a given `RouteDestination`, the corresponding `RouterRoute` applies, and traffic will be forward to the `RouteTarget` for that rule.", + "oneOf": [ + { + "description": "Route applies to traffic destined for the specified IP address", + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "ip" + ] + }, + "value": { + "type": "string", + "format": "ip" + } + }, + "required": [ + "type", + "value" + ] }, - "identity_type": { - "$ref": "#/components/schemas/IdentityType" + { + "description": "Route applies to traffic destined for the specified IP subnet", + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "ip_net" + ] + }, + "value": { + "$ref": "#/components/schemas/IpNet" + } + }, + "required": [ + "type", + "value" + ] }, - "role_name": { - "$ref": "#/components/schemas/ProjectRole" + { + "description": "Route applies to traffic destined for the specified VPC", + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "vpc" + ] + }, + "value": { + "$ref": "#/components/schemas/Name" + } + }, + "required": [ + "type", + "value" + ] + }, + { + "description": "Route applies to traffic destined for the specified VPC subnet", + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "subnet" + ] + }, + "value": { + "$ref": "#/components/schemas/Name" + } + }, + "required": [ + "type", + "value" + ] } - }, - "required": [ - "identity_id", - "identity_type", - "role_name" ] }, - "ProjectUpdate": { - "description": "Updateable properties of a `Project`", - 
"type": "object", - "properties": { - "description": { - "nullable": true, - "type": "string" + "RouteTarget": { + "description": "A `RouteTarget` describes the possible locations that traffic matching a route destination can be sent.", + "oneOf": [ + { + "description": "Forward traffic to a particular IP address.", + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "ip" + ] + }, + "value": { + "type": "string", + "format": "ip" + } + }, + "required": [ + "type", + "value" + ] }, - "name": { - "nullable": true, - "allOf": [ - { + { + "description": "Forward traffic to a VPC", + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "vpc" + ] + }, + "value": { "$ref": "#/components/schemas/Name" } + }, + "required": [ + "type", + "value" ] - } - } - }, - "Quantile": { - "description": "Structure for estimating the p-quantile of a population.\n\nThis is based on the P² algorithm for estimating quantiles using constant space.\n\nThe algorithm consists of maintaining five markers: the minimum, the p/2-, p-, and (1 + p)/2 quantiles, and the maximum.", - "type": "object", - "properties": { - "desired_marker_positions": { - "description": "The desired marker positions.", - "type": "array", - "items": { - "type": "number", - "format": "double" + }, + { + "description": "Forward traffic to a VPC Subnet", + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "subnet" + ] + }, + "value": { + "$ref": "#/components/schemas/Name" + } }, - "minItems": 5, - "maxItems": 5 + "required": [ + "type", + "value" + ] }, - "marker_heights": { - "description": "The heights of the markers.", - "type": "array", - "items": { - "type": "number", - "format": "double" + { + "description": "Forward traffic to a specific instance", + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "instance" + ] + }, + "value": { + "$ref": "#/components/schemas/Name" + } }, - "minItems": 5, - 
"maxItems": 5 + "required": [ + "type", + "value" + ] }, - "marker_positions": { - "description": "The positions of the markers.\n\nWe track sample size in the 5th position, as useful observations won't start until we've filled the heights at the 6th sample anyway This does deviate from the paper, but it's a more useful representation that works according to the paper's algorithm.", - "type": "array", - "items": { - "type": "integer", - "format": "uint64", - "minimum": 0 + { + "description": "Forward traffic to an internet gateway", + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "internet_gateway" + ] + }, + "value": { + "$ref": "#/components/schemas/Name" + } }, - "minItems": 5, - "maxItems": 5 + "required": [ + "type", + "value" + ] }, - "p": { - "description": "The p value for the quantile.", - "type": "number", - "format": "double" + { + "description": "Drop matching traffic", + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "drop" + ] + } + }, + "required": [ + "type" + ] } - }, - "required": [ - "desired_marker_positions", - "marker_heights", - "marker_positions", - "p" ] }, - "Rack": { - "description": "View of an Rack", + "RouterRoute": { + "description": "A route defines a rule that governs where traffic should be sent based on its destination.", "type": "object", "properties": { + "description": { + "description": "human-readable free-form text about a resource", + "type": "string" + }, + "destination": { + "description": "Selects which traffic this routing rule will apply to", + "allOf": [ + { + "$ref": "#/components/schemas/RouteDestination" + } + ] + }, "id": { "description": "unique, immutable, system-controlled identifier for each resource", "type": "string", "format": "uuid" }, + "kind": { + "description": "Describes the kind of router. Set at creation. 
`read-only`", + "allOf": [ + { + "$ref": "#/components/schemas/RouterRouteKind" + } + ] + }, + "name": { + "description": "unique, mutable, user-controlled identifier for each resource", + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + }, + "target": { + "description": "The location that matched packets should be forwarded to", + "allOf": [ + { + "$ref": "#/components/schemas/RouteTarget" + } + ] + }, "time_created": { "description": "timestamp when this resource was created", "type": "string", @@ -20388,59 +21374,93 @@ "description": "timestamp when this resource was last modified", "type": "string", "format": "date-time" + }, + "vpc_router_id": { + "description": "The ID of the VPC Router to which the route belongs", + "type": "string", + "format": "uuid" } }, "required": [ + "description", + "destination", "id", + "kind", + "name", + "target", "time_created", - "time_modified" - ] - }, - "RackResultsPage": { - "description": "A single page of results", - "type": "object", - "properties": { - "items": { - "description": "list of items on this page of results", - "type": "array", - "items": { - "$ref": "#/components/schemas/Rack" - } - }, - "next_page": { - "nullable": true, - "description": "token used to fetch the next page of results (if any)", - "type": "string" - } - }, - "required": [ - "items" + "time_modified", + "vpc_router_id" ] }, - "Role": { - "description": "View of a Role", + "RouterRouteCreate": { + "description": "Create-time parameters for a `RouterRoute`", "type": "object", "properties": { "description": { "type": "string" }, + "destination": { + "description": "Selects which traffic this routing rule will apply to.", + "allOf": [ + { + "$ref": "#/components/schemas/RouteDestination" + } + ] + }, "name": { - "$ref": "#/components/schemas/RoleName" + "$ref": "#/components/schemas/Name" + }, + "target": { + "description": "The location that matched packets should be forwarded to.", + "allOf": [ + { + "$ref": 
"#/components/schemas/RouteTarget" + } + ] } }, "required": [ "description", - "name" + "destination", + "name", + "target" ] }, - "RoleName": { - "title": "A name for a built-in role", - "description": "Role names consist of two string components separated by dot (\".\").", - "type": "string", - "pattern": "[a-z-]+\\.[a-z-]+", - "maxLength": 63 + "RouterRouteKind": { + "description": "The kind of a `RouterRoute`\n\nThe kind determines certain attributes such as if the route is modifiable and describes how or where the route was created.", + "oneOf": [ + { + "description": "Determines the default destination of traffic, such as whether it goes to the internet or not.\n\n`Destination: An Internet Gateway` `Modifiable: true`", + "type": "string", + "enum": [ + "default" + ] + }, + { + "description": "Automatically added for each VPC Subnet in the VPC\n\n`Destination: A VPC Subnet` `Modifiable: false`", + "type": "string", + "enum": [ + "vpc_subnet" + ] + }, + { + "description": "Automatically added when VPC peering is established\n\n`Destination: A different VPC` `Modifiable: false`", + "type": "string", + "enum": [ + "vpc_peering" + ] + }, + { + "description": "Created by a user; see `RouteTarget`\n\n`Destination: User defined` `Modifiable: true`", + "type": "string", + "enum": [ + "custom" + ] + } + ] }, - "RoleResultsPage": { + "RouterRouteResultsPage": { "description": "A single page of results", "type": "object", "properties": { @@ -20448,7 +21468,7 @@ "description": "list of items on this page of results", "type": "array", "items": { - "$ref": "#/components/schemas/Role" + "$ref": "#/components/schemas/RouterRoute" } }, "next_page": { @@ -20461,286 +21481,246 @@ "items" ] }, - "Route": { - "description": "A route to a destination network through a gateway address.", + "RouterRouteUpdate": { + "description": "Updateable properties of a `RouterRoute`", "type": "object", "properties": { - "dst": { - "description": "The route destination.", - "allOf": [ - { - 
"$ref": "#/components/schemas/IpNet" - } - ] - }, - "gw": { - "description": "The route gateway.", - "type": "string", - "format": "ip" - }, - "rib_priority": { - "nullable": true, - "description": "Local preference for route. Higher preference indictes precedence within and across protocols.", - "type": "integer", - "format": "uint8", - "minimum": 0 - }, - "vid": { + "description": { "nullable": true, - "description": "VLAN id the gateway is reachable over.", - "type": "integer", - "format": "uint16", - "minimum": 0 - } - }, - "required": [ - "dst", - "gw" - ] - }, - "RouteConfig": { - "description": "Route configuration data associated with a switch port configuration.", - "type": "object", - "properties": { - "routes": { - "description": "The set of routes assigned to a switch port.", - "type": "array", - "items": { - "$ref": "#/components/schemas/Route" - } - } - }, - "required": [ - "routes" - ] - }, - "RouteDestination": { - "description": "A `RouteDestination` is used to match traffic with a routing rule based on the destination of that traffic.\n\nWhen traffic is to be sent to a destination that is within a given `RouteDestination`, the corresponding `RouterRoute` applies, and traffic will be forward to the `RouteTarget` for that rule.", - "oneOf": [ - { - "description": "Route applies to traffic destined for the specified IP address", - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "ip" - ] - }, - "value": { - "type": "string", - "format": "ip" - } - }, - "required": [ - "type", - "value" - ] - }, - { - "description": "Route applies to traffic destined for the specified IP subnet", - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "ip_net" - ] - }, - "value": { - "$ref": "#/components/schemas/IpNet" - } - }, - "required": [ - "type", - "value" - ] + "type": "string" }, - { - "description": "Route applies to traffic destined for the specified VPC", - "type": "object", - "properties": { - 
"type": { - "type": "string", - "enum": [ - "vpc" - ] - }, - "value": { - "$ref": "#/components/schemas/Name" + "destination": { + "description": "Selects which traffic this routing rule will apply to.", + "allOf": [ + { + "$ref": "#/components/schemas/RouteDestination" } - }, - "required": [ - "type", - "value" ] }, - { - "description": "Route applies to traffic destined for the specified VPC subnet", - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "subnet" - ] - }, - "value": { + "name": { + "nullable": true, + "allOf": [ + { "$ref": "#/components/schemas/Name" } - }, - "required": [ - "type", - "value" + ] + }, + "target": { + "description": "The location that matched packets should be forwarded to.", + "allOf": [ + { + "$ref": "#/components/schemas/RouteTarget" + } ] } + }, + "required": [ + "destination", + "target" ] }, - "RouteTarget": { - "description": "A `RouteTarget` describes the possible locations that traffic matching a route destination can be sent.", - "oneOf": [ - { - "description": "Forward traffic to a particular IP address.", - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "ip" - ] - }, - "value": { - "type": "string", - "format": "ip" - } - }, - "required": [ - "type", - "value" - ] + "SamlIdentityProvider": { + "description": "Identity-related metadata that's included in nearly all public API objects", + "type": "object", + "properties": { + "acs_url": { + "description": "Service provider endpoint where the response will be sent", + "type": "string" }, - { - "description": "Forward traffic to a VPC", - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "vpc" - ] - }, - "value": { - "$ref": "#/components/schemas/Name" - } - }, - "required": [ - "type", - "value" - ] + "description": { + "description": "human-readable free-form text about a resource", + "type": "string" }, - { - "description": "Forward traffic to a VPC Subnet", - "type": 
"object", - "properties": { - "type": { - "type": "string", - "enum": [ - "subnet" - ] - }, - "value": { + "group_attribute_name": { + "nullable": true, + "description": "If set, attributes with this name will be considered to denote a user's group membership, where the values will be the group names.", + "type": "string" + }, + "id": { + "description": "unique, immutable, system-controlled identifier for each resource", + "type": "string", + "format": "uuid" + }, + "idp_entity_id": { + "description": "IdP's entity id", + "type": "string" + }, + "name": { + "description": "unique, mutable, user-controlled identifier for each resource", + "allOf": [ + { "$ref": "#/components/schemas/Name" } - }, - "required": [ - "type", - "value" ] }, - { - "description": "Forward traffic to a specific instance", - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "instance" - ] - }, - "value": { - "$ref": "#/components/schemas/Name" + "public_cert": { + "nullable": true, + "description": "Optional request signing public certificate (base64 encoded der file)", + "type": "string" + }, + "slo_url": { + "description": "Service provider endpoint where the idp should send log out requests", + "type": "string" + }, + "sp_client_id": { + "description": "SP's client id", + "type": "string" + }, + "technical_contact_email": { + "description": "Customer's technical contact for saml configuration", + "type": "string" + }, + "time_created": { + "description": "timestamp when this resource was created", + "type": "string", + "format": "date-time" + }, + "time_modified": { + "description": "timestamp when this resource was last modified", + "type": "string", + "format": "date-time" + } + }, + "required": [ + "acs_url", + "description", + "id", + "idp_entity_id", + "name", + "slo_url", + "sp_client_id", + "technical_contact_email", + "time_created", + "time_modified" + ] + }, + "SamlIdentityProviderCreate": { + "description": "Create-time identity-related 
parameters", + "type": "object", + "properties": { + "acs_url": { + "description": "service provider endpoint where the response will be sent", + "type": "string" + }, + "description": { + "type": "string" + }, + "group_attribute_name": { + "nullable": true, + "description": "If set, SAML attributes with this name will be considered to denote a user's group membership, where the attribute value(s) should be a comma-separated list of group names.", + "type": "string" + }, + "idp_entity_id": { + "description": "idp's entity id", + "type": "string" + }, + "idp_metadata_source": { + "description": "the source of an identity provider metadata descriptor", + "allOf": [ + { + "$ref": "#/components/schemas/IdpMetadataSource" } - }, - "required": [ - "type", - "value" ] }, - { - "description": "Forward traffic to an internet gateway", - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "internet_gateway" - ] - }, - "value": { - "$ref": "#/components/schemas/Name" + "name": { + "$ref": "#/components/schemas/Name" + }, + "signing_keypair": { + "nullable": true, + "description": "request signing key pair", + "default": null, + "allOf": [ + { + "$ref": "#/components/schemas/DerEncodedKeyPair" } - }, - "required": [ - "type", - "value" ] }, + "slo_url": { + "description": "service provider endpoint where the idp should send log out requests", + "type": "string" + }, + "sp_client_id": { + "description": "sp's client id", + "type": "string" + }, + "technical_contact_email": { + "description": "customer's technical contact for saml configuration", + "type": "string" + } + }, + "required": [ + "acs_url", + "description", + "idp_entity_id", + "idp_metadata_source", + "name", + "slo_url", + "sp_client_id", + "technical_contact_email" + ] + }, + "ServiceUsingCertificate": { + "description": "The service intended to use this certificate.", + "oneOf": [ { - "description": "Drop matching traffic", - "type": "object", - "properties": { - "type": { - "type": 
"string", - "enum": [ - "drop" - ] - } - }, - "required": [ - "type" + "description": "This certificate is intended for access to the external API.", + "type": "string", + "enum": [ + "external_api" ] } ] }, - "RouterRoute": { - "description": "A route defines a rule that governs where traffic should be sent based on its destination.", + "SetTargetReleaseParams": { + "description": "Parameters for PUT requests to `/v1/system/update/target-release`.", + "type": "object", + "properties": { + "system_version": { + "description": "Version of the system software to make the target release.", + "type": "string", + "pattern": "^(0|[1-9]\\d*)\\.(0|[1-9]\\d*)\\.(0|[1-9]\\d*)(?:-((?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\\.(?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\\+([0-9a-zA-Z-]+(?:\\.[0-9a-zA-Z-]+)*))?$" + } + }, + "required": [ + "system_version" + ] + }, + "Silo": { + "description": "View of a Silo\n\nA Silo is the highest level unit of isolation.", "type": "object", "properties": { "description": { "description": "human-readable free-form text about a resource", "type": "string" }, - "destination": { - "description": "Selects which traffic this routing rule will apply to", - "allOf": [ - { - "$ref": "#/components/schemas/RouteDestination" - } - ] + "discoverable": { + "description": "A silo where discoverable is false can be retrieved only by its id - it will not be part of the \"list all silos\" output.", + "type": "boolean" }, "id": { "description": "unique, immutable, system-controlled identifier for each resource", "type": "string", "format": "uuid" }, - "kind": { - "description": "Describes the kind of router. Set at creation. 
`read-only`", + "identity_mode": { + "description": "How users and groups are managed in this Silo", "allOf": [ { - "$ref": "#/components/schemas/RouterRouteKind" + "$ref": "#/components/schemas/SiloIdentityMode" } ] }, + "mapped_fleet_roles": { + "description": "Mapping of which Fleet roles are conferred by each Silo role\n\nThe default is that no Fleet roles are conferred by any Silo roles unless there's a corresponding entry in this map.", + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "$ref": "#/components/schemas/FleetRole" + }, + "uniqueItems": true + } + }, "name": { "description": "unique, mutable, user-controlled identifier for each resource", "allOf": [ @@ -20749,14 +21729,6 @@ } ] }, - "target": { - "description": "The location that matched packets should be forwarded to", - "allOf": [ - { - "$ref": "#/components/schemas/RouteTarget" - } - ] - }, "time_created": { "description": "timestamp when this resource was created", "type": "string", @@ -20766,93 +21738,232 @@ "description": "timestamp when this resource was last modified", "type": "string", "format": "date-time" - }, - "vpc_router_id": { - "description": "The ID of the VPC Router to which the route belongs", - "type": "string", - "format": "uuid" } }, "required": [ "description", - "destination", + "discoverable", "id", - "kind", + "identity_mode", + "mapped_fleet_roles", "name", - "target", "time_created", - "time_modified", - "vpc_router_id" + "time_modified" ] }, - "RouterRouteCreate": { - "description": "Create-time parameters for a `RouterRoute`", + "SiloCreate": { + "description": "Create-time parameters for a `Silo`", "type": "object", "properties": { + "admin_group_name": { + "nullable": true, + "description": "If set, this group will be created during Silo creation and granted the \"Silo Admin\" role. 
Identity providers can assert that users belong to this group and those users can log in and further initialize the Silo.\n\nNote that if configuring a SAML based identity provider, group_attribute_name must be set for users to be considered part of a group. See `SamlIdentityProviderCreate` for more information.", + "type": "string" + }, "description": { "type": "string" }, - "destination": { - "description": "Selects which traffic this routing rule will apply to.", - "allOf": [ - { - "$ref": "#/components/schemas/RouteDestination" - } - ] + "discoverable": { + "type": "boolean" + }, + "identity_mode": { + "$ref": "#/components/schemas/SiloIdentityMode" + }, + "mapped_fleet_roles": { + "description": "Mapping of which Fleet roles are conferred by each Silo role\n\nThe default is that no Fleet roles are conferred by any Silo roles unless there's a corresponding entry in this map.", + "default": {}, + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "$ref": "#/components/schemas/FleetRole" + }, + "uniqueItems": true + } }, "name": { "$ref": "#/components/schemas/Name" }, - "target": { - "description": "The location that matched packets should be forwarded to.", + "quotas": { + "description": "Limits the amount of provisionable CPU, memory, and storage in the Silo. CPU and memory are only consumed by running instances, while storage is consumed by any disk or snapshot. A value of 0 means that resource is *not* provisionable.", "allOf": [ { - "$ref": "#/components/schemas/RouteTarget" + "$ref": "#/components/schemas/SiloQuotasCreate" } ] + }, + "tls_certificates": { + "description": "Initial TLS certificates to be used for the new Silo's console and API endpoints. 
These should be valid for the Silo's DNS name(s).", + "type": "array", + "items": { + "$ref": "#/components/schemas/CertificateCreate" + } } }, "required": [ "description", - "destination", + "discoverable", + "identity_mode", "name", - "target" + "quotas", + "tls_certificates" ] }, - "RouterRouteKind": { - "description": "The kind of a `RouterRoute`\n\nThe kind determines certain attributes such as if the route is modifiable and describes how or where the route was created.", + "SiloIdentityMode": { + "description": "Describes how identities are managed and users are authenticated in this Silo", "oneOf": [ { - "description": "Determines the default destination of traffic, such as whether it goes to the internet or not.\n\n`Destination: An Internet Gateway` `Modifiable: true`", + "description": "Users are authenticated with SAML using an external authentication provider. The system updates information about users and groups only during successful authentication (i.e,. \"JIT provisioning\" of users and groups).", "type": "string", "enum": [ - "default" + "saml_jit" ] }, { - "description": "Automatically added for each VPC Subnet in the VPC\n\n`Destination: A VPC Subnet` `Modifiable: false`", + "description": "The system is the source of truth about users. There is no linkage to an external authentication provider or identity provider.", "type": "string", "enum": [ - "vpc_subnet" + "local_only" + ] + } + ] + }, + "SiloIpPool": { + "description": "An IP pool in the context of a silo", + "type": "object", + "properties": { + "description": { + "description": "human-readable free-form text about a resource", + "type": "string" + }, + "id": { + "description": "unique, immutable, system-controlled identifier for each resource", + "type": "string", + "format": "uuid" + }, + "is_default": { + "description": "When a pool is the default for a silo, floating IPs and instance ephemeral IPs will come from that pool when no other pool is specified. 
There can be at most one default for a given silo.", + "type": "boolean" + }, + "name": { + "description": "unique, mutable, user-controlled identifier for each resource", + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + }, + "time_created": { + "description": "timestamp when this resource was created", + "type": "string", + "format": "date-time" + }, + "time_modified": { + "description": "timestamp when this resource was last modified", + "type": "string", + "format": "date-time" + } + }, + "required": [ + "description", + "id", + "is_default", + "name", + "time_created", + "time_modified" + ] + }, + "SiloIpPoolResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/SiloIpPool" + } + }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + }, + "SiloQuotas": { + "description": "A collection of resource counts used to set the virtual capacity of a silo", + "type": "object", + "properties": { + "cpus": { + "description": "Number of virtual CPUs", + "type": "integer", + "format": "int64" + }, + "memory": { + "description": "Amount of memory in bytes", + "allOf": [ + { + "$ref": "#/components/schemas/ByteCount" + } + ] + }, + "silo_id": { + "type": "string", + "format": "uuid" + }, + "storage": { + "description": "Amount of disk storage in bytes", + "allOf": [ + { + "$ref": "#/components/schemas/ByteCount" + } ] + } + }, + "required": [ + "cpus", + "memory", + "silo_id", + "storage" + ] + }, + "SiloQuotasCreate": { + "description": "The amount of provisionable resources for a Silo", + "type": "object", + "properties": { + "cpus": { + "description": "The amount of virtual CPUs available for running instances in the Silo", + "type": "integer", + "format": "int64" 
}, - { - "description": "Automatically added when VPC peering is established\n\n`Destination: A different VPC` `Modifiable: false`", - "type": "string", - "enum": [ - "vpc_peering" + "memory": { + "description": "The amount of RAM (in bytes) available for running instances in the Silo", + "allOf": [ + { + "$ref": "#/components/schemas/ByteCount" + } ] }, - { - "description": "Created by a user; see `RouteTarget`\n\n`Destination: User defined` `Modifiable: true`", - "type": "string", - "enum": [ - "custom" + "storage": { + "description": "The amount of storage (in bytes) available for disks or snapshots", + "allOf": [ + { + "$ref": "#/components/schemas/ByteCount" + } ] } + }, + "required": [ + "cpus", + "memory", + "storage" ] }, - "RouterRouteResultsPage": { + "SiloQuotasResultsPage": { "description": "A single page of results", "type": "object", "properties": { @@ -20860,7 +21971,7 @@ "description": "list of items on this page of results", "type": "array", "items": { - "$ref": "#/components/schemas/RouterRoute" + "$ref": "#/components/schemas/SiloQuotas" } }, "next_page": { @@ -20873,253 +21984,278 @@ "items" ] }, - "RouterRouteUpdate": { - "description": "Updateable properties of a `RouterRoute`", + "SiloQuotasUpdate": { + "description": "Updateable properties of a Silo's resource limits. 
If a value is omitted it will not be updated.", "type": "object", "properties": { - "description": { + "cpus": { "nullable": true, - "type": "string" + "description": "The amount of virtual CPUs available for running instances in the Silo", + "type": "integer", + "format": "int64" }, - "destination": { - "description": "Selects which traffic this routing rule will apply to.", + "memory": { + "nullable": true, + "description": "The amount of RAM (in bytes) available for running instances in the Silo", "allOf": [ { - "$ref": "#/components/schemas/RouteDestination" + "$ref": "#/components/schemas/ByteCount" } ] }, - "name": { + "storage": { "nullable": true, + "description": "The amount of storage (in bytes) available for disks or snapshots", "allOf": [ { - "$ref": "#/components/schemas/Name" + "$ref": "#/components/schemas/ByteCount" } ] + } + } + }, + "SiloResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/Silo" + } }, - "target": { - "description": "The location that matched packets should be forwarded to.", - "allOf": [ - { - "$ref": "#/components/schemas/RouteTarget" - } - ] + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" } }, "required": [ - "destination", - "target" + "items" ] }, - "SamlIdentityProvider": { - "description": "Identity-related metadata that's included in nearly all public API objects", + "SiloRole": { + "type": "string", + "enum": [ + "admin", + "collaborator", + "viewer" + ] + }, + "SiloRolePolicy": { + "description": "Policy for a particular resource\n\nNote that the Policy only describes access granted explicitly for this resource. 
The policies of parent resources can also cause a user to have access to this resource.", "type": "object", "properties": { - "acs_url": { - "description": "Service provider endpoint where the response will be sent", - "type": "string" - }, - "description": { - "description": "human-readable free-form text about a resource", - "type": "string" - }, - "group_attribute_name": { - "nullable": true, - "description": "If set, attributes with this name will be considered to denote a user's group membership, where the values will be the group names.", - "type": "string" - }, - "id": { - "description": "unique, immutable, system-controlled identifier for each resource", + "role_assignments": { + "description": "Roles directly assigned on this resource", + "type": "array", + "items": { + "$ref": "#/components/schemas/SiloRoleRoleAssignment" + } + } + }, + "required": [ + "role_assignments" + ] + }, + "SiloRoleRoleAssignment": { + "description": "Describes the assignment of a particular role on a particular resource to a particular identity (user, group, etc.)\n\nThe resource is not part of this structure. 
Rather, `RoleAssignment`s are put into a `Policy` and that Policy is applied to a particular resource.", + "type": "object", + "properties": { + "identity_id": { "type": "string", "format": "uuid" }, - "idp_entity_id": { - "description": "IdP's entity id", - "type": "string" + "identity_type": { + "$ref": "#/components/schemas/IdentityType" }, - "name": { - "description": "unique, mutable, user-controlled identifier for each resource", + "role_name": { + "$ref": "#/components/schemas/SiloRole" + } + }, + "required": [ + "identity_id", + "identity_type", + "role_name" + ] + }, + "SiloUtilization": { + "description": "View of a silo's resource utilization and capacity", + "type": "object", + "properties": { + "allocated": { + "description": "Accounts for the total amount of resources reserved for silos via their quotas", "allOf": [ { - "$ref": "#/components/schemas/Name" + "$ref": "#/components/schemas/VirtualResourceCounts" } ] }, - "public_cert": { - "nullable": true, - "description": "Optional request signing public certificate (base64 encoded der file)", - "type": "string" - }, - "slo_url": { - "description": "Service provider endpoint where the idp should send log out requests", - "type": "string" - }, - "sp_client_id": { - "description": "SP's client id", - "type": "string" - }, - "technical_contact_email": { - "description": "Customer's technical contact for saml configuration", - "type": "string" + "provisioned": { + "description": "Accounts for resources allocated by in silos like CPU or memory for running instances and storage for disks and snapshots Note that CPU and memory resources associated with a stopped instances are not counted here", + "allOf": [ + { + "$ref": "#/components/schemas/VirtualResourceCounts" + } + ] }, - "time_created": { - "description": "timestamp when this resource was created", + "silo_id": { "type": "string", - "format": "date-time" + "format": "uuid" }, - "time_modified": { - "description": "timestamp when this resource was last 
modified", - "type": "string", - "format": "date-time" + "silo_name": { + "$ref": "#/components/schemas/Name" } }, "required": [ - "acs_url", - "description", - "id", - "idp_entity_id", - "name", - "slo_url", - "sp_client_id", - "technical_contact_email", - "time_created", - "time_modified" + "allocated", + "provisioned", + "silo_id", + "silo_name" ] }, - "SamlIdentityProviderCreate": { - "description": "Create-time identity-related parameters", + "SiloUtilizationResultsPage": { + "description": "A single page of results", "type": "object", "properties": { - "acs_url": { - "description": "service provider endpoint where the response will be sent", - "type": "string" - }, - "description": { - "type": "string" + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/SiloUtilization" + } }, - "group_attribute_name": { + "next_page": { "nullable": true, - "description": "If set, SAML attributes with this name will be considered to denote a user's group membership, where the attribute value(s) should be a comma-separated list of group names.", + "description": "token used to fetch the next page of results (if any)", "type": "string" + } + }, + "required": [ + "items" + ] + }, + "Sled": { + "description": "An operator's view of a Sled.", + "type": "object", + "properties": { + "baseboard": { + "$ref": "#/components/schemas/Baseboard" }, - "idp_entity_id": { - "description": "idp's entity id", - "type": "string" + "id": { + "description": "unique, immutable, system-controlled identifier for each resource", + "type": "string", + "format": "uuid" }, - "idp_metadata_source": { - "description": "the source of an identity provider metadata descriptor", + "policy": { + "description": "The operator-defined policy of a sled.", + "allOf": [ + { + "$ref": "#/components/schemas/SledPolicy" + } + ] + }, + "rack_id": { + "description": "The rack to which this Sled is currently attached", + "type": "string", 
+ "format": "uuid" + }, + "state": { + "description": "The current state Nexus believes the sled to be in.", "allOf": [ { - "$ref": "#/components/schemas/IdpMetadataSource" + "$ref": "#/components/schemas/SledState" } ] }, - "name": { - "$ref": "#/components/schemas/Name" + "time_created": { + "description": "timestamp when this resource was created", + "type": "string", + "format": "date-time" }, - "signing_keypair": { - "nullable": true, - "description": "request signing key pair", - "default": null, + "time_modified": { + "description": "timestamp when this resource was last modified", + "type": "string", + "format": "date-time" + }, + "usable_hardware_threads": { + "description": "The number of hardware threads which can execute on this sled", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "usable_physical_ram": { + "description": "Amount of RAM which may be used by the Sled's OS", "allOf": [ { - "$ref": "#/components/schemas/DerEncodedKeyPair" + "$ref": "#/components/schemas/ByteCount" } ] - }, - "slo_url": { - "description": "service provider endpoint where the idp should send log out requests", - "type": "string" - }, - "sp_client_id": { - "description": "sp's client id", - "type": "string" - }, - "technical_contact_email": { - "description": "customer's technical contact for saml configuration", - "type": "string" } }, "required": [ - "acs_url", - "description", - "idp_entity_id", - "idp_metadata_source", - "name", - "slo_url", - "sp_client_id", - "technical_contact_email" - ] - }, - "ServiceUsingCertificate": { - "description": "The service intended to use this certificate.", - "oneOf": [ - { - "description": "This certificate is intended for access to the external API.", - "type": "string", - "enum": [ - "external_api" - ] - } + "baseboard", + "id", + "policy", + "rack_id", + "state", + "time_created", + "time_modified", + "usable_hardware_threads", + "usable_physical_ram" ] }, - "SetTargetReleaseParams": { - "description": "Parameters 
for PUT requests to `/v1/system/update/target-release`.", + "SledId": { + "description": "The unique ID of a sled.", "type": "object", "properties": { - "system_version": { - "description": "Version of the system software to make the target release.", + "id": { "type": "string", - "pattern": "^(0|[1-9]\\d*)\\.(0|[1-9]\\d*)\\.(0|[1-9]\\d*)(?:-((?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\\.(?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\\+([0-9a-zA-Z-]+(?:\\.[0-9a-zA-Z-]+)*))?$" + "format": "uuid" } }, "required": [ - "system_version" + "id" ] }, - "Silo": { - "description": "View of a Silo\n\nA Silo is the highest level unit of isolation.", + "SledInstance": { + "description": "An operator's view of an instance running on a given sled", "type": "object", "properties": { - "description": { - "description": "human-readable free-form text about a resource", - "type": "string" - }, - "discoverable": { - "description": "A silo where discoverable is false can be retrieved only by its id - it will not be part of the \"list all silos\" output.", - "type": "boolean" + "active_sled_id": { + "type": "string", + "format": "uuid" }, "id": { "description": "unique, immutable, system-controlled identifier for each resource", "type": "string", "format": "uuid" }, - "identity_mode": { - "description": "How users and groups are managed in this Silo", - "allOf": [ - { - "$ref": "#/components/schemas/SiloIdentityMode" - } - ] + "memory": { + "type": "integer", + "format": "int64" }, - "mapped_fleet_roles": { - "description": "Mapping of which Fleet roles are conferred by each Silo role\n\nThe default is that no Fleet roles are conferred by any Silo roles unless there's a corresponding entry in this map.", - "type": "object", - "additionalProperties": { - "type": "array", - "items": { - "$ref": "#/components/schemas/FleetRole" - }, - "uniqueItems": true - } + "migration_id": { + "nullable": true, + "type": "string", + "format": "uuid" }, "name": { - "description": "unique, mutable, 
user-controlled identifier for each resource", - "allOf": [ - { - "$ref": "#/components/schemas/Name" - } - ] + "$ref": "#/components/schemas/Name" + }, + "ncpus": { + "type": "integer", + "format": "int64" + }, + "project_name": { + "$ref": "#/components/schemas/Name" + }, + "silo_name": { + "$ref": "#/components/schemas/Name" + }, + "state": { + "$ref": "#/components/schemas/InstanceState" }, "time_created": { "description": "timestamp when this resource was created", @@ -21133,110 +22269,202 @@ } }, "required": [ - "description", - "discoverable", + "active_sled_id", "id", - "identity_mode", - "mapped_fleet_roles", + "memory", "name", + "ncpus", + "project_name", + "silo_name", + "state", "time_created", "time_modified" ] }, - "SiloCreate": { - "description": "Create-time parameters for a `Silo`", + "SledInstanceResultsPage": { + "description": "A single page of results", "type": "object", "properties": { - "admin_group_name": { - "nullable": true, - "description": "If set, this group will be created during Silo creation and granted the \"Silo Admin\" role. Identity providers can assert that users belong to this group and those users can log in and further initialize the Silo.\n\nNote that if configuring a SAML based identity provider, group_attribute_name must be set for users to be considered part of a group. 
See `SamlIdentityProviderCreate` for more information.", - "type": "string" + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/SledInstance" + } }, - "description": { + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", "type": "string" - }, - "discoverable": { - "type": "boolean" - }, - "identity_mode": { - "$ref": "#/components/schemas/SiloIdentityMode" - }, - "mapped_fleet_roles": { - "description": "Mapping of which Fleet roles are conferred by each Silo role\n\nThe default is that no Fleet roles are conferred by any Silo roles unless there's a corresponding entry in this map.", - "default": {}, + } + }, + "required": [ + "items" + ] + }, + "SledPolicy": { + "description": "The operator-defined policy of a sled.", + "oneOf": [ + { + "description": "The operator has indicated that the sled is in-service.", "type": "object", - "additionalProperties": { - "type": "array", - "items": { - "$ref": "#/components/schemas/FleetRole" + "properties": { + "kind": { + "type": "string", + "enum": [ + "in_service" + ] }, - "uniqueItems": true - } + "provision_policy": { + "description": "Determines whether new resources can be provisioned onto the sled.", + "allOf": [ + { + "$ref": "#/components/schemas/SledProvisionPolicy" + } + ] + } + }, + "required": [ + "kind", + "provision_policy" + ] }, - "name": { - "$ref": "#/components/schemas/Name" + { + "description": "The operator has indicated that the sled has been permanently removed from service.\n\nThis is a terminal state: once a particular sled ID is expunged, it will never return to service. 
(The actual hardware may be reused, but it will be treated as a brand-new sled.)\n\nAn expunged sled is always non-provisionable.", + "type": "object", + "properties": { + "kind": { + "type": "string", + "enum": [ + "expunged" + ] + } + }, + "required": [ + "kind" + ] + } + ] + }, + "SledProvisionPolicy": { + "description": "The operator-defined provision policy of a sled.\n\nThis controls whether new resources are going to be provisioned on this sled.", + "oneOf": [ + { + "description": "New resources will be provisioned on this sled.", + "type": "string", + "enum": [ + "provisionable" + ] }, - "quotas": { - "description": "Limits the amount of provisionable CPU, memory, and storage in the Silo. CPU and memory are only consumed by running instances, while storage is consumed by any disk or snapshot. A value of 0 means that resource is *not* provisionable.", + { + "description": "New resources will not be provisioned on this sled. However, if the sled is currently in service, existing resources will continue to be on this sled unless manually migrated off.", + "type": "string", + "enum": [ + "non_provisionable" + ] + } + ] + }, + "SledProvisionPolicyParams": { + "description": "Parameters for `sled_set_provision_policy`.", + "type": "object", + "properties": { + "state": { + "description": "The provision state.", "allOf": [ { - "$ref": "#/components/schemas/SiloQuotasCreate" + "$ref": "#/components/schemas/SledProvisionPolicy" + } + ] + } + }, + "required": [ + "state" + ] + }, + "SledProvisionPolicyResponse": { + "description": "Response to `sled_set_provision_policy`.", + "type": "object", + "properties": { + "new_state": { + "description": "The new provision state.", + "allOf": [ + { + "$ref": "#/components/schemas/SledProvisionPolicy" } ] }, - "tls_certificates": { - "description": "Initial TLS certificates to be used for the new Silo's console and API endpoints. 
These should be valid for the Silo's DNS name(s).", + "old_state": { + "description": "The old provision state.", + "allOf": [ + { + "$ref": "#/components/schemas/SledProvisionPolicy" + } + ] + } + }, + "required": [ + "new_state", + "old_state" + ] + }, + "SledResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", "type": "array", "items": { - "$ref": "#/components/schemas/CertificateCreate" + "$ref": "#/components/schemas/Sled" } + }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" } }, "required": [ - "description", - "discoverable", - "identity_mode", - "name", - "quotas", - "tls_certificates" + "items" ] }, - "SiloIdentityMode": { - "description": "Describes how identities are managed and users are authenticated in this Silo", + "SledState": { + "description": "The current state of the sled, as determined by Nexus.", "oneOf": [ { - "description": "Users are authenticated with SAML using an external authentication provider. The system updates information about users and groups only during successful authentication (i.e,. \"JIT provisioning\" of users and groups).", + "description": "The sled is currently active, and has resources allocated on it.", "type": "string", "enum": [ - "saml_jit" + "active" ] }, { - "description": "The system is the source of truth about users. There is no linkage to an external authentication provider or identity provider.", + "description": "The sled has been permanently removed from service.\n\nThis is a terminal state: once a particular sled ID is decommissioned, it will never return to service. 
(The actual hardware may be reused, but it will be treated as a brand-new sled.)", "type": "string", "enum": [ - "local_only" + "decommissioned" ] } ] }, - "SiloIpPool": { - "description": "An IP pool in the context of a silo", + "Snapshot": { + "description": "View of a Snapshot", "type": "object", "properties": { "description": { "description": "human-readable free-form text about a resource", "type": "string" }, + "disk_id": { + "type": "string", + "format": "uuid" + }, "id": { "description": "unique, immutable, system-controlled identifier for each resource", "type": "string", "format": "uuid" }, - "is_default": { - "description": "When a pool is the default for a silo, floating IPs and instance ephemeral IPs will come from that pool when no other pool is specified. There can be at most one default for a given silo.", - "type": "boolean" - }, "name": { "description": "unique, mutable, user-controlled identifier for each resource", "allOf": [ @@ -21245,6 +22473,16 @@ } ] }, + "project_id": { + "type": "string", + "format": "uuid" + }, + "size": { + "$ref": "#/components/schemas/ByteCount" + }, + "state": { + "$ref": "#/components/schemas/SnapshotState" + }, "time_created": { "description": "timestamp when this resource was created", "type": "string", @@ -21258,104 +22496,42 @@ }, "required": [ "description", + "disk_id", "id", - "is_default", "name", + "project_id", + "size", + "state", "time_created", "time_modified" ] }, - "SiloIpPoolResultsPage": { - "description": "A single page of results", + "SnapshotCreate": { + "description": "Create-time parameters for a `Snapshot`", "type": "object", "properties": { - "items": { - "description": "list of items on this page of results", - "type": "array", - "items": { - "$ref": "#/components/schemas/SiloIpPool" - } - }, - "next_page": { - "nullable": true, - "description": "token used to fetch the next page of results (if any)", + "description": { "type": "string" - } - }, - "required": [ - "items" - ] - }, - 
"SiloQuotas": { - "description": "A collection of resource counts used to set the virtual capacity of a silo", - "type": "object", - "properties": { - "cpus": { - "description": "Number of virtual CPUs", - "type": "integer", - "format": "int64" - }, - "memory": { - "description": "Amount of memory in bytes", - "allOf": [ - { - "$ref": "#/components/schemas/ByteCount" - } - ] - }, - "silo_id": { - "type": "string", - "format": "uuid" - }, - "storage": { - "description": "Amount of disk storage in bytes", - "allOf": [ - { - "$ref": "#/components/schemas/ByteCount" - } - ] - } - }, - "required": [ - "cpus", - "memory", - "silo_id", - "storage" - ] - }, - "SiloQuotasCreate": { - "description": "The amount of provisionable resources for a Silo", - "type": "object", - "properties": { - "cpus": { - "description": "The amount of virtual CPUs available for running instances in the Silo", - "type": "integer", - "format": "int64" }, - "memory": { - "description": "The amount of RAM (in bytes) available for running instances in the Silo", + "disk": { + "description": "The disk to be snapshotted", "allOf": [ { - "$ref": "#/components/schemas/ByteCount" + "$ref": "#/components/schemas/NameOrId" } ] }, - "storage": { - "description": "The amount of storage (in bytes) available for disks or snapshots", - "allOf": [ - { - "$ref": "#/components/schemas/ByteCount" - } - ] + "name": { + "$ref": "#/components/schemas/Name" } }, "required": [ - "cpus", - "memory", - "storage" + "description", + "disk", + "name" ] }, - "SiloQuotasResultsPage": { + "SnapshotResultsPage": { "description": "A single page of results", "type": "object", "properties": { @@ -21363,7 +22539,7 @@ "description": "list of items on this page of results", "type": "array", "items": { - "$ref": "#/components/schemas/SiloQuotas" + "$ref": "#/components/schemas/Snapshot" } }, "next_page": { @@ -21376,37 +22552,88 @@ "items" ] }, - "SiloQuotasUpdate": { - "description": "Updateable properties of a Silo's resource limits. 
If a value is omitted it will not be updated.", + "SnapshotState": { + "type": "string", + "enum": [ + "creating", + "ready", + "faulted", + "destroyed" + ] + }, + "SshKey": { + "description": "View of an SSH Key", "type": "object", "properties": { - "cpus": { - "nullable": true, - "description": "The amount of virtual CPUs available for running instances in the Silo", - "type": "integer", - "format": "int64" + "description": { + "description": "human-readable free-form text about a resource", + "type": "string" }, - "memory": { - "nullable": true, - "description": "The amount of RAM (in bytes) available for running instances in the Silo", + "id": { + "description": "unique, immutable, system-controlled identifier for each resource", + "type": "string", + "format": "uuid" + }, + "name": { + "description": "unique, mutable, user-controlled identifier for each resource", "allOf": [ { - "$ref": "#/components/schemas/ByteCount" + "$ref": "#/components/schemas/Name" } ] }, - "storage": { - "nullable": true, - "description": "The amount of storage (in bytes) available for disks or snapshots", - "allOf": [ - { - "$ref": "#/components/schemas/ByteCount" - } - ] + "public_key": { + "description": "SSH public key, e.g., `\"ssh-ed25519 AAAAC3NzaC...\"`", + "type": "string" + }, + "silo_user_id": { + "description": "The user to whom this key belongs", + "type": "string", + "format": "uuid" + }, + "time_created": { + "description": "timestamp when this resource was created", + "type": "string", + "format": "date-time" + }, + "time_modified": { + "description": "timestamp when this resource was last modified", + "type": "string", + "format": "date-time" + } + }, + "required": [ + "description", + "id", + "name", + "public_key", + "silo_user_id", + "time_created", + "time_modified" + ] + }, + "SshKeyCreate": { + "description": "Create-time parameters for an `SshKey`", + "type": "object", + "properties": { + "description": { + "type": "string" + }, + "name": { + "$ref": 
"#/components/schemas/Name" + }, + "public_key": { + "description": "SSH public key, e.g., `\"ssh-ed25519 AAAAC3NzaC...\"`", + "type": "string" } - } + }, + "required": [ + "description", + "name", + "public_key" + ] }, - "SiloResultsPage": { + "SshKeyResultsPage": { "description": "A single page of results", "type": "object", "properties": { @@ -21414,7 +22641,7 @@ "description": "list of items on this page of results", "type": "array", "items": { - "$ref": "#/components/schemas/Silo" + "$ref": "#/components/schemas/SshKey" } }, "next_page": { @@ -21427,87 +22654,35 @@ "items" ] }, - "SiloRole": { - "type": "string", - "enum": [ - "admin", - "collaborator", - "viewer" - ] - }, - "SiloRolePolicy": { - "description": "Policy for a particular resource\n\nNote that the Policy only describes access granted explicitly for this resource. The policies of parent resources can also cause a user to have access to this resource.", - "type": "object", - "properties": { - "role_assignments": { - "description": "Roles directly assigned on this resource", - "type": "array", - "items": { - "$ref": "#/components/schemas/SiloRoleRoleAssignment" - } - } - }, - "required": [ - "role_assignments" - ] - }, - "SiloRoleRoleAssignment": { - "description": "Describes the assignment of a particular role on a particular resource to a particular identity (user, group, etc.)\n\nThe resource is not part of this structure. 
Rather, `RoleAssignment`s are put into a `Policy` and that Policy is applied to a particular resource.", + "SupportBundleInfo": { "type": "object", "properties": { - "identity_id": { - "type": "string", - "format": "uuid" + "id": { + "$ref": "#/components/schemas/TypedUuidForSupportBundleKind" }, - "identity_type": { - "$ref": "#/components/schemas/IdentityType" + "reason_for_creation": { + "type": "string" }, - "role_name": { - "$ref": "#/components/schemas/SiloRole" - } - }, - "required": [ - "identity_id", - "identity_type", - "role_name" - ] - }, - "SiloUtilization": { - "description": "View of a silo's resource utilization and capacity", - "type": "object", - "properties": { - "allocated": { - "description": "Accounts for the total amount of resources reserved for silos via their quotas", - "allOf": [ - { - "$ref": "#/components/schemas/VirtualResourceCounts" - } - ] + "reason_for_failure": { + "nullable": true, + "type": "string" }, - "provisioned": { - "description": "Accounts for resources allocated by in silos like CPU or memory for running instances and storage for disks and snapshots Note that CPU and memory resources associated with a stopped instances are not counted here", - "allOf": [ - { - "$ref": "#/components/schemas/VirtualResourceCounts" - } - ] + "state": { + "$ref": "#/components/schemas/SupportBundleState" }, - "silo_id": { + "time_created": { "type": "string", - "format": "uuid" - }, - "silo_name": { - "$ref": "#/components/schemas/Name" + "format": "date-time" } }, "required": [ - "allocated", - "provisioned", - "silo_id", - "silo_name" + "id", + "reason_for_creation", + "state", + "time_created" ] }, - "SiloUtilizationResultsPage": { + "SupportBundleInfoResultsPage": { "description": "A single page of results", "type": "object", "properties": { @@ -21515,7 +22690,7 @@ "description": "list of items on this page of results", "type": "array", "items": { - "$ref": "#/components/schemas/SiloUtilization" + "$ref": 
"#/components/schemas/SupportBundleInfo" } }, "next_page": { @@ -21528,8 +22703,40 @@ "items" ] }, - "Sled": { - "description": "An operator's view of a Sled.", + "SupportBundleState": { + "oneOf": [ + { + "description": "Support Bundle still actively being collected.\n\nThis is the initial state for a Support Bundle, and it will automatically transition to either \"Failing\" or \"Active\".\n\nIf a user no longer wants to access a Support Bundle, they can request cancellation, which will transition to the \"Destroying\" state.", + "type": "string", + "enum": [ + "collecting" + ] + }, + { + "description": "Support Bundle is being destroyed.\n\nOnce backing storage has been freed, this bundle is destroyed.", + "type": "string", + "enum": [ + "destroying" + ] + }, + { + "description": "Support Bundle was not created successfully, or was created and has lost backing storage.\n\nThe record of the bundle still exists for readability, but the only valid operation on these bundles is to destroy them.", + "type": "string", + "enum": [ + "failed" + ] + }, + { + "description": "Support Bundle has been processed, and is ready for usage.", + "type": "string", + "enum": [ + "active" + ] + } + ] + }, + "Switch": { + "description": "An operator's view of a Switch.", "type": "object", "properties": { "baseboard": { @@ -21540,27 +22747,11 @@ "type": "string", "format": "uuid" }, - "policy": { - "description": "The operator-defined policy of a sled.", - "allOf": [ - { - "$ref": "#/components/schemas/SledPolicy" - } - ] - }, "rack_id": { - "description": "The rack to which this Sled is currently attached", + "description": "The rack to which this Switch is currently attached", "type": "string", "format": "uuid" }, - "state": { - "description": "The current state Nexus believes the sled to be in.", - "allOf": [ - { - "$ref": "#/components/schemas/SledState" - } - ] - }, "time_created": { "description": "timestamp when this resource was created", "type": "string", @@ -21570,511 
+22761,453 @@ "description": "timestamp when this resource was last modified", "type": "string", "format": "date-time" - }, - "usable_hardware_threads": { - "description": "The number of hardware threads which can execute on this sled", - "type": "integer", - "format": "uint32", - "minimum": 0 - }, - "usable_physical_ram": { - "description": "Amount of RAM which may be used by the Sled's OS", - "allOf": [ - { - "$ref": "#/components/schemas/ByteCount" - } - ] } }, "required": [ "baseboard", "id", - "policy", "rack_id", - "state", "time_created", - "time_modified", - "usable_hardware_threads", - "usable_physical_ram" + "time_modified" ] }, - "SledId": { - "description": "The unique ID of a sled.", + "SwitchBgpHistory": { + "description": "BGP message history for a particular switch.", "type": "object", "properties": { - "id": { - "type": "string", - "format": "uuid" + "history": { + "description": "Message history indexed by peer address.", + "type": "object", + "additionalProperties": { + "$ref": "#/components/schemas/BgpMessageHistory" + } + }, + "switch": { + "description": "Switch this message history is associated with.", + "allOf": [ + { + "$ref": "#/components/schemas/SwitchLocation" + } + ] } }, "required": [ - "id" + "history", + "switch" ] }, - "SledInstance": { - "description": "An operator's view of an instance running on a given sled", + "SwitchInterfaceConfig": { + "description": "A switch port interface configuration for a port settings object.", "type": "object", "properties": { - "active_sled_id": { - "type": "string", - "format": "uuid" - }, "id": { - "description": "unique, immutable, system-controlled identifier for each resource", - "type": "string", - "format": "uuid" - }, - "memory": { - "type": "integer", - "format": "int64" - }, - "migration_id": { - "nullable": true, + "description": "A unique identifier for this switch interface.", "type": "string", "format": "uuid" }, - "name": { - "$ref": "#/components/schemas/Name" - }, - "ncpus": { - 
"type": "integer", - "format": "int64" - }, - "project_name": { - "$ref": "#/components/schemas/Name" - }, - "silo_name": { - "$ref": "#/components/schemas/Name" + "interface_name": { + "description": "The name of this switch interface.", + "type": "string" }, - "state": { - "$ref": "#/components/schemas/InstanceState" + "kind": { + "description": "The switch interface kind.", + "allOf": [ + { + "$ref": "#/components/schemas/SwitchInterfaceKind2" + } + ] }, - "time_created": { - "description": "timestamp when this resource was created", + "port_settings_id": { + "description": "The port settings object this switch interface configuration belongs to.", "type": "string", - "format": "date-time" + "format": "uuid" }, - "time_modified": { - "description": "timestamp when this resource was last modified", - "type": "string", - "format": "date-time" + "v6_enabled": { + "description": "Whether or not IPv6 is enabled on this interface.", + "type": "boolean" } }, "required": [ - "active_sled_id", "id", - "memory", - "name", - "ncpus", - "project_name", - "silo_name", - "state", - "time_created", - "time_modified" + "interface_name", + "kind", + "port_settings_id", + "v6_enabled" ] }, - "SledInstanceResultsPage": { - "description": "A single page of results", + "SwitchInterfaceConfigCreate": { + "description": "A layer-3 switch interface configuration. 
When IPv6 is enabled, a link local address will be created for the interface.", "type": "object", "properties": { - "items": { - "description": "list of items on this page of results", - "type": "array", - "items": { - "$ref": "#/components/schemas/SledInstance" - } + "kind": { + "description": "What kind of switch interface this configuration represents.", + "allOf": [ + { + "$ref": "#/components/schemas/SwitchInterfaceKind" + } + ] }, - "next_page": { - "nullable": true, - "description": "token used to fetch the next page of results (if any)", - "type": "string" + "v6_enabled": { + "description": "Whether or not IPv6 is enabled.", + "type": "boolean" } }, "required": [ - "items" + "kind", + "v6_enabled" ] }, - "SledPolicy": { - "description": "The operator-defined policy of a sled.", + "SwitchInterfaceKind": { + "description": "Indicates the kind for a switch interface.", "oneOf": [ { - "description": "The operator has indicated that the sled is in-service.", + "description": "Primary interfaces are associated with physical links. There is exactly one primary interface per physical link.", "type": "object", "properties": { - "kind": { + "type": { "type": "string", "enum": [ - "in_service" + "primary" ] - }, - "provision_policy": { - "description": "Determines whether new resources can be provisioned onto the sled.", - "allOf": [ - { - "$ref": "#/components/schemas/SledProvisionPolicy" - } + } + }, + "required": [ + "type" + ] + }, + { + "description": "VLAN interfaces allow physical interfaces to be multiplexed onto multiple logical links, each distinguished by a 12-bit 802.1Q Ethernet tag.", + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "vlan" ] + }, + "vid": { + "description": "The virtual network id (VID) that distinguishes this interface and is used for producing and consuming 802.1Q Ethernet tags. 
This field has a maximum value of 4095 as 802.1Q tags are twelve bits.", + "type": "integer", + "format": "uint16", + "minimum": 0 } }, "required": [ - "kind", - "provision_policy" + "type", + "vid" ] }, { - "description": "The operator has indicated that the sled has been permanently removed from service.\n\nThis is a terminal state: once a particular sled ID is expunged, it will never return to service. (The actual hardware may be reused, but it will be treated as a brand-new sled.)\n\nAn expunged sled is always non-provisionable.", + "description": "Loopback interfaces are anchors for IP addresses that are not specific to any particular port.", "type": "object", "properties": { - "kind": { + "type": { "type": "string", "enum": [ - "expunged" + "loopback" ] } }, "required": [ - "kind" + "type" ] } ] }, - "SledProvisionPolicy": { - "description": "The operator-defined provision policy of a sled.\n\nThis controls whether new resources are going to be provisioned on this sled.", + "SwitchInterfaceKind2": { + "description": "Describes the kind of an switch interface.", "oneOf": [ { - "description": "New resources will be provisioned on this sled.", + "description": "Primary interfaces are associated with physical links. There is exactly one primary interface per physical link.", "type": "string", "enum": [ - "provisionable" + "primary" ] }, { - "description": "New resources will not be provisioned on this sled. 
However, if the sled is currently in service, existing resources will continue to be on this sled unless manually migrated off.", + "description": "VLAN interfaces allow physical interfaces to be multiplexed onto multiple logical links, each distinguished by a 12-bit 802.1Q Ethernet tag.", "type": "string", "enum": [ - "non_provisionable" - ] - } - ] - }, - "SledProvisionPolicyParams": { - "description": "Parameters for `sled_set_provision_policy`.", - "type": "object", - "properties": { - "state": { - "description": "The provision state.", - "allOf": [ - { - "$ref": "#/components/schemas/SledProvisionPolicy" - } - ] - } - }, - "required": [ - "state" - ] - }, - "SledProvisionPolicyResponse": { - "description": "Response to `sled_set_provision_policy`.", - "type": "object", - "properties": { - "new_state": { - "description": "The new provision state.", - "allOf": [ - { - "$ref": "#/components/schemas/SledProvisionPolicy" - } + "vlan" ] }, - "old_state": { - "description": "The old provision state.", - "allOf": [ - { - "$ref": "#/components/schemas/SledProvisionPolicy" - } + { + "description": "Loopback interfaces are anchors for IP addresses that are not specific to any particular port.", + "type": "string", + "enum": [ + "loopback" ] } - }, - "required": [ - "new_state", - "old_state" - ] - }, - "SledResultsPage": { - "description": "A single page of results", - "type": "object", - "properties": { - "items": { - "description": "list of items on this page of results", - "type": "array", - "items": { - "$ref": "#/components/schemas/Sled" - } - }, - "next_page": { - "nullable": true, - "description": "token used to fetch the next page of results (if any)", - "type": "string" - } - }, - "required": [ - "items" ] }, - "SledState": { - "description": "The current state of the sled, as determined by Nexus.", + "SwitchLinkState": {}, + "SwitchLocation": { + "description": "Identifies switch physical location", "oneOf": [ { - "description": "The sled is currently active, 
and has resources allocated on it.", + "description": "Switch in upper slot", "type": "string", "enum": [ - "active" + "switch0" ] }, { - "description": "The sled has been permanently removed from service.\n\nThis is a terminal state: once a particular sled ID is decommissioned, it will never return to service. (The actual hardware may be reused, but it will be treated as a brand-new sled.)", + "description": "Switch in lower slot", "type": "string", "enum": [ - "decommissioned" + "switch1" ] } ] }, - "Snapshot": { - "description": "View of a Snapshot", + "SwitchPort": { + "description": "A switch port represents a physical external port on a rack switch.", "type": "object", "properties": { - "description": { - "description": "human-readable free-form text about a resource", + "id": { + "description": "The id of the switch port.", + "type": "string", + "format": "uuid" + }, + "port_name": { + "description": "The name of this switch port.", "type": "string" }, - "disk_id": { + "port_settings_id": { + "nullable": true, + "description": "The primary settings group of this switch port. 
Will be `None` until this switch port is configured.", "type": "string", "format": "uuid" }, - "id": { - "description": "unique, immutable, system-controlled identifier for each resource", + "rack_id": { + "description": "The rack this switch port belongs to.", "type": "string", "format": "uuid" }, - "name": { - "description": "unique, mutable, user-controlled identifier for each resource", + "switch_location": { + "description": "The switch location of this switch port.", + "type": "string" + } + }, + "required": [ + "id", + "port_name", + "rack_id", + "switch_location" + ] + }, + "SwitchPortAddressConfig": { + "description": "An IP address configuration for a port settings object.", + "type": "object", + "properties": { + "address": { + "description": "The IP address and prefix.", "allOf": [ { - "$ref": "#/components/schemas/Name" + "$ref": "#/components/schemas/IpNet" } ] }, - "project_id": { + "address_lot_block_id": { + "description": "The id of the address lot block this address is drawn from.", "type": "string", "format": "uuid" }, - "size": { - "$ref": "#/components/schemas/ByteCount" - }, - "state": { - "$ref": "#/components/schemas/SnapshotState" - }, - "time_created": { - "description": "timestamp when this resource was created", - "type": "string", - "format": "date-time" + "interface_name": { + "description": "The interface name this address belongs to.", + "type": "string" }, - "time_modified": { - "description": "timestamp when this resource was last modified", + "port_settings_id": { + "description": "The port settings object this address configuration belongs to.", "type": "string", - "format": "date-time" + "format": "uuid" + }, + "vlan_id": { + "nullable": true, + "description": "An optional VLAN ID", + "type": "integer", + "format": "uint16", + "minimum": 0 } }, "required": [ - "description", - "disk_id", - "id", - "name", - "project_id", - "size", - "state", - "time_created", - "time_modified" + "address", + "address_lot_block_id", + 
"interface_name", + "port_settings_id" ] }, - "SnapshotCreate": { - "description": "Create-time parameters for a `Snapshot`", + "SwitchPortApplySettings": { + "description": "Parameters for applying settings to switch ports.", "type": "object", "properties": { - "description": { - "type": "string" - }, - "disk": { - "description": "The disk to be snapshotted", + "port_settings": { + "description": "A name or id to use when applying switch port settings.", "allOf": [ { "$ref": "#/components/schemas/NameOrId" } ] - }, - "name": { - "$ref": "#/components/schemas/Name" } }, "required": [ - "description", - "disk", - "name" + "port_settings" ] }, - "SnapshotResultsPage": { - "description": "A single page of results", + "SwitchPortConfig": { + "description": "A physical port configuration for a port settings object.", "type": "object", "properties": { - "items": { - "description": "list of items on this page of results", - "type": "array", - "items": { - "$ref": "#/components/schemas/Snapshot" - } + "geometry": { + "description": "The physical link geometry of the port.", + "allOf": [ + { + "$ref": "#/components/schemas/SwitchPortGeometry2" + } + ] }, - "next_page": { - "nullable": true, - "description": "token used to fetch the next page of results (if any)", - "type": "string" + "port_settings_id": { + "description": "The id of the port settings object this configuration belongs to.", + "type": "string", + "format": "uuid" } }, "required": [ - "items" - ] - }, - "SnapshotState": { - "type": "string", - "enum": [ - "creating", - "ready", - "faulted", - "destroyed" + "geometry", + "port_settings_id" ] }, - "SshKey": { - "description": "View of an SSH Key", + "SwitchPortConfigCreate": { + "description": "Physical switch port configuration.", "type": "object", "properties": { - "description": { - "description": "human-readable free-form text about a resource", - "type": "string" - }, - "id": { - "description": "unique, immutable, system-controlled identifier for each 
resource", - "type": "string", - "format": "uuid" - }, - "name": { - "description": "unique, mutable, user-controlled identifier for each resource", + "geometry": { + "description": "Link geometry for the switch port.", "allOf": [ { - "$ref": "#/components/schemas/Name" + "$ref": "#/components/schemas/SwitchPortGeometry" } ] - }, - "public_key": { - "description": "SSH public key, e.g., `\"ssh-ed25519 AAAAC3NzaC...\"`", - "type": "string" - }, - "silo_user_id": { - "description": "The user to whom this key belongs", + } + }, + "required": [ + "geometry" + ] + }, + "SwitchPortGeometry": { + "description": "The link geometry associated with a switch port.", + "oneOf": [ + { + "description": "The port contains a single QSFP28 link with four lanes.", "type": "string", - "format": "uuid" + "enum": [ + "qsfp28x1" + ] }, - "time_created": { - "description": "timestamp when this resource was created", + { + "description": "The port contains two QSFP28 links each with two lanes.", "type": "string", - "format": "date-time" + "enum": [ + "qsfp28x2" + ] }, - "time_modified": { - "description": "timestamp when this resource was last modified", + { + "description": "The port contains four SFP28 links each with one lane.", "type": "string", - "format": "date-time" + "enum": [ + "sfp28x4" + ] } - }, - "required": [ - "description", - "id", - "name", - "public_key", - "silo_user_id", - "time_created", - "time_modified" ] }, - "SshKeyCreate": { - "description": "Create-time parameters for an `SshKey`", - "type": "object", - "properties": { - "description": { - "type": "string" + "SwitchPortGeometry2": { + "description": "The link geometry associated with a switch port.", + "oneOf": [ + { + "description": "The port contains a single QSFP28 link with four lanes.", + "type": "string", + "enum": [ + "qsfp28x1" + ] }, - "name": { - "$ref": "#/components/schemas/Name" + { + "description": "The port contains two QSFP28 links each with two lanes.", + "type": "string", + "enum": [ + 
"qsfp28x2" + ] }, - "public_key": { - "description": "SSH public key, e.g., `\"ssh-ed25519 AAAAC3NzaC...\"`", - "type": "string" + { + "description": "The port contains four SFP28 links each with one lane.", + "type": "string", + "enum": [ + "sfp28x4" + ] } - }, - "required": [ - "description", - "name", - "public_key" ] }, - "SshKeyResultsPage": { - "description": "A single page of results", + "SwitchPortLinkConfig": { + "description": "A link configuration for a port settings object.", "type": "object", "properties": { - "items": { - "description": "list of items on this page of results", - "type": "array", - "items": { - "$ref": "#/components/schemas/SshKey" - } + "autoneg": { + "description": "Whether or not the link has autonegotiation enabled.", + "type": "boolean" }, - "next_page": { + "fec": { "nullable": true, - "description": "token used to fetch the next page of results (if any)", - "type": "string" - } - }, - "required": [ - "items" - ] - }, - "SupportBundleInfo": { - "type": "object", - "properties": { - "id": { - "$ref": "#/components/schemas/TypedUuidForSupportBundleKind" + "description": "The requested forward-error correction method. 
If this is not specified, the standard FEC for the underlying media will be applied if it can be determined.", + "allOf": [ + { + "$ref": "#/components/schemas/LinkFec" + } + ] }, - "reason_for_creation": { + "link_name": { + "description": "The name of this link.", "type": "string" }, - "reason_for_failure": { + "lldp_link_config_id": { "nullable": true, - "type": "string" + "description": "The link-layer discovery protocol service configuration id for this link.", + "type": "string", + "format": "uuid" }, - "state": { - "$ref": "#/components/schemas/SupportBundleState" + "mtu": { + "description": "The maximum transmission unit for this link.", + "type": "integer", + "format": "uint16", + "minimum": 0 }, - "time_created": { + "port_settings_id": { + "description": "The port settings this link configuration belongs to.", "type": "string", - "format": "date-time" + "format": "uuid" + }, + "speed": { + "description": "The configured speed of the link.", + "allOf": [ + { + "$ref": "#/components/schemas/LinkSpeed" + } + ] + }, + "tx_eq_config_id": { + "nullable": true, + "description": "The tx_eq configuration id for this link.", + "type": "string", + "format": "uuid" } }, "required": [ - "id", - "reason_for_creation", - "state", - "time_created" + "autoneg", + "link_name", + "mtu", + "port_settings_id", + "speed" ] }, - "SupportBundleInfoResultsPage": { + "SwitchPortResultsPage": { "description": "A single page of results", "type": "object", "properties": { @@ -22082,7 +23215,7 @@ "description": "list of items on this page of results", "type": "array", "items": { - "$ref": "#/components/schemas/SupportBundleInfo" + "$ref": "#/components/schemas/SwitchPort" } }, "next_page": { @@ -22095,54 +23228,77 @@ "items" ] }, - "SupportBundleState": { - "oneOf": [ - { - "description": "Support Bundle still actively being collected.\n\nThis is the initial state for a Support Bundle, and it will automatically transition to either \"Failing\" or \"Active\".\n\nIf a user no longer 
wants to access a Support Bundle, they can request cancellation, which will transition to the \"Destroying\" state.", - "type": "string", - "enum": [ - "collecting" + "SwitchPortRouteConfig": { + "description": "A route configuration for a port settings object.", + "type": "object", + "properties": { + "dst": { + "description": "The route's destination network.", + "allOf": [ + { + "$ref": "#/components/schemas/IpNet" + } ] }, - { - "description": "Support Bundle is being destroyed.\n\nOnce backing storage has been freed, this bundle is destroyed.", - "type": "string", - "enum": [ - "destroying" + "gw": { + "description": "The route's gateway address.", + "allOf": [ + { + "$ref": "#/components/schemas/IpNet" + } ] }, - { - "description": "Support Bundle was not created successfully, or was created and has lost backing storage.\n\nThe record of the bundle still exists for readability, but the only valid operation on these bundles is to destroy them.", - "type": "string", - "enum": [ - "failed" - ] + "interface_name": { + "description": "The interface name this route configuration is assigned to.", + "type": "string" }, - { - "description": "Support Bundle has been processed, and is ready for usage.", + "port_settings_id": { + "description": "The port settings object this route configuration belongs to.", "type": "string", - "enum": [ - "active" - ] + "format": "uuid" + }, + "rib_priority": { + "nullable": true, + "description": "RIB Priority indicating priority within and across protocols.", + "type": "integer", + "format": "uint8", + "minimum": 0 + }, + "vlan_id": { + "nullable": true, + "description": "The VLAN identifier for the route. 
Use this if the gateway is reachable over an 802.1Q tagged L2 segment.", + "type": "integer", + "format": "uint16", + "minimum": 0 } + }, + "required": [ + "dst", + "gw", + "interface_name", + "port_settings_id" ] }, - "Switch": { - "description": "An operator's view of a Switch.", + "SwitchPortSettings": { + "description": "A switch port settings identity whose id may be used to view additional details.", "type": "object", "properties": { - "baseboard": { - "$ref": "#/components/schemas/Baseboard" + "description": { + "description": "human-readable free-form text about a resource", + "type": "string" }, "id": { "description": "unique, immutable, system-controlled identifier for each resource", "type": "string", "format": "uuid" }, - "rack_id": { - "description": "The rack to which this Switch is currently attached", - "type": "string", - "format": "uuid" + "name": { + "description": "unique, mutable, user-controlled identifier for each resource", + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] }, "time_created": { "description": "timestamp when this resource was created", @@ -22156,450 +23312,456 @@ } }, "required": [ - "baseboard", + "description", "id", - "rack_id", + "name", "time_created", "time_modified" ] }, - "SwitchBgpHistory": { - "description": "BGP message history for a particular switch.", + "SwitchPortSettingsCreate": { + "description": "Parameters for creating switch port settings. Switch port settings are the central data structure for setting up external networking. 
Switch port settings include link, interface, route, address and dynamic network protocol configuration.", "type": "object", "properties": { - "history": { - "description": "Message history indexed by peer address.", + "addresses": { + "description": "Addresses indexed by interface name.", "type": "object", "additionalProperties": { - "$ref": "#/components/schemas/BgpMessageHistory" + "$ref": "#/components/schemas/AddressConfig" } }, - "switch": { - "description": "Switch this message history is associated with.", - "allOf": [ - { - "$ref": "#/components/schemas/SwitchLocation" - } - ] - } - }, - "required": [ - "history", - "switch" - ] - }, - "SwitchInterfaceConfig": { - "description": "A switch port interface configuration for a port settings object.", - "type": "object", - "properties": { - "id": { - "description": "A unique identifier for this switch interface.", - "type": "string", - "format": "uuid" + "bgp_peers": { + "description": "BGP peers indexed by interface name.", + "type": "object", + "additionalProperties": { + "$ref": "#/components/schemas/BgpPeerConfig" + } }, - "interface_name": { - "description": "The name of this switch interface.", + "description": { "type": "string" }, - "kind": { - "description": "The switch interface kind.", - "allOf": [ - { - "$ref": "#/components/schemas/SwitchInterfaceKind2" - } - ] - }, - "port_settings_id": { - "description": "The port settings object this switch interface configuration belongs to.", - "type": "string", - "format": "uuid" - }, - "v6_enabled": { - "description": "Whether or not IPv6 is enabled on this interface.", - "type": "boolean" - } - }, - "required": [ - "id", - "interface_name", - "kind", - "port_settings_id", - "v6_enabled" - ] - }, - "SwitchInterfaceConfigCreate": { - "description": "A layer-3 switch interface configuration. 
When IPv6 is enabled, a link local address will be created for the interface.", - "type": "object", - "properties": { - "kind": { - "description": "What kind of switch interface this configuration represents.", - "allOf": [ - { - "$ref": "#/components/schemas/SwitchInterfaceKind" - } - ] - }, - "v6_enabled": { - "description": "Whether or not IPv6 is enabled.", - "type": "boolean" - } - }, - "required": [ - "kind", - "v6_enabled" - ] - }, - "SwitchInterfaceKind": { - "description": "Indicates the kind for a switch interface.", - "oneOf": [ - { - "description": "Primary interfaces are associated with physical links. There is exactly one primary interface per physical link.", - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "primary" - ] - } - }, - "required": [ - "type" - ] - }, - { - "description": "VLAN interfaces allow physical interfaces to be multiplexed onto multiple logical links, each distinguished by a 12-bit 802.1Q Ethernet tag.", - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "vlan" - ] - }, - "vid": { - "description": "The virtual network id (VID) that distinguishes this interface and is used for producing and consuming 802.1Q Ethernet tags. This field has a maximum value of 4095 as 802.1Q tags are twelve bits.", - "type": "integer", - "format": "uint16", - "minimum": 0 - } - }, - "required": [ - "type", - "vid" - ] + "groups": { + "type": "array", + "items": { + "$ref": "#/components/schemas/NameOrId" + } }, - { - "description": "Loopback interfaces are anchors for IP addresses that are not specific to any particular port.", - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "loopback" - ] - } - }, - "required": [ - "type" - ] - } - ] - }, - "SwitchInterfaceKind2": { - "description": "Describes the kind of an switch interface.", - "oneOf": [ - { - "description": "Primary interfaces are associated with physical links. 
There is exactly one primary interface per physical link.", - "type": "string", - "enum": [ - "primary" - ] + "interfaces": { + "description": "Interfaces indexed by link name.", + "type": "object", + "additionalProperties": { + "$ref": "#/components/schemas/SwitchInterfaceConfigCreate" + } }, - { - "description": "VLAN interfaces allow physical interfaces to be multiplexed onto multiple logical links, each distinguished by a 12-bit 802.1Q Ethernet tag.", - "type": "string", - "enum": [ - "vlan" - ] + "links": { + "description": "Links indexed by phy name. On ports that are not broken out, this is always phy0. On a 2x breakout the options are phy0 and phy1, on 4x phy0-phy3, etc.", + "type": "object", + "additionalProperties": { + "$ref": "#/components/schemas/LinkConfigCreate" + } }, - { - "description": "Loopback interfaces are anchors for IP addresses that are not specific to any particular port.", - "type": "string", - "enum": [ - "loopback" - ] - } - ] - }, - "SwitchLinkState": {}, - "SwitchLocation": { - "description": "Identifies switch physical location", - "oneOf": [ - { - "description": "Switch in upper slot", - "type": "string", - "enum": [ - "switch0" - ] + "name": { + "$ref": "#/components/schemas/Name" }, - { - "description": "Switch in lower slot", - "type": "string", - "enum": [ - "switch1" - ] + "port_config": { + "$ref": "#/components/schemas/SwitchPortConfigCreate" + }, + "routes": { + "description": "Routes indexed by interface name.", + "type": "object", + "additionalProperties": { + "$ref": "#/components/schemas/RouteConfig" + } } + }, + "required": [ + "addresses", + "bgp_peers", + "description", + "groups", + "interfaces", + "links", + "name", + "port_config", + "routes" ] }, - "SwitchPort": { - "description": "A switch port represents a physical external port on a rack switch.", + "SwitchPortSettingsGroups": { + "description": "This structure maps a port settings object to a port settings groups. 
Port settings objects may inherit settings from groups. This mapping defines the relationship between settings objects and the groups they reference.", "type": "object", "properties": { - "id": { - "description": "The id of the switch port.", + "port_settings_group_id": { + "description": "The id of a port settings group being referenced by a port settings object.", "type": "string", "format": "uuid" }, - "port_name": { - "description": "The name of this switch port.", - "type": "string" - }, "port_settings_id": { - "nullable": true, - "description": "The primary settings group of this switch port. Will be `None` until this switch port is configured.", - "type": "string", - "format": "uuid" - }, - "rack_id": { - "description": "The rack this switch port belongs to.", + "description": "The id of a port settings object referencing a port settings group.", "type": "string", "format": "uuid" + } + }, + "required": [ + "port_settings_group_id", + "port_settings_id" + ] + }, + "SwitchPortSettingsResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/SwitchPortSettings" + } }, - "switch_location": { - "description": "The switch location of this switch port.", + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", "type": "string" } }, "required": [ - "id", - "port_name", - "rack_id", - "switch_location" + "items" ] }, - "SwitchPortAddressConfig": { - "description": "An IP address configuration for a port settings object.", + "SwitchPortSettingsView": { + "description": "This structure contains all port settings information in one place. 
It's a convenience data structure for getting a complete view of a particular port's settings.", "type": "object", "properties": { - "address": { - "description": "The IP address and prefix.", + "addresses": { + "description": "Layer 3 IP address settings.", + "type": "array", + "items": { + "$ref": "#/components/schemas/SwitchPortAddressConfig" + } + }, + "bgp_peers": { + "description": "BGP peer settings.", + "type": "array", + "items": { + "$ref": "#/components/schemas/BgpPeer" + } + }, + "groups": { + "description": "Switch port settings included from other switch port settings groups.", + "type": "array", + "items": { + "$ref": "#/components/schemas/SwitchPortSettingsGroups" + } + }, + "interfaces": { + "description": "Layer 3 interface settings.", + "type": "array", + "items": { + "$ref": "#/components/schemas/SwitchInterfaceConfig" + } + }, + "link_lldp": { + "description": "Link-layer discovery protocol (LLDP) settings.", + "type": "array", + "items": { + "$ref": "#/components/schemas/LldpLinkConfig" + } + }, + "links": { + "description": "Layer 2 link settings.", + "type": "array", + "items": { + "$ref": "#/components/schemas/SwitchPortLinkConfig" + } + }, + "port": { + "description": "Layer 1 physical port settings.", "allOf": [ { - "$ref": "#/components/schemas/IpNet" + "$ref": "#/components/schemas/SwitchPortConfig" } ] }, - "address_lot_block_id": { - "description": "The id of the address lot block this address is drawn from.", - "type": "string", - "format": "uuid" + "routes": { + "description": "IP route settings.", + "type": "array", + "items": { + "$ref": "#/components/schemas/SwitchPortRouteConfig" + } }, - "interface_name": { - "description": "The interface name this address belongs to.", - "type": "string" + "settings": { + "description": "The primary switch port settings handle.", + "allOf": [ + { + "$ref": "#/components/schemas/SwitchPortSettings" + } + ] }, - "port_settings_id": { - "description": "The port settings object this address 
configuration belongs to.", - "type": "string", - "format": "uuid" + "tx_eq": { + "description": "TX equalization settings. These are optional, and most links will not need them.", + "type": "array", + "items": { + "nullable": true, + "allOf": [ + { + "$ref": "#/components/schemas/TxEqConfig" + } + ] + } }, - "vlan_id": { - "nullable": true, - "description": "An optional VLAN ID", - "type": "integer", - "format": "uint16", - "minimum": 0 + "vlan_interfaces": { + "description": "Vlan interface settings.", + "type": "array", + "items": { + "$ref": "#/components/schemas/SwitchVlanInterfaceConfig" + } } }, "required": [ - "address", - "address_lot_block_id", - "interface_name", - "port_settings_id" + "addresses", + "bgp_peers", + "groups", + "interfaces", + "link_lldp", + "links", + "port", + "routes", + "settings", + "tx_eq", + "vlan_interfaces" ] }, - "SwitchPortApplySettings": { - "description": "Parameters for applying settings to switch ports.", + "SwitchResultsPage": { + "description": "A single page of results", "type": "object", "properties": { - "port_settings": { - "description": "A name or id to use when applying switch port settings.", - "allOf": [ - { - "$ref": "#/components/schemas/NameOrId" - } - ] + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/Switch" + } + }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + }, + "SwitchVlanInterfaceConfig": { + "description": "A switch port VLAN interface configuration for a port settings object.", + "type": "object", + "properties": { + "interface_config_id": { + "description": "The switch interface configuration this VLAN interface configuration belongs to.", + "type": "string", + "format": "uuid" + }, + "vlan_id": { + "description": "The virtual network id for this interface that is used for producing and 
consuming 802.1Q Ethernet tags. This field has a maximum value of 4095 as 802.1Q tags are twelve bits.", + "type": "integer", + "format": "uint16", + "minimum": 0 } }, "required": [ - "port_settings" + "interface_config_id", + "vlan_id" ] }, - "SwitchPortConfig": { - "description": "A physical port configuration for a port settings object.", + "Table": { + "description": "A table represents one or more timeseries with the same schema.\n\nA table is the result of an OxQL query. It contains a name, usually the name of the timeseries schema from which the data is derived, and any number of timeseries, which contain the actual data.", "type": "object", "properties": { - "geometry": { - "description": "The physical link geometry of the port.", - "allOf": [ - { - "$ref": "#/components/schemas/SwitchPortGeometry2" - } - ] + "name": { + "type": "string" }, - "port_settings_id": { - "description": "The id of the port settings object this configuration belongs to.", - "type": "string", - "format": "uuid" + "timeseries": { + "type": "object", + "additionalProperties": { + "$ref": "#/components/schemas/Timeseries" + } } }, "required": [ - "geometry", - "port_settings_id" + "name", + "timeseries" ] }, - "SwitchPortConfigCreate": { - "description": "Physical switch port configuration.", + "TargetRelease": { + "description": "View of a system software target release.", "type": "object", "properties": { - "geometry": { - "description": "Link geometry for the switch port.", + "generation": { + "description": "The target-release generation number.", + "type": "integer", + "format": "int64" + }, + "release_source": { + "description": "The source of the target release.", "allOf": [ { - "$ref": "#/components/schemas/SwitchPortGeometry" + "$ref": "#/components/schemas/TargetReleaseSource" } ] + }, + "time_requested": { + "description": "The time it was set as the target release.", + "type": "string", + "format": "date-time" } }, "required": [ - "geometry" + "generation", + 
"release_source", + "time_requested" ] }, - "SwitchPortGeometry": { - "description": "The link geometry associated with a switch port.", + "TargetReleaseSource": { + "description": "Source of a system software target release.", "oneOf": [ { - "description": "The port contains a single QSFP28 link with four lanes.", - "type": "string", - "enum": [ - "qsfp28x1" - ] - }, - { - "description": "The port contains two QSFP28 links each with two lanes.", - "type": "string", - "enum": [ - "qsfp28x2" + "description": "Unspecified or unknown source (probably MUPdate).", + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "unspecified" + ] + } + }, + "required": [ + "type" ] }, { - "description": "The port contains four SFP28 links each with one lane.", - "type": "string", - "enum": [ - "sfp28x4" + "description": "The specified release of the rack's system software.", + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "system_version" + ] + }, + "version": { + "type": "string", + "pattern": "^(0|[1-9]\\d*)\\.(0|[1-9]\\d*)\\.(0|[1-9]\\d*)(?:-((?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\\.(?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\\+([0-9a-zA-Z-]+(?:\\.[0-9a-zA-Z-]+)*))?$" + } + }, + "required": [ + "type", + "version" ] } ] }, - "SwitchPortGeometry2": { - "description": "The link geometry associated with a switch port.", - "oneOf": [ - { - "description": "The port contains a single QSFP28 link with four lanes.", - "type": "string", - "enum": [ - "qsfp28x1" - ] - }, - { - "description": "The port contains two QSFP28 links each with two lanes.", - "type": "string", - "enum": [ - "qsfp28x2" - ] + "Timeseries": { + "description": "A timeseries contains a timestamped set of values from one source.\n\nThis includes the typed key-value pairs that uniquely identify it, and the set of timestamps and data values from it.", + "type": "object", + "properties": { + "fields": { + "type": "object", + 
"additionalProperties": { + "$ref": "#/components/schemas/FieldValue" + } }, - { - "description": "The port contains four SFP28 links each with one lane.", - "type": "string", - "enum": [ - "sfp28x4" - ] + "points": { + "$ref": "#/components/schemas/Points" } + }, + "required": [ + "fields", + "points" ] }, - "SwitchPortLinkConfig": { - "description": "A link configuration for a port settings object.", + "TimeseriesDescription": { + "description": "Text descriptions for the target and metric of a timeseries.", "type": "object", "properties": { - "autoneg": { - "description": "Whether or not the link has autonegotiation enabled.", - "type": "boolean" - }, - "fec": { - "nullable": true, - "description": "The requested forward-error correction method. If this is not specified, the standard FEC for the underlying media will be applied if it can be determined.", - "allOf": [ - { - "$ref": "#/components/schemas/LinkFec" - } - ] + "metric": { + "type": "string" }, - "link_name": { - "description": "The name of this link.", + "target": { + "type": "string" + } + }, + "required": [ + "metric", + "target" + ] + }, + "TimeseriesName": { + "title": "The name of a timeseries", + "description": "Names are constructed by concatenating the target and metric names with ':'. 
Target and metric names must be lowercase alphanumeric characters with '_' separating words.", + "type": "string", + "pattern": "^(([a-z]+[a-z0-9]*)(_([a-z0-9]+))*):(([a-z]+[a-z0-9]*)(_([a-z0-9]+))*)$" + }, + "TimeseriesQuery": { + "description": "A timeseries query string, written in the Oximeter query language.", + "type": "object", + "properties": { + "query": { + "description": "A timeseries query string, written in the Oximeter query language.", "type": "string" + } + }, + "required": [ + "query" + ] + }, + "TimeseriesSchema": { + "description": "The schema for a timeseries.\n\nThis includes the name of the timeseries, as well as the datum type of its metric and the schema for each field.", + "type": "object", + "properties": { + "authz_scope": { + "$ref": "#/components/schemas/AuthzScope" }, - "lldp_link_config_id": { - "nullable": true, - "description": "The link-layer discovery protocol service configuration id for this link.", + "created": { "type": "string", - "format": "uuid" + "format": "date-time" }, - "mtu": { - "description": "The maximum transmission unit for this link.", - "type": "integer", - "format": "uint16", - "minimum": 0 + "datum_type": { + "$ref": "#/components/schemas/DatumType" }, - "port_settings_id": { - "description": "The port settings this link configuration belongs to.", - "type": "string", - "format": "uuid" + "description": { + "$ref": "#/components/schemas/TimeseriesDescription" }, - "speed": { - "description": "The configured speed of the link.", - "allOf": [ - { - "$ref": "#/components/schemas/LinkSpeed" - } - ] + "field_schema": { + "type": "array", + "items": { + "$ref": "#/components/schemas/FieldSchema" + }, + "uniqueItems": true }, - "tx_eq_config_id": { - "nullable": true, - "description": "The tx_eq configuration id for this link.", - "type": "string", - "format": "uuid" - } - }, - "required": [ - "autoneg", - "link_name", - "mtu", - "port_settings_id", - "speed" + "timeseries_name": { + "$ref": 
"#/components/schemas/TimeseriesName" + }, + "units": { + "$ref": "#/components/schemas/Units" + }, + "version": { + "type": "integer", + "format": "uint8", + "minimum": 1 + } + }, + "required": [ + "authz_scope", + "created", + "datum_type", + "description", + "field_schema", + "timeseries_name", + "units", + "version" ] }, - "SwitchPortResultsPage": { + "TimeseriesSchemaResultsPage": { "description": "A single page of results", "type": "object", "properties": { @@ -22607,7 +23769,7 @@ "description": "list of items on this page of results", "type": "array", "items": { - "$ref": "#/components/schemas/SwitchPort" + "$ref": "#/components/schemas/TimeseriesSchema" } }, "next_page": { @@ -22620,185 +23782,215 @@ "items" ] }, - "SwitchPortRouteConfig": { - "description": "A route configuration for a port settings object.", + "TxEqConfig": { + "description": "Per-port tx-eq overrides. This can be used to fine-tune the transceiver equalization settings to improve signal integrity.", "type": "object", "properties": { - "dst": { - "description": "The route's destination network.", - "allOf": [ - { - "$ref": "#/components/schemas/IpNet" - } - ] - }, - "gw": { - "description": "The route's gateway address.", - "allOf": [ - { - "$ref": "#/components/schemas/IpNet" - } - ] + "main": { + "nullable": true, + "description": "Main tap", + "type": "integer", + "format": "int32" }, - "interface_name": { - "description": "The interface name this route configuration is assigned to.", - "type": "string" + "post1": { + "nullable": true, + "description": "Post-cursor tap1", + "type": "integer", + "format": "int32" }, - "port_settings_id": { - "description": "The port settings object this route configuration belongs to.", - "type": "string", - "format": "uuid" + "post2": { + "nullable": true, + "description": "Post-cursor tap2", + "type": "integer", + "format": "int32" }, - "rib_priority": { + "pre1": { "nullable": true, - "description": "RIB Priority indicating priority within and across 
protocols.", + "description": "Pre-cursor tap1", "type": "integer", - "format": "uint8", - "minimum": 0 + "format": "int32" }, - "vlan_id": { + "pre2": { "nullable": true, - "description": "The VLAN identifier for the route. Use this if the gateway is reachable over an 802.1Q tagged L2 segment.", + "description": "Pre-cursor tap2", + "type": "integer", + "format": "int32" + } + } + }, + "TypedUuidForInstanceKind": { + "type": "string", + "format": "uuid" + }, + "TypedUuidForSupportBundleKind": { + "type": "string", + "format": "uuid" + }, + "TypedUuidForWebhookEventKind": { + "type": "string", + "format": "uuid" + }, + "TypedUuidForWebhookReceiverKind": { + "type": "string", + "format": "uuid" + }, + "UninitializedSled": { + "description": "A sled that has not been added to an initialized rack yet", + "type": "object", + "properties": { + "baseboard": { + "$ref": "#/components/schemas/Baseboard" + }, + "cubby": { "type": "integer", "format": "uint16", "minimum": 0 + }, + "rack_id": { + "type": "string", + "format": "uuid" } }, "required": [ - "dst", - "gw", - "interface_name", - "port_settings_id" + "baseboard", + "cubby", + "rack_id" ] }, - "SwitchPortSettings": { - "description": "A switch port settings identity whose id may be used to view additional details.", + "UninitializedSledId": { + "description": "The unique hardware ID for a sled", "type": "object", "properties": { - "description": { - "description": "human-readable free-form text about a resource", + "part": { "type": "string" }, - "id": { - "description": "unique, immutable, system-controlled identifier for each resource", - "type": "string", - "format": "uuid" - }, - "name": { - "description": "unique, mutable, user-controlled identifier for each resource", - "allOf": [ - { - "$ref": "#/components/schemas/Name" - } - ] - }, - "time_created": { - "description": "timestamp when this resource was created", - "type": "string", - "format": "date-time" - }, - "time_modified": { - "description": "timestamp 
when this resource was last modified", - "type": "string", - "format": "date-time" + "serial": { + "type": "string" } }, "required": [ - "description", - "id", - "name", - "time_created", - "time_modified" + "part", + "serial" ] }, - "SwitchPortSettingsCreate": { - "description": "Parameters for creating switch port settings. Switch port settings are the central data structure for setting up external networking. Switch port settings include link, interface, route, address and dynamic network protocol configuration.", + "UninitializedSledResultsPage": { + "description": "A single page of results", "type": "object", "properties": { - "addresses": { - "description": "Addresses indexed by interface name.", - "type": "object", - "additionalProperties": { - "$ref": "#/components/schemas/AddressConfig" - } - }, - "bgp_peers": { - "description": "BGP peers indexed by interface name.", - "type": "object", - "additionalProperties": { - "$ref": "#/components/schemas/BgpPeerConfig" - } - }, - "description": { - "type": "string" - }, - "groups": { + "items": { + "description": "list of items on this page of results", "type": "array", "items": { - "$ref": "#/components/schemas/NameOrId" + "$ref": "#/components/schemas/UninitializedSled" } }, - "interfaces": { - "description": "Interfaces indexed by link name.", - "type": "object", - "additionalProperties": { - "$ref": "#/components/schemas/SwitchInterfaceConfigCreate" - } + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + }, + "Units": { + "description": "Measurement units for timeseries samples.", + "oneOf": [ + { + "type": "string", + "enum": [ + "count", + "bytes", + "seconds", + "nanoseconds", + "volts", + "amps", + "watts", + "degrees_celsius" + ] }, - "links": { - "description": "Links indexed by phy name. On ports that are not broken out, this is always phy0. 
On a 2x breakout the options are phy0 and phy1, on 4x phy0-phy3, etc.", - "type": "object", - "additionalProperties": { - "$ref": "#/components/schemas/LinkConfigCreate" - } + { + "description": "No meaningful units, e.g. a dimensionless quanity.", + "type": "string", + "enum": [ + "none" + ] }, - "name": { - "$ref": "#/components/schemas/Name" + { + "description": "Rotations per minute.", + "type": "string", + "enum": [ + "rpm" + ] + } + ] + }, + "User": { + "description": "View of a User", + "type": "object", + "properties": { + "display_name": { + "description": "Human-readable name that can identify the user", + "type": "string" }, - "port_config": { - "$ref": "#/components/schemas/SwitchPortConfigCreate" + "id": { + "type": "string", + "format": "uuid" }, - "routes": { - "description": "Routes indexed by interface name.", - "type": "object", - "additionalProperties": { - "$ref": "#/components/schemas/RouteConfig" - } + "silo_id": { + "description": "Uuid of the silo to which this user belongs", + "type": "string", + "format": "uuid" } }, - "required": [ - "addresses", - "bgp_peers", - "description", - "groups", - "interfaces", - "links", - "name", - "port_config", - "routes" + "required": [ + "display_name", + "id", + "silo_id" ] }, - "SwitchPortSettingsGroups": { - "description": "This structure maps a port settings object to a port settings groups. Port settings objects may inherit settings from groups. 
This mapping defines the relationship between settings objects and the groups they reference.", + "UserBuiltin": { + "description": "View of a Built-in User\n\nBuilt-in users are identities internal to the system, used when the control plane performs actions autonomously", "type": "object", "properties": { - "port_settings_group_id": { - "description": "The id of a port settings group being referenced by a port settings object.", + "description": { + "description": "human-readable free-form text about a resource", + "type": "string" + }, + "id": { + "description": "unique, immutable, system-controlled identifier for each resource", "type": "string", "format": "uuid" }, - "port_settings_id": { - "description": "The id of a port settings object referencing a port settings group.", + "name": { + "description": "unique, mutable, user-controlled identifier for each resource", + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + }, + "time_created": { + "description": "timestamp when this resource was created", "type": "string", - "format": "uuid" + "format": "date-time" + }, + "time_modified": { + "description": "timestamp when this resource was last modified", + "type": "string", + "format": "date-time" } }, "required": [ - "port_settings_group_id", - "port_settings_id" + "description", + "id", + "name", + "time_created", + "time_modified" ] }, - "SwitchPortSettingsResultsPage": { + "UserBuiltinResultsPage": { "description": "A single page of results", "type": "object", "properties": { @@ -22806,7 +23998,7 @@ "description": "list of items on this page of results", "type": "array", "items": { - "$ref": "#/components/schemas/SwitchPortSettings" + "$ref": "#/components/schemas/UserBuiltin" } }, "next_page": { @@ -22819,110 +24011,80 @@ "items" ] }, - "SwitchPortSettingsView": { - "description": "This structure contains all port settings information in one place. 
It's a convenience data structure for getting a complete view of a particular port's settings.", + "UserCreate": { + "description": "Create-time parameters for a `User`", "type": "object", "properties": { - "addresses": { - "description": "Layer 3 IP address settings.", - "type": "array", - "items": { - "$ref": "#/components/schemas/SwitchPortAddressConfig" - } - }, - "bgp_peers": { - "description": "BGP peer settings.", - "type": "array", - "items": { - "$ref": "#/components/schemas/BgpPeer" - } - }, - "groups": { - "description": "Switch port settings included from other switch port settings groups.", - "type": "array", - "items": { - "$ref": "#/components/schemas/SwitchPortSettingsGroups" - } - }, - "interfaces": { - "description": "Layer 3 interface settings.", - "type": "array", - "items": { - "$ref": "#/components/schemas/SwitchInterfaceConfig" - } - }, - "link_lldp": { - "description": "Link-layer discovery protocol (LLDP) settings.", - "type": "array", - "items": { - "$ref": "#/components/schemas/LldpLinkConfig" - } - }, - "links": { - "description": "Layer 2 link settings.", - "type": "array", - "items": { - "$ref": "#/components/schemas/SwitchPortLinkConfig" - } - }, - "port": { - "description": "Layer 1 physical port settings.", + "external_id": { + "description": "username used to log in", "allOf": [ { - "$ref": "#/components/schemas/SwitchPortConfig" + "$ref": "#/components/schemas/UserId" } ] }, - "routes": { - "description": "IP route settings.", - "type": "array", - "items": { - "$ref": "#/components/schemas/SwitchPortRouteConfig" - } - }, - "settings": { - "description": "The primary switch port settings handle.", + "password": { + "description": "how to set the user's login password", "allOf": [ { - "$ref": "#/components/schemas/SwitchPortSettings" + "$ref": "#/components/schemas/UserPassword" } ] - }, - "tx_eq": { - "description": "TX equalization settings. 
These are optional, and most links will not need them.", - "type": "array", - "items": { - "nullable": true, - "allOf": [ - { - "$ref": "#/components/schemas/TxEqConfig" - } - ] - } - }, - "vlan_interfaces": { - "description": "Vlan interface settings.", - "type": "array", - "items": { - "$ref": "#/components/schemas/SwitchVlanInterfaceConfig" - } } }, "required": [ - "addresses", - "bgp_peers", - "groups", - "interfaces", - "link_lldp", - "links", - "port", - "routes", - "settings", - "tx_eq", - "vlan_interfaces" + "external_id", + "password" ] }, - "SwitchResultsPage": { + "UserId": { + "title": "A username for a local-only user", + "description": "Usernames must begin with a lower case ASCII letter, be composed exclusively of lowercase ASCII, uppercase ASCII, numbers, and '-', and may not end with a '-'. Usernames cannot be a UUID, but they may contain a UUID. They can be at most 63 characters long.", + "type": "string", + "pattern": "^(?![0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$)^[a-z]([a-zA-Z0-9-]*[a-zA-Z0-9]+)?$", + "minLength": 1, + "maxLength": 63 + }, + "UserPassword": { + "description": "Parameters for setting a user's password", + "oneOf": [ + { + "description": "Sets the user's password to the provided value", + "type": "object", + "properties": { + "mode": { + "type": "string", + "enum": [ + "password" + ] + }, + "value": { + "$ref": "#/components/schemas/Password" + } + }, + "required": [ + "mode", + "value" + ] + }, + { + "description": "Invalidates any current password (disabling password authentication)", + "type": "object", + "properties": { + "mode": { + "type": "string", + "enum": [ + "login_disallowed" + ] + } + }, + "required": [ + "mode" + ] + } + ] + }, + "UserResultsPage": { "description": "A single page of results", "type": "object", "properties": { @@ -22930,7 +24092,7 @@ "description": "list of items on this page of results", "type": "array", "items": { - "$ref": "#/components/schemas/Switch" + "$ref": 
"#/components/schemas/User" } }, "next_page": { @@ -22943,405 +24105,390 @@ "items" ] }, - "SwitchVlanInterfaceConfig": { - "description": "A switch port VLAN interface configuration for a port settings object.", - "type": "object", - "properties": { - "interface_config_id": { - "description": "The switch interface configuration this VLAN interface configuration belongs to.", - "type": "string", - "format": "uuid" - }, - "vlan_id": { - "description": "The virtual network id for this interface that is used for producing and consuming 802.1Q Ethernet tags. This field has a maximum value of 4095 as 802.1Q tags are twelve bits.", - "type": "integer", - "format": "uint16", - "minimum": 0 - } - }, - "required": [ - "interface_config_id", - "vlan_id" - ] - }, - "Table": { - "description": "A table represents one or more timeseries with the same schema.\n\nA table is the result of an OxQL query. It contains a name, usually the name of the timeseries schema from which the data is derived, and any number of timeseries, which contain the actual data.", + "UsernamePasswordCredentials": { + "description": "Credentials for local user login", "type": "object", "properties": { - "name": { - "type": "string" + "password": { + "$ref": "#/components/schemas/Password" }, - "timeseries": { - "type": "object", - "additionalProperties": { - "$ref": "#/components/schemas/Timeseries" - } + "username": { + "$ref": "#/components/schemas/UserId" } }, "required": [ - "name", - "timeseries" + "password", + "username" ] }, - "TargetRelease": { - "description": "View of a system software target release.", + "Utilization": { + "description": "View of the current silo's resource utilization and capacity", "type": "object", "properties": { - "generation": { - "description": "The target-release generation number.", - "type": "integer", - "format": "int64" - }, - "release_source": { - "description": "The source of the target release.", + "capacity": { + "description": "The total amount of resources 
that can be provisioned in this silo Actions that would exceed this limit will fail", "allOf": [ { - "$ref": "#/components/schemas/TargetReleaseSource" + "$ref": "#/components/schemas/VirtualResourceCounts" } ] }, - "time_requested": { - "description": "The time it was set as the target release.", - "type": "string", - "format": "date-time" + "provisioned": { + "description": "Accounts for resources allocated to running instances or storage allocated via disks or snapshots Note that CPU and memory resources associated with a stopped instances are not counted here whereas associated disks will still be counted", + "allOf": [ + { + "$ref": "#/components/schemas/VirtualResourceCounts" + } + ] } }, "required": [ - "generation", - "release_source", - "time_requested" + "capacity", + "provisioned" ] }, - "TargetReleaseSource": { - "description": "Source of a system software target release.", + "ValueArray": { + "description": "List of data values for one timeseries.\n\nEach element is an option, where `None` represents a missing sample.", "oneOf": [ { - "description": "Unspecified or unknown source (probably MUPdate).", "type": "object", "properties": { "type": { "type": "string", "enum": [ - "unspecified" + "integer" ] + }, + "values": { + "type": "array", + "items": { + "nullable": true, + "type": "integer", + "format": "int64" + } } }, "required": [ - "type" + "type", + "values" ] }, { - "description": "The specified release of the rack's system software.", "type": "object", "properties": { "type": { "type": "string", "enum": [ - "system_version" + "double" ] }, - "version": { - "type": "string", - "pattern": "^(0|[1-9]\\d*)\\.(0|[1-9]\\d*)\\.(0|[1-9]\\d*)(?:-((?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\\.(?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\\+([0-9a-zA-Z-]+(?:\\.[0-9a-zA-Z-]+)*))?$" + "values": { + "type": "array", + "items": { + "nullable": true, + "type": "number", + "format": "double" + } } }, "required": [ "type", - "version" + "values" ] - } - ] 
- }, - "Timeseries": { - "description": "A timeseries contains a timestamped set of values from one source.\n\nThis includes the typed key-value pairs that uniquely identify it, and the set of timestamps and data values from it.", - "type": "object", - "properties": { - "fields": { - "type": "object", - "additionalProperties": { - "$ref": "#/components/schemas/FieldValue" - } - }, - "points": { - "$ref": "#/components/schemas/Points" - } - }, - "required": [ - "fields", - "points" - ] - }, - "TimeseriesDescription": { - "description": "Text descriptions for the target and metric of a timeseries.", - "type": "object", - "properties": { - "metric": { - "type": "string" - }, - "target": { - "type": "string" - } - }, - "required": [ - "metric", - "target" - ] - }, - "TimeseriesName": { - "title": "The name of a timeseries", - "description": "Names are constructed by concatenating the target and metric names with ':'. Target and metric names must be lowercase alphanumeric characters with '_' separating words.", - "type": "string", - "pattern": "^(([a-z]+[a-z0-9]*)(_([a-z0-9]+))*):(([a-z]+[a-z0-9]*)(_([a-z0-9]+))*)$" - }, - "TimeseriesQuery": { - "description": "A timeseries query string, written in the Oximeter query language.", - "type": "object", - "properties": { - "query": { - "description": "A timeseries query string, written in the Oximeter query language.", - "type": "string" - } - }, - "required": [ - "query" - ] - }, - "TimeseriesSchema": { - "description": "The schema for a timeseries.\n\nThis includes the name of the timeseries, as well as the datum type of its metric and the schema for each field.", - "type": "object", - "properties": { - "authz_scope": { - "$ref": "#/components/schemas/AuthzScope" - }, - "created": { - "type": "string", - "format": "date-time" - }, - "datum_type": { - "$ref": "#/components/schemas/DatumType" }, - "description": { - "$ref": "#/components/schemas/TimeseriesDescription" - }, - "field_schema": { - "type": "array", - "items": { 
- "$ref": "#/components/schemas/FieldSchema" + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "boolean" + ] + }, + "values": { + "type": "array", + "items": { + "nullable": true, + "type": "boolean" + } + } }, - "uniqueItems": true - }, - "timeseries_name": { - "$ref": "#/components/schemas/TimeseriesName" - }, - "units": { - "$ref": "#/components/schemas/Units" - }, - "version": { - "type": "integer", - "format": "uint8", - "minimum": 1 - } - }, - "required": [ - "authz_scope", - "created", - "datum_type", - "description", - "field_schema", - "timeseries_name", - "units", - "version" - ] - }, - "TimeseriesSchemaResultsPage": { - "description": "A single page of results", - "type": "object", - "properties": { - "items": { - "description": "list of items on this page of results", - "type": "array", - "items": { - "$ref": "#/components/schemas/TimeseriesSchema" - } - }, - "next_page": { - "nullable": true, - "description": "token used to fetch the next page of results (if any)", - "type": "string" - } - }, - "required": [ - "items" - ] - }, - "TxEqConfig": { - "description": "Per-port tx-eq overrides. 
This can be used to fine-tune the transceiver equalization settings to improve signal integrity.", - "type": "object", - "properties": { - "main": { - "nullable": true, - "description": "Main tap", - "type": "integer", - "format": "int32" - }, - "post1": { - "nullable": true, - "description": "Post-cursor tap1", - "type": "integer", - "format": "int32" + "required": [ + "type", + "values" + ] }, - "post2": { - "nullable": true, - "description": "Post-cursor tap2", - "type": "integer", - "format": "int32" + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "string" + ] + }, + "values": { + "type": "array", + "items": { + "nullable": true, + "type": "string" + } + } + }, + "required": [ + "type", + "values" + ] }, - "pre1": { - "nullable": true, - "description": "Pre-cursor tap1", - "type": "integer", - "format": "int32" + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "integer_distribution" + ] + }, + "values": { + "type": "array", + "items": { + "nullable": true, + "allOf": [ + { + "$ref": "#/components/schemas/Distributionint64" + } + ] + } + } + }, + "required": [ + "type", + "values" + ] }, - "pre2": { - "nullable": true, - "description": "Pre-cursor tap2", - "type": "integer", - "format": "int32" + { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "double_distribution" + ] + }, + "values": { + "type": "array", + "items": { + "nullable": true, + "allOf": [ + { + "$ref": "#/components/schemas/Distributiondouble" + } + ] + } + } + }, + "required": [ + "type", + "values" + ] } - } - }, - "TypedUuidForInstanceKind": { - "type": "string", - "format": "uuid" - }, - "TypedUuidForSupportBundleKind": { - "type": "string", - "format": "uuid" + ] }, - "UninitializedSled": { - "description": "A sled that has not been added to an initialized rack yet", + "Values": { + "description": "A single list of values, for one dimension of a timeseries.", "type": "object", 
"properties": { - "baseboard": { - "$ref": "#/components/schemas/Baseboard" - }, - "cubby": { - "type": "integer", - "format": "uint16", - "minimum": 0 + "metric_type": { + "description": "The type of this metric.", + "allOf": [ + { + "$ref": "#/components/schemas/MetricType" + } + ] }, - "rack_id": { - "type": "string", - "format": "uuid" + "values": { + "description": "The data values.", + "allOf": [ + { + "$ref": "#/components/schemas/ValueArray" + } + ] } }, "required": [ - "baseboard", - "cubby", - "rack_id" + "metric_type", + "values" ] }, - "UninitializedSledId": { - "description": "The unique hardware ID for a sled", + "VirtualResourceCounts": { + "description": "A collection of resource counts used to describe capacity and utilization", "type": "object", "properties": { - "part": { - "type": "string" + "cpus": { + "description": "Number of virtual CPUs", + "type": "integer", + "format": "int64" }, - "serial": { - "type": "string" + "memory": { + "description": "Amount of memory in bytes", + "allOf": [ + { + "$ref": "#/components/schemas/ByteCount" + } + ] + }, + "storage": { + "description": "Amount of disk storage in bytes", + "allOf": [ + { + "$ref": "#/components/schemas/ByteCount" + } + ] } }, "required": [ - "part", - "serial" + "cpus", + "memory", + "storage" ] }, - "UninitializedSledResultsPage": { - "description": "A single page of results", + "Vni": { + "description": "A Geneve Virtual Network Identifier", + "type": "integer", + "format": "uint32", + "minimum": 0 + }, + "Vpc": { + "description": "View of a VPC", "type": "object", "properties": { - "items": { - "description": "list of items on this page of results", - "type": "array", - "items": { - "$ref": "#/components/schemas/UninitializedSled" - } - }, - "next_page": { - "nullable": true, - "description": "token used to fetch the next page of results (if any)", + "description": { + "description": "human-readable free-form text about a resource", "type": "string" - } - }, - "required": [ - 
"items" - ] - }, - "Units": { - "description": "Measurement units for timeseries samples.", - "oneOf": [ - { - "type": "string", - "enum": [ - "count", - "bytes", - "seconds", - "nanoseconds", - "volts", - "amps", - "watts", - "degrees_celsius" + }, + "dns_name": { + "description": "The name used for the VPC in DNS.", + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } ] }, - { - "description": "No meaningful units, e.g. a dimensionless quanity.", + "id": { + "description": "unique, immutable, system-controlled identifier for each resource", "type": "string", - "enum": [ - "none" + "format": "uuid" + }, + "ipv6_prefix": { + "description": "The unique local IPv6 address range for subnets in this VPC", + "allOf": [ + { + "$ref": "#/components/schemas/Ipv6Net" + } ] }, - { - "description": "Rotations per minute.", - "type": "string", - "enum": [ - "rpm" + "name": { + "description": "unique, mutable, user-controlled identifier for each resource", + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } ] + }, + "project_id": { + "description": "id for the project containing this VPC", + "type": "string", + "format": "uuid" + }, + "system_router_id": { + "description": "id for the system router where subnet default routes are registered", + "type": "string", + "format": "uuid" + }, + "time_created": { + "description": "timestamp when this resource was created", + "type": "string", + "format": "date-time" + }, + "time_modified": { + "description": "timestamp when this resource was last modified", + "type": "string", + "format": "date-time" } + }, + "required": [ + "description", + "dns_name", + "id", + "ipv6_prefix", + "name", + "project_id", + "system_router_id", + "time_created", + "time_modified" ] }, - "User": { - "description": "View of a User", + "VpcCreate": { + "description": "Create-time parameters for a `Vpc`", "type": "object", "properties": { - "display_name": { - "description": "Human-readable name that can identify the user", + "description": { 
"type": "string" }, - "id": { - "type": "string", - "format": "uuid" + "dns_name": { + "$ref": "#/components/schemas/Name" }, - "silo_id": { - "description": "Uuid of the silo to which this user belongs", - "type": "string", - "format": "uuid" + "ipv6_prefix": { + "nullable": true, + "description": "The IPv6 prefix for this VPC\n\nAll IPv6 subnets created from this VPC must be taken from this range, which should be a Unique Local Address in the range `fd00::/48`. The default VPC Subnet will have the first `/64` range from this prefix.", + "allOf": [ + { + "$ref": "#/components/schemas/Ipv6Net" + } + ] + }, + "name": { + "$ref": "#/components/schemas/Name" } }, "required": [ - "display_name", - "id", - "silo_id" + "description", + "dns_name", + "name" ] }, - "UserBuiltin": { - "description": "View of a Built-in User\n\nBuilt-in users are identities internal to the system, used when the control plane performs actions autonomously", + "VpcFirewallRule": { + "description": "A single rule in a VPC firewall", "type": "object", "properties": { + "action": { + "description": "Whether traffic matching the rule should be allowed or dropped", + "allOf": [ + { + "$ref": "#/components/schemas/VpcFirewallRuleAction" + } + ] + }, "description": { "description": "human-readable free-form text about a resource", "type": "string" }, + "direction": { + "description": "Whether this rule is for incoming or outgoing traffic", + "allOf": [ + { + "$ref": "#/components/schemas/VpcFirewallRuleDirection" + } + ] + }, + "filters": { + "description": "Reductions on the scope of the rule", + "allOf": [ + { + "$ref": "#/components/schemas/VpcFirewallRuleFilter" + } + ] + }, "id": { "description": "unique, immutable, system-controlled identifier for each resource", "type": "string", @@ -23355,6 +24502,27 @@ } ] }, + "priority": { + "description": "The relative priority of this rule", + "type": "integer", + "format": "uint16", + "minimum": 0 + }, + "status": { + "description": "Whether this rule 
is in effect", + "allOf": [ + { + "$ref": "#/components/schemas/VpcFirewallRuleStatus" + } + ] + }, + "targets": { + "description": "Determine the set of instances that the rule applies to", + "type": "array", + "items": { + "$ref": "#/components/schemas/VpcFirewallRuleTarget" + } + }, "time_created": { "description": "timestamp when this resource was created", "type": "string", @@ -23364,484 +24532,294 @@ "description": "timestamp when this resource was last modified", "type": "string", "format": "date-time" + }, + "vpc_id": { + "description": "The VPC to which this rule belongs", + "type": "string", + "format": "uuid" } }, "required": [ + "action", "description", + "direction", + "filters", "id", "name", + "priority", + "status", + "targets", "time_created", - "time_modified" - ] - }, - "UserBuiltinResultsPage": { - "description": "A single page of results", - "type": "object", - "properties": { - "items": { - "description": "list of items on this page of results", - "type": "array", - "items": { - "$ref": "#/components/schemas/UserBuiltin" - } - }, - "next_page": { - "nullable": true, - "description": "token used to fetch the next page of results (if any)", - "type": "string" - } - }, - "required": [ - "items" + "time_modified", + "vpc_id" ] }, - "UserCreate": { - "description": "Create-time parameters for a `User`", - "type": "object", - "properties": { - "external_id": { - "description": "username used to log in", - "allOf": [ - { - "$ref": "#/components/schemas/UserId" - } - ] - }, - "password": { - "description": "how to set the user's login password", - "allOf": [ - { - "$ref": "#/components/schemas/UserPassword" - } - ] - } - }, - "required": [ - "external_id", - "password" + "VpcFirewallRuleAction": { + "type": "string", + "enum": [ + "allow", + "deny" ] }, - "UserId": { - "title": "A username for a local-only user", - "description": "Usernames must begin with a lower case ASCII letter, be composed exclusively of lowercase ASCII, uppercase ASCII, numbers, 
and '-', and may not end with a '-'. Usernames cannot be a UUID, but they may contain a UUID. They can be at most 63 characters long.", + "VpcFirewallRuleDirection": { "type": "string", - "pattern": "^(?![0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$)^[a-z]([a-zA-Z0-9-]*[a-zA-Z0-9]+)?$", - "minLength": 1, - "maxLength": 63 - }, - "UserPassword": { - "description": "Parameters for setting a user's password", - "oneOf": [ - { - "description": "Sets the user's password to the provided value", - "type": "object", - "properties": { - "mode": { - "type": "string", - "enum": [ - "password" - ] - }, - "value": { - "$ref": "#/components/schemas/Password" - } - }, - "required": [ - "mode", - "value" - ] - }, - { - "description": "Invalidates any current password (disabling password authentication)", - "type": "object", - "properties": { - "mode": { - "type": "string", - "enum": [ - "login_disallowed" - ] - } - }, - "required": [ - "mode" - ] - } + "enum": [ + "inbound", + "outbound" ] }, - "UserResultsPage": { - "description": "A single page of results", + "VpcFirewallRuleFilter": { + "description": "Filters reduce the scope of a firewall rule. Without filters, the rule applies to all packets to the targets (or from the targets, if it's an outbound rule). With multiple filters, the rule applies only to packets matching ALL filters. The maximum number of each type of filter is 256.", "type": "object", "properties": { - "items": { - "description": "list of items on this page of results", + "hosts": { + "nullable": true, + "description": "If present, host filters match the \"other end\" of traffic from the target’s perspective: for an inbound rule, they match the source of traffic. 
For an outbound rule, they match the destination.", "type": "array", "items": { - "$ref": "#/components/schemas/User" - } + "$ref": "#/components/schemas/VpcFirewallRuleHostFilter" + }, + "maxItems": 256 }, - "next_page": { + "ports": { "nullable": true, - "description": "token used to fetch the next page of results (if any)", - "type": "string" - } - }, - "required": [ - "items" - ] - }, - "UsernamePasswordCredentials": { - "description": "Credentials for local user login", - "type": "object", - "properties": { - "password": { - "$ref": "#/components/schemas/Password" - }, - "username": { - "$ref": "#/components/schemas/UserId" - } - }, - "required": [ - "password", - "username" - ] - }, - "Utilization": { - "description": "View of the current silo's resource utilization and capacity", - "type": "object", - "properties": { - "capacity": { - "description": "The total amount of resources that can be provisioned in this silo Actions that would exceed this limit will fail", - "allOf": [ - { - "$ref": "#/components/schemas/VirtualResourceCounts" - } - ] + "description": "If present, the destination ports or port ranges this rule applies to.", + "type": "array", + "items": { + "$ref": "#/components/schemas/L4PortRange" + }, + "maxItems": 256 }, - "provisioned": { - "description": "Accounts for resources allocated to running instances or storage allocated via disks or snapshots Note that CPU and memory resources associated with a stopped instances are not counted here whereas associated disks will still be counted", - "allOf": [ - { - "$ref": "#/components/schemas/VirtualResourceCounts" - } - ] + "protocols": { + "nullable": true, + "description": "If present, the networking protocols this rule applies to.", + "type": "array", + "items": { + "$ref": "#/components/schemas/VpcFirewallRuleProtocol" + }, + "maxItems": 256 } - }, - "required": [ - "capacity", - "provisioned" - ] + } }, - "ValueArray": { - "description": "List of data values for one timeseries.\n\nEach element 
is an option, where `None` represents a missing sample.", + "VpcFirewallRuleHostFilter": { + "description": "The `VpcFirewallRuleHostFilter` is used to filter traffic on the basis of its source or destination host.", "oneOf": [ { + "description": "The rule applies to traffic from/to all instances in the VPC", "type": "object", "properties": { "type": { "type": "string", "enum": [ - "integer" - ] - }, - "values": { - "type": "array", - "items": { - "nullable": true, - "type": "integer", - "format": "int64" - } - } - }, - "required": [ - "type", - "values" - ] - }, - { - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "double" + "vpc" ] }, - "values": { - "type": "array", - "items": { - "nullable": true, - "type": "number", - "format": "double" - } + "value": { + "$ref": "#/components/schemas/Name" } }, "required": [ "type", - "values" + "value" ] }, { + "description": "The rule applies to traffic from/to all instances in the VPC Subnet", "type": "object", "properties": { "type": { "type": "string", "enum": [ - "boolean" + "subnet" ] }, - "values": { - "type": "array", - "items": { - "nullable": true, - "type": "boolean" - } + "value": { + "$ref": "#/components/schemas/Name" } }, "required": [ "type", - "values" + "value" ] }, { + "description": "The rule applies to traffic from/to this specific instance", "type": "object", "properties": { "type": { "type": "string", "enum": [ - "string" + "instance" ] }, - "values": { - "type": "array", - "items": { - "nullable": true, - "type": "string" - } + "value": { + "$ref": "#/components/schemas/Name" } }, "required": [ "type", - "values" + "value" ] }, { + "description": "The rule applies to traffic from/to a specific IP address", "type": "object", "properties": { "type": { "type": "string", "enum": [ - "integer_distribution" + "ip" ] }, - "values": { - "type": "array", - "items": { - "nullable": true, - "allOf": [ - { - "$ref": "#/components/schemas/Distributionint64" - } - ] - } + "value": 
{ + "type": "string", + "format": "ip" } }, "required": [ "type", - "values" + "value" ] }, { + "description": "The rule applies to traffic from/to a specific IP subnet", "type": "object", "properties": { "type": { "type": "string", "enum": [ - "double_distribution" + "ip_net" ] }, - "values": { - "type": "array", - "items": { - "nullable": true, - "allOf": [ - { - "$ref": "#/components/schemas/Distributiondouble" - } - ] - } + "value": { + "$ref": "#/components/schemas/IpNet" } }, "required": [ "type", - "values" + "value" ] } ] }, - "Values": { - "description": "A single list of values, for one dimension of a timeseries.", - "type": "object", - "properties": { - "metric_type": { - "description": "The type of this metric.", - "allOf": [ - { - "$ref": "#/components/schemas/MetricType" - } - ] - }, - "values": { - "description": "The data values.", - "allOf": [ - { - "$ref": "#/components/schemas/ValueArray" - } - ] - } - }, - "required": [ - "metric_type", - "values" + "VpcFirewallRuleProtocol": { + "description": "The protocols that may be specified in a firewall rule's filter", + "type": "string", + "enum": [ + "TCP", + "UDP", + "ICMP" ] }, - "VirtualResourceCounts": { - "description": "A collection of resource counts used to describe capacity and utilization", - "type": "object", - "properties": { - "cpus": { - "description": "Number of virtual CPUs", - "type": "integer", - "format": "int64" - }, - "memory": { - "description": "Amount of memory in bytes", - "allOf": [ - { - "$ref": "#/components/schemas/ByteCount" - } - ] - }, - "storage": { - "description": "Amount of disk storage in bytes", - "allOf": [ - { - "$ref": "#/components/schemas/ByteCount" - } - ] - } - }, - "required": [ - "cpus", - "memory", - "storage" + "VpcFirewallRuleStatus": { + "type": "string", + "enum": [ + "disabled", + "enabled" ] }, - "Vni": { - "description": "A Geneve Virtual Network Identifier", - "type": "integer", - "format": "uint32", - "minimum": 0 - }, - "Vpc": { - "description": 
"View of a VPC", - "type": "object", - "properties": { - "description": { - "description": "human-readable free-form text about a resource", - "type": "string" - }, - "dns_name": { - "description": "The name used for the VPC in DNS.", - "allOf": [ - { + "VpcFirewallRuleTarget": { + "description": "A `VpcFirewallRuleTarget` is used to specify the set of instances to which a firewall rule applies. You can target instances directly by name, or specify a VPC, VPC subnet, IP, or IP subnet, which will apply the rule to traffic going to all matching instances. Targets are additive: the rule applies to instances matching ANY target.", + "oneOf": [ + { + "description": "The rule applies to all instances in the VPC", + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "vpc" + ] + }, + "value": { "$ref": "#/components/schemas/Name" } + }, + "required": [ + "type", + "value" ] }, - "id": { - "description": "unique, immutable, system-controlled identifier for each resource", - "type": "string", - "format": "uuid" - }, - "ipv6_prefix": { - "description": "The unique local IPv6 address range for subnets in this VPC", - "allOf": [ - { - "$ref": "#/components/schemas/Ipv6Net" - } - ] - }, - "name": { - "description": "unique, mutable, user-controlled identifier for each resource", - "allOf": [ - { + { + "description": "The rule applies to all instances in the VPC Subnet", + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "subnet" + ] + }, + "value": { "$ref": "#/components/schemas/Name" } + }, + "required": [ + "type", + "value" ] }, - "project_id": { - "description": "id for the project containing this VPC", - "type": "string", - "format": "uuid" - }, - "system_router_id": { - "description": "id for the system router where subnet default routes are registered", - "type": "string", - "format": "uuid" - }, - "time_created": { - "description": "timestamp when this resource was created", - "type": "string", - "format": 
"date-time" - }, - "time_modified": { - "description": "timestamp when this resource was last modified", - "type": "string", - "format": "date-time" - } - }, - "required": [ - "description", - "dns_name", - "id", - "ipv6_prefix", - "name", - "project_id", - "system_router_id", - "time_created", - "time_modified" - ] - }, - "VpcCreate": { - "description": "Create-time parameters for a `Vpc`", - "type": "object", - "properties": { - "description": { - "type": "string" - }, - "dns_name": { - "$ref": "#/components/schemas/Name" + { + "description": "The rule applies to this specific instance", + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "instance" + ] + }, + "value": { + "$ref": "#/components/schemas/Name" + } + }, + "required": [ + "type", + "value" + ] }, - "ipv6_prefix": { - "nullable": true, - "description": "The IPv6 prefix for this VPC\n\nAll IPv6 subnets created from this VPC must be taken from this range, which should be a Unique Local Address in the range `fd00::/48`. 
The default VPC Subnet will have the first `/64` range from this prefix.", - "allOf": [ - { - "$ref": "#/components/schemas/Ipv6Net" + { + "description": "The rule applies to a specific IP address", + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "ip" + ] + }, + "value": { + "type": "string", + "format": "ip" } + }, + "required": [ + "type", + "value" ] }, - "name": { - "$ref": "#/components/schemas/Name" + { + "description": "The rule applies to a specific IP subnet", + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": [ + "ip_net" + ] + }, + "value": { + "$ref": "#/components/schemas/IpNet" + } + }, + "required": [ + "type", + "value" + ] } - }, - "required": [ - "description", - "dns_name", - "name" ] }, - "VpcFirewallRule": { + "VpcFirewallRuleUpdate": { "description": "A single rule in a VPC firewall", "type": "object", "properties": { @@ -23854,7 +24832,7 @@ ] }, "description": { - "description": "human-readable free-form text about a resource", + "description": "Human-readable free-form text about a resource", "type": "string" }, "direction": { @@ -23873,13 +24851,8 @@ } ] }, - "id": { - "description": "unique, immutable, system-controlled identifier for each resource", - "type": "string", - "format": "uuid" - }, "name": { - "description": "unique, mutable, user-controlled identifier for each resource", + "description": "Name of the rule, unique to this VPC", "allOf": [ { "$ref": "#/components/schemas/Name" @@ -23905,22 +24878,8 @@ "type": "array", "items": { "$ref": "#/components/schemas/VpcFirewallRuleTarget" - } - }, - "time_created": { - "description": "timestamp when this resource was created", - "type": "string", - "format": "date-time" - }, - "time_modified": { - "description": "timestamp when this resource was last modified", - "type": "string", - "format": "date-time" - }, - "vpc_id": { - "description": "The VPC to which this rule belongs", - "type": "string", - "format": "uuid" + }, + 
"maxItems": 256 } }, "required": [ @@ -23928,481 +24887,575 @@ "description", "direction", "filters", - "id", "name", "priority", "status", - "targets", - "time_created", - "time_modified", - "vpc_id" - ] - }, - "VpcFirewallRuleAction": { - "type": "string", - "enum": [ - "allow", - "deny" + "targets" ] }, - "VpcFirewallRuleDirection": { - "type": "string", - "enum": [ - "inbound", - "outbound" + "VpcFirewallRuleUpdateParams": { + "description": "Updated list of firewall rules. Will replace all existing rules.", + "type": "object", + "properties": { + "rules": { + "type": "array", + "items": { + "$ref": "#/components/schemas/VpcFirewallRuleUpdate" + }, + "maxItems": 1024 + } + }, + "required": [ + "rules" ] }, - "VpcFirewallRuleFilter": { - "description": "Filters reduce the scope of a firewall rule. Without filters, the rule applies to all packets to the targets (or from the targets, if it's an outbound rule). With multiple filters, the rule applies only to packets matching ALL filters. The maximum number of each type of filter is 256.", + "VpcFirewallRules": { + "description": "Collection of a Vpc's firewall rules", "type": "object", "properties": { - "hosts": { - "nullable": true, - "description": "If present, host filters match the \"other end\" of traffic from the target’s perspective: for an inbound rule, they match the source of traffic. 
For an outbound rule, they match the destination.", + "rules": { "type": "array", "items": { - "$ref": "#/components/schemas/VpcFirewallRuleHostFilter" - }, - "maxItems": 256 - }, - "ports": { - "nullable": true, - "description": "If present, the destination ports or port ranges this rule applies to.", + "$ref": "#/components/schemas/VpcFirewallRule" + } + } + }, + "required": [ + "rules" + ] + }, + "VpcResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", "type": "array", "items": { - "$ref": "#/components/schemas/L4PortRange" - }, - "maxItems": 256 + "$ref": "#/components/schemas/Vpc" + } }, - "protocols": { + "next_page": { "nullable": true, - "description": "If present, the networking protocols this rule applies to.", - "type": "array", - "items": { - "$ref": "#/components/schemas/VpcFirewallRuleProtocol" - }, - "maxItems": 256 + "description": "token used to fetch the next page of results (if any)", + "type": "string" } - } + }, + "required": [ + "items" + ] }, - "VpcFirewallRuleHostFilter": { - "description": "The `VpcFirewallRuleHostFilter` is used to filter traffic on the basis of its source or destination host.", - "oneOf": [ - { - "description": "The rule applies to traffic from/to all instances in the VPC", - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "vpc" - ] - }, - "value": { - "$ref": "#/components/schemas/Name" - } - }, - "required": [ - "type", - "value" - ] - }, - { - "description": "The rule applies to traffic from/to all instances in the VPC Subnet", - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "subnet" - ] - }, - "value": { - "$ref": "#/components/schemas/Name" - } - }, - "required": [ - "type", - "value" - ] - }, - { - "description": "The rule applies to traffic from/to this specific instance", - "type": "object", - "properties": { - "type": { - "type": 
"string", - "enum": [ - "instance" - ] - }, - "value": { - "$ref": "#/components/schemas/Name" - } - }, - "required": [ - "type", - "value" - ] + "VpcRouter": { + "description": "A VPC router defines a series of rules that indicate where traffic should be sent depending on its destination.", + "type": "object", + "properties": { + "description": { + "description": "human-readable free-form text about a resource", + "type": "string" }, - { - "description": "The rule applies to traffic from/to a specific IP address", - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "ip" - ] - }, - "value": { - "type": "string", - "format": "ip" - } - }, - "required": [ - "type", - "value" - ] + "id": { + "description": "unique, immutable, system-controlled identifier for each resource", + "type": "string", + "format": "uuid" }, - { - "description": "The rule applies to traffic from/to a specific IP subnet", - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "ip_net" - ] - }, - "value": { - "$ref": "#/components/schemas/IpNet" + "kind": { + "$ref": "#/components/schemas/VpcRouterKind" + }, + "name": { + "description": "unique, mutable, user-controlled identifier for each resource", + "allOf": [ + { + "$ref": "#/components/schemas/Name" } - }, - "required": [ - "type", - "value" ] + }, + "time_created": { + "description": "timestamp when this resource was created", + "type": "string", + "format": "date-time" + }, + "time_modified": { + "description": "timestamp when this resource was last modified", + "type": "string", + "format": "date-time" + }, + "vpc_id": { + "description": "The VPC to which the router belongs.", + "type": "string", + "format": "uuid" } + }, + "required": [ + "description", + "id", + "kind", + "name", + "time_created", + "time_modified", + "vpc_id" ] }, - "VpcFirewallRuleProtocol": { - "description": "The protocols that may be specified in a firewall rule's filter", - "type": "string", - "enum": [ 
- "TCP", - "UDP", - "ICMP" + "VpcRouterCreate": { + "description": "Create-time parameters for a `VpcRouter`", + "type": "object", + "properties": { + "description": { + "type": "string" + }, + "name": { + "$ref": "#/components/schemas/Name" + } + }, + "required": [ + "description", + "name" ] }, - "VpcFirewallRuleStatus": { + "VpcRouterKind": { "type": "string", "enum": [ - "disabled", - "enabled" + "system", + "custom" ] }, - "VpcFirewallRuleTarget": { - "description": "A `VpcFirewallRuleTarget` is used to specify the set of instances to which a firewall rule applies. You can target instances directly by name, or specify a VPC, VPC subnet, IP, or IP subnet, which will apply the rule to traffic going to all matching instances. Targets are additive: the rule applies to instances matching ANY target.", - "oneOf": [ - { - "description": "The rule applies to all instances in the VPC", - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "vpc" - ] - }, - "value": { - "$ref": "#/components/schemas/Name" - } - }, - "required": [ - "type", - "value" - ] + "VpcRouterResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/VpcRouter" + } }, - { - "description": "The rule applies to all instances in the VPC Subnet", - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "subnet" - ] - }, - "value": { + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + }, + "VpcRouterUpdate": { + "description": "Updateable properties of a `VpcRouter`", + "type": "object", + "properties": { + "description": { + "nullable": true, + "type": "string" + }, + "name": { + "nullable": true, + "allOf": [ + { "$ref": "#/components/schemas/Name" } - }, - 
"required": [ - "type", - "value" ] + } + } + }, + "VpcSubnet": { + "description": "A VPC subnet represents a logical grouping for instances that allows network traffic between them, within a IPv4 subnetwork or optionally an IPv6 subnetwork.", + "type": "object", + "properties": { + "custom_router_id": { + "nullable": true, + "description": "ID for an attached custom router.", + "type": "string", + "format": "uuid" }, - { - "description": "The rule applies to this specific instance", - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "instance" - ] - }, - "value": { - "$ref": "#/components/schemas/Name" + "description": { + "description": "human-readable free-form text about a resource", + "type": "string" + }, + "id": { + "description": "unique, immutable, system-controlled identifier for each resource", + "type": "string", + "format": "uuid" + }, + "ipv4_block": { + "description": "The IPv4 subnet CIDR block.", + "allOf": [ + { + "$ref": "#/components/schemas/Ipv4Net" } - }, - "required": [ - "type", - "value" ] }, - { - "description": "The rule applies to a specific IP address", - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "ip" - ] - }, - "value": { - "type": "string", - "format": "ip" + "ipv6_block": { + "description": "The IPv6 subnet CIDR block.", + "allOf": [ + { + "$ref": "#/components/schemas/Ipv6Net" } - }, - "required": [ - "type", - "value" ] }, - { - "description": "The rule applies to a specific IP subnet", - "type": "object", - "properties": { - "type": { - "type": "string", - "enum": [ - "ip_net" - ] - }, - "value": { - "$ref": "#/components/schemas/IpNet" + "name": { + "description": "unique, mutable, user-controlled identifier for each resource", + "allOf": [ + { + "$ref": "#/components/schemas/Name" } - }, - "required": [ - "type", - "value" ] + }, + "time_created": { + "description": "timestamp when this resource was created", + "type": "string", + "format": "date-time" + }, 
+ "time_modified": { + "description": "timestamp when this resource was last modified", + "type": "string", + "format": "date-time" + }, + "vpc_id": { + "description": "The VPC to which the subnet belongs.", + "type": "string", + "format": "uuid" } + }, + "required": [ + "description", + "id", + "ipv4_block", + "ipv6_block", + "name", + "time_created", + "time_modified", + "vpc_id" ] }, - "VpcFirewallRuleUpdate": { - "description": "A single rule in a VPC firewall", + "VpcSubnetCreate": { + "description": "Create-time parameters for a `VpcSubnet`", "type": "object", "properties": { - "action": { - "description": "Whether traffic matching the rule should be allowed or dropped", + "custom_router": { + "nullable": true, + "description": "An optional router, used to direct packets sent from hosts in this subnet to any destination address.\n\nCustom routers apply in addition to the VPC-wide *system* router, and have higher priority than the system router for an otherwise equal-prefix-length match.", "allOf": [ { - "$ref": "#/components/schemas/VpcFirewallRuleAction" + "$ref": "#/components/schemas/NameOrId" } ] }, "description": { - "description": "Human-readable free-form text about a resource", "type": "string" }, - "direction": { - "description": "Whether this rule is for incoming or outgoing traffic", + "ipv4_block": { + "description": "The IPv4 address range for this subnet.\n\nIt must be allocated from an RFC 1918 private address range, and must not overlap with any other existing subnet in the VPC.", "allOf": [ { - "$ref": "#/components/schemas/VpcFirewallRuleDirection" + "$ref": "#/components/schemas/Ipv4Net" } ] }, - "filters": { - "description": "Reductions on the scope of the rule", + "ipv6_block": { + "nullable": true, + "description": "The IPv6 address range for this subnet.\n\nIt must be allocated from the RFC 4193 Unique Local Address range, with the prefix equal to the parent VPC's prefix. A random `/64` block will be assigned if one is not provided. 
It must not overlap with any existing subnet in the VPC.", "allOf": [ { - "$ref": "#/components/schemas/VpcFirewallRuleFilter" + "$ref": "#/components/schemas/Ipv6Net" } ] }, "name": { - "description": "Name of the rule, unique to this VPC", + "$ref": "#/components/schemas/Name" + } + }, + "required": [ + "description", + "ipv4_block", + "name" + ] + }, + "VpcSubnetResultsPage": { + "description": "A single page of results", + "type": "object", + "properties": { + "items": { + "description": "list of items on this page of results", + "type": "array", + "items": { + "$ref": "#/components/schemas/VpcSubnet" + } + }, + "next_page": { + "nullable": true, + "description": "token used to fetch the next page of results (if any)", + "type": "string" + } + }, + "required": [ + "items" + ] + }, + "VpcSubnetUpdate": { + "description": "Updateable properties of a `VpcSubnet`", + "type": "object", + "properties": { + "custom_router": { + "nullable": true, + "description": "An optional router, used to direct packets sent from hosts in this subnet to any destination address.", "allOf": [ { - "$ref": "#/components/schemas/Name" + "$ref": "#/components/schemas/NameOrId" } ] }, - "priority": { - "description": "The relative priority of this rule", - "type": "integer", - "format": "uint16", - "minimum": 0 + "description": { + "nullable": true, + "type": "string" }, - "status": { - "description": "Whether this rule is in effect", + "name": { + "nullable": true, "allOf": [ { - "$ref": "#/components/schemas/VpcFirewallRuleStatus" + "$ref": "#/components/schemas/Name" } ] - }, - "targets": { - "description": "Determine the set of instances that the rule applies to", - "type": "array", - "items": { - "$ref": "#/components/schemas/VpcFirewallRuleTarget" - }, - "maxItems": 256 } - }, - "required": [ - "action", - "description", - "direction", - "filters", - "name", - "priority", - "status", - "targets" - ] + } }, - "VpcFirewallRuleUpdateParams": { - "description": "Updated list of firewall 
rules. Will replace all existing rules.", + "VpcUpdate": { + "description": "Updateable properties of a `Vpc`", "type": "object", "properties": { - "rules": { - "type": "array", - "items": { - "$ref": "#/components/schemas/VpcFirewallRuleUpdate" - }, - "maxItems": 1024 + "description": { + "nullable": true, + "type": "string" + }, + "dns_name": { + "nullable": true, + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] + }, + "name": { + "nullable": true, + "allOf": [ + { + "$ref": "#/components/schemas/Name" + } + ] } - }, - "required": [ - "rules" - ] + } }, - "VpcFirewallRules": { - "description": "Collection of a Vpc's firewall rules", + "WebhookCreate": { + "description": "Create-time identity-related parameters", "type": "object", "properties": { - "rules": { + "description": { + "type": "string" + }, + "endpoint": { + "description": "The URL that webhook notification requests should be sent to", + "type": "string", + "format": "uri" + }, + "events": { + "description": "A list of webhook event classes to subscribe to.\n\nIf this list is empty or is not included in the request body, the webhook will not be subscribed to any events.", + "default": [], "type": "array", "items": { - "$ref": "#/components/schemas/VpcFirewallRule" + "type": "string" + } + }, + "name": { + "$ref": "#/components/schemas/Name" + }, + "secrets": { + "description": "A non-empty list of secret keys used to sign webhook payloads.", + "type": "array", + "items": { + "type": "string" } } }, "required": [ - "rules" + "description", + "endpoint", + "name", + "secrets" ] }, - "VpcResultsPage": { - "description": "A single page of results", + "WebhookDelivery": { + "description": "A delivery of a webhook event.", "type": "object", "properties": { - "items": { - "description": "list of items on this page of results", + "attempts": { + "description": "Individual attempts to deliver this webhook event, and their outcomes.", "type": "array", "items": { - "$ref": "#/components/schemas/Vpc" 
+ "$ref": "#/components/schemas/WebhookDeliveryAttempt" } }, - "next_page": { - "nullable": true, - "description": "token used to fetch the next page of results (if any)", + "event_class": { + "description": "The event class.", "type": "string" + }, + "event_id": { + "description": "The UUID of the event.", + "allOf": [ + { + "$ref": "#/components/schemas/TypedUuidForWebhookEventKind" + } + ] + }, + "id": { + "description": "The UUID of this delivery attempt.", + "type": "string", + "format": "uuid" + }, + "state": { + "description": "The state of this delivery.", + "allOf": [ + { + "$ref": "#/components/schemas/WebhookDeliveryState" + } + ] + }, + "time_started": { + "description": "The time at which this delivery began (i.e. the event was dispatched to the receiver).", + "type": "string", + "format": "date-time" + }, + "trigger": { + "description": "Why this delivery was performed.", + "allOf": [ + { + "$ref": "#/components/schemas/WebhookDeliveryTrigger" + } + ] + }, + "webhook_id": { + "description": "The UUID of the webhook receiver that this event was delivered to.", + "allOf": [ + { + "$ref": "#/components/schemas/TypedUuidForWebhookReceiverKind" + } + ] } }, "required": [ - "items" + "attempts", + "event_class", + "event_id", + "id", + "state", + "time_started", + "trigger", + "webhook_id" ] }, - "VpcRouter": { - "description": "A VPC router defines a series of rules that indicate where traffic should be sent depending on its destination.", + "WebhookDeliveryAttempt": { + "description": "An individual delivery attempt for a webhook event.\n\nThis represents a single HTTP request that was sent to the receiver, and its outcome.", "type": "object", "properties": { - "description": { - "description": "human-readable free-form text about a resource", - "type": "string" - }, - "id": { - "description": "unique, immutable, system-controlled identifier for each resource", - "type": "string", - "format": "uuid" + "attempt": { + "description": "The attempt number.", + 
"type": "integer", + "format": "uint", + "minimum": 0 }, - "kind": { - "$ref": "#/components/schemas/VpcRouterKind" + "response": { + "nullable": true, + "allOf": [ + { + "$ref": "#/components/schemas/WebhookDeliveryResponse" + } + ] }, - "name": { - "description": "unique, mutable, user-controlled identifier for each resource", + "result": { + "description": "The outcome of this delivery attempt: either the event was delivered successfully, or the request failed for one of several reasons.", "allOf": [ { - "$ref": "#/components/schemas/Name" + "$ref": "#/components/schemas/WebhookDeliveryAttemptResult" } ] }, - "time_created": { - "description": "timestamp when this resource was created", + "time_sent": { + "description": "The time at which the webhook delivery was attempted.", "type": "string", "format": "date-time" + } + }, + "required": [ + "attempt", + "result", + "time_sent" + ] + }, + "WebhookDeliveryAttemptResult": { + "oneOf": [ + { + "description": "The webhook event has been delivered successfully.", + "type": "string", + "enum": [ + "succeeded" + ] }, - "time_modified": { - "description": "timestamp when this resource was last modified", + { + "description": "A webhook request was sent to the endpoint, and it returned a HTTP error status code indicating an error.", "type": "string", - "format": "date-time" + "enum": [ + "failed_http_error" + ] }, - "vpc_id": { - "description": "The VPC to which the router belongs.", + { + "description": "The webhook request could not be sent to the receiver endpoint.", "type": "string", - "format": "uuid" + "enum": [ + "failed_unreachable" + ] + }, + { + "description": "A connection to the receiver endpoint was successfully established, but no response was received within the delivery timeout.", + "type": "string", + "enum": [ + "failed_timeout" + ] } - }, - "required": [ - "description", - "id", - "kind", - "name", - "time_created", - "time_modified", - "vpc_id" ] }, - "VpcRouterCreate": { - "description": "Create-time 
parameters for a `VpcRouter`", + "WebhookDeliveryId": { + "type": "object", + "properties": { + "delivery_id": { + "type": "string", + "format": "uuid" + } + }, + "required": [ + "delivery_id" + ] + }, + "WebhookDeliveryResponse": { + "description": "The response received from a webhook receiver endpoint.", "type": "object", "properties": { - "description": { - "type": "string" + "duration_ms": { + "description": "The response time of the webhook endpoint, in milliseconds.", + "type": "integer", + "format": "uint", + "minimum": 0 }, - "name": { - "$ref": "#/components/schemas/Name" + "status": { + "description": "The HTTP status code returned from the webhook endpoint.", + "type": "integer", + "format": "uint16", + "minimum": 0 } }, "required": [ - "description", - "name" - ] - }, - "VpcRouterKind": { - "type": "string", - "enum": [ - "system", - "custom" + "duration_ms", + "status" ] }, - "VpcRouterResultsPage": { + "WebhookDeliveryResultsPage": { "description": "A single page of results", "type": "object", "properties": { @@ -24410,7 +25463,7 @@ "description": "list of items on this page of results", "type": "array", "items": { - "$ref": "#/components/schemas/VpcRouter" + "$ref": "#/components/schemas/WebhookDelivery" } }, "next_page": { @@ -24423,59 +25476,107 @@ "items" ] }, - "VpcRouterUpdate": { - "description": "Updateable properties of a `VpcRouter`", + "WebhookDeliveryState": { + "description": "The state of a webhook delivery attempt.", + "oneOf": [ + { + "description": "The webhook event has not yet been delivered successfully.\n\nEither no delivery attempts have yet been performed, or the delivery has failed at least once but has retries remaining.", + "type": "string", + "enum": [ + "pending" + ] + }, + { + "description": "The webhook event has been delivered successfully.", + "type": "string", + "enum": [ + "delivered" + ] + }, + { + "description": "The webhook delivery attempt has failed permanently and will not be retried again.", + "type": 
"string", + "enum": [ + "failed" + ] + } + ] + }, + "WebhookDeliveryTrigger": { + "description": "The reason a webhook event was delivered", + "oneOf": [ + { + "description": "Delivery was triggered by the event occurring for the first time.", + "type": "string", + "enum": [ + "event" + ] + }, + { + "description": "Delivery was triggered by a request to resend the event.", + "type": "string", + "enum": [ + "resend" + ] + }, + { + "description": "This delivery is a liveness probe.", + "type": "string", + "enum": [ + "probe" + ] + } + ] + }, + "WebhookProbeResult": { + "description": "Data describing the result of a webhook liveness probe attempt.", "type": "object", "properties": { - "description": { - "nullable": true, - "type": "string" - }, - "name": { - "nullable": true, + "probe": { + "description": "The outcome of the probe request.", "allOf": [ { - "$ref": "#/components/schemas/Name" + "$ref": "#/components/schemas/WebhookDelivery" } ] + }, + "resends_started": { + "nullable": true, + "description": "If the probe request succeeded, and resending failed deliveries on success was requested, the number of new delivery attempts started. 
Otherwise, if the probe did not succeed, or resending failed deliveries was not requested, this is null.\n\nNote that this may be 0, if there were no events found which had not been delivered successfully to this receiver.", + "type": "integer", + "format": "uint", + "minimum": 0 } - } + }, + "required": [ + "probe" + ] }, - "VpcSubnet": { - "description": "A VPC subnet represents a logical grouping for instances that allows network traffic between them, within a IPv4 subnetwork or optionally an IPv6 subnetwork.", + "WebhookReceiver": { + "description": "The configuration for a webhook.", "type": "object", "properties": { - "custom_router_id": { - "nullable": true, - "description": "ID for an attached custom router.", - "type": "string", - "format": "uuid" - }, "description": { "description": "human-readable free-form text about a resource", "type": "string" }, + "endpoint": { + "description": "The URL that webhook notification requests are sent to.", + "type": "string", + "format": "uri" + }, + "events": { + "description": "The list of event classes to which this receiver is subscribed.", + "type": "array", + "items": { + "type": "string" + } + }, "id": { "description": "unique, immutable, system-controlled identifier for each resource", "type": "string", "format": "uuid" }, - "ipv4_block": { - "description": "The IPv4 subnet CIDR block.", - "allOf": [ - { - "$ref": "#/components/schemas/Ipv4Net" - } - ] - }, - "ipv6_block": { - "description": "The IPv6 subnet CIDR block.", - "allOf": [ - { - "$ref": "#/components/schemas/Ipv6Net" - } - ] - }, "name": { "description": "unique, mutable, user-controlled identifier for each resource", "allOf": [ @@ -24484,6 +25585,12 @@ } ] }, + "secrets": { + "type": "array", + "items": { + "$ref": "#/components/schemas/WebhookSecretId" + } + }, "time_created": { "description": "timestamp when this resource was created", "type": "string", @@ -24493,68 +25600,20 @@ "description": "timestamp when this resource was last modified", 
"type": "string", "format": "date-time" - }, - "vpc_id": { - "description": "The VPC to which the subnet belongs.", - "type": "string", - "format": "uuid" } }, "required": [ "description", + "endpoint", + "events", "id", - "ipv4_block", - "ipv6_block", "name", + "secrets", "time_created", - "time_modified", - "vpc_id" - ] - }, - "VpcSubnetCreate": { - "description": "Create-time parameters for a `VpcSubnet`", - "type": "object", - "properties": { - "custom_router": { - "nullable": true, - "description": "An optional router, used to direct packets sent from hosts in this subnet to any destination address.\n\nCustom routers apply in addition to the VPC-wide *system* router, and have higher priority than the system router for an otherwise equal-prefix-length match.", - "allOf": [ - { - "$ref": "#/components/schemas/NameOrId" - } - ] - }, - "description": { - "type": "string" - }, - "ipv4_block": { - "description": "The IPv4 address range for this subnet.\n\nIt must be allocated from an RFC 1918 private address range, and must not overlap with any other existing subnet in the VPC.", - "allOf": [ - { - "$ref": "#/components/schemas/Ipv4Net" - } - ] - }, - "ipv6_block": { - "nullable": true, - "description": "The IPv6 address range for this subnet.\n\nIt must be allocated from the RFC 4193 Unique Local Address range, with the prefix equal to the parent VPC's prefix. A random `/64` block will be assigned if one is not provided. 
It must not overlap with any existing subnet in the VPC.", - "allOf": [ - { - "$ref": "#/components/schemas/Ipv6Net" - } - ] - }, - "name": { - "$ref": "#/components/schemas/Name" - } - }, - "required": [ - "description", - "ipv4_block", - "name" + "time_modified" ] }, - "VpcSubnetResultsPage": { + "WebhookReceiverResultsPage": { "description": "A single page of results", "type": "object", "properties": { @@ -24562,7 +25621,7 @@ "description": "list of items on this page of results", "type": "array", "items": { - "$ref": "#/components/schemas/VpcSubnet" + "$ref": "#/components/schemas/WebhookReceiver" } }, "next_page": { @@ -24575,23 +25634,28 @@ "items" ] }, - "VpcSubnetUpdate": { - "description": "Updateable properties of a `VpcSubnet`", + "WebhookReceiverUpdate": { + "description": "Parameters to update a webhook configuration.", "type": "object", "properties": { - "custom_router": { - "nullable": true, - "description": "An optional router, used to direct packets sent from hosts in this subnet to any destination address.", - "allOf": [ - { - "$ref": "#/components/schemas/NameOrId" - } - ] - }, "description": { "nullable": true, "type": "string" }, + "endpoint": { + "nullable": true, + "description": "The URL that webhook notification requests should be sent to", + "type": "string", + "format": "uri" + }, + "events": { + "nullable": true, + "description": "A list of webhook event classes to subscribe to.\n\nIf this list is empty, the webhook will not be subscribed to any events.", + "type": "array", + "items": { + "type": "string" + } + }, "name": { "nullable": true, "allOf": [ @@ -24602,31 +25666,45 @@ } } }, - "VpcUpdate": { - "description": "Updateable properties of a `Vpc`", + "WebhookSecretCreate": { "type": "object", "properties": { - "description": { - "nullable": true, + "secret": { + "description": "The value of the shared secret key.", "type": "string" - }, - "dns_name": { - "nullable": true, - "allOf": [ - { - "$ref": "#/components/schemas/Name" - } - 
] - }, - "name": { - "nullable": true, - "allOf": [ - { - "$ref": "#/components/schemas/Name" - } - ] } - } + }, + "required": [ + "secret" + ] + }, + "WebhookSecretId": { + "description": "The public ID of a secret key assigned to a webhook.", + "type": "object", + "properties": { + "id": { + "type": "string", + "format": "uuid" + } + }, + "required": [ + "id" + ] + }, + "WebhookSecrets": { + "description": "A list of the IDs of secrets associated with a webhook.", + "type": "object", + "properties": { + "secrets": { + "type": "array", + "items": { + "$ref": "#/components/schemas/WebhookSecretId" + } + } + }, + "required": [ + "secrets" + ] }, "NameOrIdSortMode": { "description": "Supported set of sort modes for scanning by name or id", @@ -24704,6 +25782,25 @@ ] } ] + }, + "TimeAndIdSortMode": { + "description": "Supported set of sort modes for scanning by timestamp and ID", + "oneOf": [ + { + "description": "sort in increasing order of timestamp and ID, i.e., earliest first", + "type": "string", + "enum": [ + "ascending" + ] + }, + { + "description": "sort in increasing order of timestamp and ID, i.e., most recent first", + "type": "string", + "enum": [ + "descending" + ] + } + ] } }, "responses": { @@ -24870,6 +25967,13 @@ { "name": "system/update" }, + { + "name": "system/webhooks", + "description": "Webhooks deliver notifications for audit log events and fault management alerts.", + "externalDocs": { + "url": "http://docs.oxide.computer/api/webhooks" + } + }, { "name": "vpcs", "description": "Virtual Private Clouds (VPCs) provide isolated network environments for managing and deploying services.", diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql index 8ed50b73379..e1d6c297594 100644 --- a/schema/crdb/dbinit.sql +++ b/schema/crdb/dbinit.sql @@ -3542,7 +3542,7 @@ CREATE TABLE IF NOT EXISTS omicron.public.inv_nvme_disk_firmware ( -- the firmware version string for each NVMe slot (0 indexed), a NULL means the -- slot exists but is empty 
slot_firmware_versions STRING(8)[] CHECK (array_length(slot_firmware_versions, 1) BETWEEN 1 AND 7), - + -- PK consisting of: -- - Which collection this was -- - The sled reporting the disk @@ -4992,6 +4992,383 @@ CREATE UNIQUE INDEX IF NOT EXISTS one_record_per_volume_resource_usage on omicro region_snapshot_snapshot_id ); +/* + * WEBHOOKS + */ + + +/* + * Webhook receivers, receiver secrets, and receiver subscriptions. + */ + +CREATE TABLE IF NOT EXISTS omicron.public.webhook_receiver ( + /* Identity metadata (resource) */ + id UUID PRIMARY KEY, + name STRING(63) NOT NULL, + description STRING(512) NOT NULL, + time_created TIMESTAMPTZ NOT NULL, + time_modified TIMESTAMPTZ NOT NULL, + time_deleted TIMESTAMPTZ, + -- Child resource generation for secrets. + secret_gen INT NOT NULL, + + -- Child resource generation for subscriptions. This is separate from + -- `secret_gen`, as updating secrets and updating subscriptions are separate + -- operations which don't conflict with each other. + subscription_gen INT NOT NULL, + -- URL of the endpoint webhooks are delivered to. + endpoint STRING(512) NOT NULL +); + +CREATE UNIQUE INDEX IF NOT EXISTS lookup_webhook_rx_by_id +ON omicron.public.webhook_receiver (id) +WHERE + time_deleted IS NULL; + +CREATE UNIQUE INDEX IF NOT EXISTS lookup_webhook_rx_by_name +ON omicron.public.webhook_receiver ( + name +) WHERE + time_deleted IS NULL; + +CREATE TABLE IF NOT EXISTS omicron.public.webhook_secret ( + -- ID of this secret. + id UUID PRIMARY KEY, + time_created TIMESTAMPTZ NOT NULL, + -- N.B. that this will always be equal to `time_created` for secrets, as + -- they are never modified once created. + time_modified TIMESTAMPTZ NOT NULL, + time_deleted TIMESTAMPTZ, + -- UUID of the webhook receiver (foreign key into + -- `omicron.public.webhook_rx`) + rx_id UUID NOT NULL, + -- Secret value. 
+ secret STRING(512) NOT NULL +); + +CREATE INDEX IF NOT EXISTS lookup_webhook_secrets_by_rx +ON omicron.public.webhook_secret ( + rx_id +) WHERE + time_deleted IS NULL; + +-- Webhook event classes. +-- +-- When creating new event classes, be sure to add them here! +CREATE TYPE IF NOT EXISTS omicron.public.webhook_event_class AS ENUM ( + -- Liveness probes, which are technically not real events, but, you know... + 'probe', + -- Test classes used to test globbing. + -- + -- These are not publicly exposed. + 'test.foo', + 'test.foo.bar', + 'test.foo.baz', + 'test.quux.bar', + 'test.quux.bar.baz' + -- Add new event classes here! +); + +-- The set of event class filters (either event class names or event class glob +-- patterns) associated with a webhook receiver. +-- +-- This is used when creating entries in the webhook_rx_subscription table to +-- indicate that a webhook receiver is interested in a given event class. +CREATE TABLE IF NOT EXISTS omicron.public.webhook_rx_event_glob ( + -- UUID of the webhook receiver (foreign key into + -- `omicron.public.webhook_rx`) + rx_id UUID NOT NULL, + -- An event class glob to which this receiver is subscribed. + glob STRING(512) NOT NULL, + -- Regex used when evaluating this filter against concrete event classes. + regex STRING(512) NOT NULL, + time_created TIMESTAMPTZ NOT NULL, + -- The database schema version at which this glob was last expanded. + -- + -- This is used to detect when a glob must be re-processed to generate exact + -- subscriptions on schema changes. + schema_version STRING(64) NOT NULL, + + PRIMARY KEY (rx_id, glob) +); + +-- Look up all event class globs for a webhook receiver. 
+CREATE INDEX IF NOT EXISTS lookup_webhook_event_globs_for_rx +ON omicron.public.webhook_rx_event_glob ( + rx_id +); + +CREATE INDEX IF NOT EXISTS lookup_webhook_event_globs_by_schema_version +ON omicron.public.webhook_rx_event_glob (schema_version); + +CREATE TABLE IF NOT EXISTS omicron.public.webhook_rx_subscription ( + -- UUID of the webhook receiver (foreign key into + -- `omicron.public.webhook_rx`) + rx_id UUID NOT NULL, + -- An event class to which the receiver is subscribed. + event_class omicron.public.webhook_event_class NOT NULL, + -- If this subscription is a concrete instantiation of a glob pattern, the + -- value of the glob that created it (and, a foreign key into + -- `webhook_rx_event_glob`). If the receiver is subscribed to this exact + -- event class, then this is NULL. + -- + -- This is used when deleting a glob subscription, as it is necessary to + -- delete any concrete subscriptions to individual event classes matching + -- that glob. + glob STRING(512), + + time_created TIMESTAMPTZ NOT NULL, + + PRIMARY KEY (rx_id, event_class) +); + +-- Look up all webhook receivers subscribed to an event class. This is used by +-- the dispatcher to determine who is interested in a particular event. +CREATE INDEX IF NOT EXISTS lookup_webhook_rxs_for_event_class +ON omicron.public.webhook_rx_subscription ( + event_class +); + +-- Look up all exact event class subscriptions for a receiver. +-- +-- This is used when generating a view of all user-provided original +-- subscriptions provided for a receiver. That list is generated by looking up +-- all exact event class subscriptions for the receiver ID in this table, +-- combined with the list of all globs in the `webhook_rx_event_glob` table. +CREATE INDEX IF NOT EXISTS lookup_exact_subscriptions_for_webhook_rx +on omicron.public.webhook_rx_subscription ( + rx_id +) WHERE glob IS NULL; + +/* + * Webhook event message queue. 
+ */ + +CREATE TABLE IF NOT EXISTS omicron.public.webhook_event ( + id UUID PRIMARY KEY, + time_created TIMESTAMPTZ NOT NULL, + time_modified TIMESTAMPTZ NOT NULL, + -- The class of event that this is. + event_class omicron.public.webhook_event_class NOT NULL, + -- Actual event data. The structure of this depends on the event class. + event JSONB NOT NULL, + + -- Set when dispatch entries have been created for this event. + time_dispatched TIMESTAMPTZ, + -- The number of receivers that this event was dispatched to. + num_dispatched INT8 NOT NULL, + + CONSTRAINT time_dispatched_set_if_dispatched CHECK ( + (num_dispatched = 0) OR (time_dispatched IS NOT NULL) + ), + + CONSTRAINT num_dispatched_is_positive CHECK ( + (num_dispatched >= 0) + ) +); + +-- Singleton probe event +INSERT INTO omicron.public.webhook_event ( + id, + time_created, + time_modified, + event_class, + event, + time_dispatched, + num_dispatched +) VALUES ( + -- NOTE: this UUID is duplicated in nexus_db_model::webhook_event. + '001de000-7768-4000-8000-000000000001', + NOW(), + NOW(), + 'probe', + '{}', + -- Pretend to be dispatched so we won't show up in "list events needing + -- dispatch" queries + NOW(), + 0 +) ON CONFLICT DO NOTHING; + +-- Look up webhook events in need of dispatching. +-- +-- This is used by the message dispatcher when looking for events to dispatch. +CREATE INDEX IF NOT EXISTS lookup_undispatched_webhook_events +ON omicron.public.webhook_event ( + id, time_created +) WHERE time_dispatched IS NULL; + + +/* + * Webhook message dispatching and delivery attempts. + */ + +-- Describes why a webhook delivery was triggered +CREATE TYPE IF NOT EXISTS omicron.public.webhook_delivery_trigger AS ENUM ( + -- This delivery was triggered by the event being dispatched. + 'event', + -- This delivery was triggered by an explicit call to the webhook event + -- resend API. + 'resend', + --- This delivery is a liveness probe. 
+ 'probe' +); + +-- Describes the state of a webhook delivery +CREATE TYPE IF NOT EXISTS omicron.public.webhook_delivery_state AS ENUM ( + -- This delivery has not yet completed. + 'pending', + -- This delivery has failed. + 'failed', + --- This delivery has completed successfully. + 'delivered' +); + +CREATE TABLE IF NOT EXISTS omicron.public.webhook_delivery ( + -- UUID of this delivery. + id UUID PRIMARY KEY, + --- UUID of the event (foreign key into `omicron.public.webhook_event`). + event_id UUID NOT NULL, + -- UUID of the webhook receiver (foreign key into + -- `omicron.public.webhook_rx`) + rx_id UUID NOT NULL, + + triggered_by omicron.public.webhook_delivery_trigger NOT NULL, + + payload JSONB NOT NULL, + + --- Delivery attempt count. Starts at 0. + attempts INT2 NOT NULL, + + time_created TIMESTAMPTZ NOT NULL, + -- If this is set, then this webhook message has either been delivered + -- successfully, or is considered permanently failed. + time_completed TIMESTAMPTZ, + + state omicron.public.webhook_delivery_state NOT NULL, + + -- Deliverator coordination bits + deliverator_id UUID, + time_leased TIMESTAMPTZ, + + CONSTRAINT attempts_is_non_negative CHECK (attempts >= 0), + CONSTRAINT active_deliveries_have_started_timestamps CHECK ( + (deliverator_id IS NULL) OR ( + deliverator_id IS NOT NULL AND time_leased IS NOT NULL + ) + ), + CONSTRAINT time_completed_iff_not_pending CHECK ( + (state = 'pending' AND time_completed IS NULL) OR + (state != 'pending' AND time_completed IS NOT NULL) + ) +); + +-- Ensure that initial delivery attempts (nexus-dispatched) are unique to avoid +-- duplicate work when an event is dispatched. For deliveries created by calls +-- to the webhook event resend API, we don't enforce this constraint, to allow +-- re-delivery to be triggered multiple times. 
+CREATE UNIQUE INDEX IF NOT EXISTS one_webhook_event_dispatch_per_rx +ON omicron.public.webhook_delivery ( + event_id, rx_id +) +WHERE + triggered_by = 'event'; + +-- Index for looking up all webhook messages dispatched to a receiver ID +CREATE INDEX IF NOT EXISTS lookup_webhook_delivery_dispatched_to_rx +ON omicron.public.webhook_delivery ( + rx_id, event_id +); + +-- Index for looking up all delivery attempts for an event +CREATE INDEX IF NOT EXISTS lookup_webhook_deliveries_for_event +ON omicron.public.webhook_delivery ( + event_id +); + +-- Index for looking up all currently in-flight webhook messages, and ordering +-- them by their creation times. +CREATE INDEX IF NOT EXISTS webhook_deliveries_in_flight +ON omicron.public.webhook_delivery ( + time_created, id +) WHERE + time_completed IS NULL; + +CREATE TYPE IF NOT EXISTS omicron.public.webhook_delivery_attempt_result as ENUM ( + -- The delivery attempt failed with an HTTP error. + 'failed_http_error', + -- The delivery attempt failed because the receiver endpoint was + -- unreachable. + 'failed_unreachable', + --- The delivery attempt connected successfully but no response was received + -- within the timeout. + 'failed_timeout', + -- The delivery attempt succeeded. + 'succeeded' +); + +CREATE TABLE IF NOT EXISTS omicron.public.webhook_delivery_attempt ( + -- Foreign key into `omicron.public.webhook_delivery`. + delivery_id UUID NOT NULL, + -- attempt number. + attempt INT2 NOT NULL, + + -- UUID of the webhook receiver (foreign key into + -- `omicron.public.webhook_rx`) + rx_id UUID NOT NULL, + + result omicron.public.webhook_delivery_attempt_result NOT NULL, + -- A status code > 599 would be Very Surprising, so rather than using an + -- INT4 to store a full unsigned 16-bit number in the database, we'll use a + -- signed 16-bit integer with a check constraint that it's unsigned. 
+ response_status INT2, + response_duration INTERVAL, + time_created TIMESTAMPTZ NOT NULL, + -- UUID of the Nexus who did this delivery attempt. + deliverator_id UUID NOT NULL, + + PRIMARY KEY (delivery_id, attempt), + + -- Attempt numbers start at 1 + CONSTRAINT attempts_start_at_1 CHECK (attempt >= 1), + + -- Ensure response status codes are not negative. + -- We could be more prescriptive here, and also check that they're >= 100 + -- and <= 599, but some servers may return weird stuff, and we'd like to be + -- able to record that they did that. + CONSTRAINT response_status_is_unsigned CHECK ( + (response_status IS NOT NULL AND response_status >= 0) OR + (response_status IS NULL) + ), + + CONSTRAINT response_iff_not_unreachable CHECK ( + ( + -- If the result is 'succeeded' or 'failed_http_error', response + -- data must be present. + (result = 'succeeded' OR result = 'failed_http_error') AND ( + response_status IS NOT NULL AND + response_duration IS NOT NULL + ) + ) OR ( + -- If the result is 'failed_unreachable' or 'failed_timeout', no + -- response data is present. + (result = 'failed_unreachable' OR result = 'failed_timeout') AND ( + response_status IS NULL AND + response_duration IS NULL + ) + ) + ) +); + +CREATE INDEX IF NOT EXISTS lookup_attempts_for_webhook_delivery +ON omicron.public.webhook_delivery_attempt ( + delivery_id +); + +CREATE INDEX IF NOT EXISTS lookup_webhook_delivery_attempts_to_rx +ON omicron.public.webhook_delivery_attempt ( + rx_id +); + /* * Keep this at the end of file so that the database does not contain a version * until it is fully populated. 
@@ -5003,7 +5380,7 @@ INSERT INTO omicron.public.db_metadata ( version, target_version ) VALUES - (TRUE, NOW(), NOW(), '131.0.0', NULL) + (TRUE, NOW(), NOW(), '132.0.0', NULL) ON CONFLICT DO NOTHING; COMMIT; diff --git a/schema/crdb/webhooks/README.adoc b/schema/crdb/webhooks/README.adoc new file mode 100644 index 00000000000..0a184ea3b74 --- /dev/null +++ b/schema/crdb/webhooks/README.adoc @@ -0,0 +1,58 @@ +# Overview + +This migration adds initial tables required for webhook delivery. + +## Upgrade steps + +The individual transactions in this upgrade do the following: + +* *Webhook receivers*: +** `up01.sql` creates the `omicron.public.webhook_receiver` table, which stores +the receiver endpoints that receive webhook events. +** `up02.sql` creates the `lookup_webhook_rx_by_id` index on that table, for listing non-deleted webhook receivers. +** `up03.sql` creates the `lookup_webhook_rx_by_name` index on that table, for looking up receivers by name (and ensuring names are unique across all non-deleted receivers). +** *Secrets*: +*** `up04.sql` creates the `omicron.public.webhook_secret` table, which +associates webhook receivers with secret keys and their IDs. +*** `up05.sql` creates the `lookup_webhook_secrets_by_rx` index on that table, +for looking up all secrets associated with a receiver. +* *Event classes, subscriptions, and globbing*: +** `up06.sql` creates the `omicron.public.webhook_event_class` enum type +** *Globs*: +*** `up07.sql` creates the `omicron.public.webhook_rx_event_glob` table, which contains any subscriptions created by a receiver that have glob patterns. This table is used when generating exact subscription from globs. +*** `up08.sql` creates the `lookup_webhook_event_globs_for_rx` index on `webhook_rx_event_glob`, for looking up all globs belonging to a receiver by ID. +*** `up09.sql` creates the `lookup_webhook_event_globs_by_schema_version` index on `webhook_rx_event_glob`, for searching for globs with outdated schema versions. 
+** *Subscriptions*: +*** `up10.sql` creates the `omicron.public.webhook_rx_subscription` table, which tracks the event classes that a receiver is subscribed to. If a row in this table represents a subscription that was generated by a glob, this table also references the glob record. +*** `up11.sql` creates the `lookup_webhook_rxs_for_event_class` index on `webhook_rx_subscription`, for listing all the receivers subscribed to an event class +*** `up12.sql` creates the `lookup_exact_subscriptions_for_webhook_rx` index, for looking up the exact subscriptions (not globs) for a receiver by ID. This is used along with `lookup_webhook_event_globs_for_rx` index when listing the user-provided event class strings. +* *Webhook events*: +** `up13.sql` creates the `omicron.public.webhook_event` table, which contains the +values of actual webhook events. The dispatcher operates on entries in +this queue, dispatching the event to receivers and generating the payload for +each receiver. +** `up14.sql` inserts the singleton row in `webhook_event` used for liveness probes. This singleton exists so that delivery records for liveness probes can have event UUIDs that point at a real entry in `webhook_event`, without requiring a new event entry to be created for each probe. +** `up15.sql` creates the `lookup_undispatched_webhook_events` index on `webhook_event` for looking up webhook messages which have not yet been dispatched, and ordering by their creation times. +* *Webhook message dispatching and delivery attempts*: +** *Dispatch table*: +*** `up16.sql` creates the `omicron.public.webhook_delivery_trigger` enum, which tracks why a webhook delivery was initiated. + +*** `up17.sql` creates the `omicron.public.webhook_delivery_state` enum, representing the current state of a webhook delivery. +*** `up18.sql` creates the table `omicron.public.webhook_delivery`, which tracks the webhook messages that have been dispatched to receivers. 
+*** `up19.sql` creates the `one_webhook_event_dispatch_per_rx` unique index on `webhook_delivery`. ++ +This index functions as a `UNIQUE` constraint on the tuple of `(event_id, rx_id)`, but ONLY for rows with `trigger = 'event'`. This ensures that concurrently-executing webhook dispatchers will not create multiple deliveries when dispatching a new event, but permits multiple re-deliveries of an event to be explicitly triggered. +*** `up20.sql` creates an index `lookup_webhook_delivery_dispatched_to_rx` for looking up +entries in `webhook_delivery` by receiver ID. +*** `up21.sql` creates an index `lookup_webhook_deliveries_for_event` on `webhook_delivery` for looking up deliveries by event UUID. +*** `up22.sql` creates an index `webhook_deliveries_in_flight` for looking up all currently in-flight webhook +deliveries (entries where the `time_completed` field has not been set). +** *Delivery attempts*: +*** `up23.sql` creates the enum `omicron.public.webhook_delivery_attempt_result`, +representing the potential outcomes of a webhook delivery attempt. +*** `up24.sql` creates the table `omicron.public.webhook_delivery_attempt`, +which records each individual delivery attempt for a webhook delivery in the +`webhook_delivery` table. +*** `up25.sql` creates an index `lookup_attempts_for_webhook_delivery` on +`webhook_delivery_attempt`, for looking up the attempts for a given delivery ID. +*** `up26.sql` creates an index `lookup_webhook_delivery_attempts_to_rx` on the `webhook_delivery_attempt` table, for looking up delivery attempts to a given receiver ID. This is primarily used for deleting delivery attempts when a receiver is deleted. 
diff --git a/schema/crdb/webhooks/up01.sql b/schema/crdb/webhooks/up01.sql new file mode 100644 index 00000000000..d4afbb5ea93 --- /dev/null +++ b/schema/crdb/webhooks/up01.sql @@ -0,0 +1,18 @@ +CREATE TABLE IF NOT EXISTS omicron.public.webhook_receiver ( + /* Identity metadata (resource) */ + id UUID PRIMARY KEY, + name STRING(63) NOT NULL, + description STRING(512) NOT NULL, + time_created TIMESTAMPTZ NOT NULL, + time_modified TIMESTAMPTZ NOT NULL, + time_deleted TIMESTAMPTZ, + -- Child resource generation for secrets. + secret_gen INT NOT NULL, + + -- Child resource generation for subscriptions. This is separate from + -- `secret_gen`, as updating secrets and updating subscriptions are separate + -- operations which don't conflict with each other. + subscription_gen INT NOT NULL, + -- URL of the endpoint webhooks are delivered to. + endpoint STRING(512) NOT NULL +); diff --git a/schema/crdb/webhooks/up02.sql b/schema/crdb/webhooks/up02.sql new file mode 100644 index 00000000000..f2f069346c2 --- /dev/null +++ b/schema/crdb/webhooks/up02.sql @@ -0,0 +1,4 @@ +CREATE UNIQUE INDEX IF NOT EXISTS lookup_webhook_rx_by_id +ON omicron.public.webhook_receiver (id) +WHERE + time_deleted IS NULL; diff --git a/schema/crdb/webhooks/up03.sql b/schema/crdb/webhooks/up03.sql new file mode 100644 index 00000000000..cf97de37251 --- /dev/null +++ b/schema/crdb/webhooks/up03.sql @@ -0,0 +1,5 @@ +CREATE UNIQUE INDEX IF NOT EXISTS lookup_webhook_rx_by_name +ON omicron.public.webhook_receiver ( + name +) WHERE + time_deleted IS NULL; diff --git a/schema/crdb/webhooks/up04.sql b/schema/crdb/webhooks/up04.sql new file mode 100644 index 00000000000..4f46ece10a2 --- /dev/null +++ b/schema/crdb/webhooks/up04.sql @@ -0,0 +1,14 @@ +CREATE TABLE IF NOT EXISTS omicron.public.webhook_secret ( + -- ID of this secret. + id UUID PRIMARY KEY, + time_created TIMESTAMPTZ NOT NULL, + -- N.B. that this will always be equal to `time_created` for secrets, as + -- they are never modified once created. 
+ time_modified TIMESTAMPTZ NOT NULL, + time_deleted TIMESTAMPTZ, + -- UUID of the webhook receiver (foreign key into + -- `omicron.public.webhook_rx`) + rx_id UUID NOT NULL, + -- Secret value. + secret STRING(512) NOT NULL +); diff --git a/schema/crdb/webhooks/up05.sql b/schema/crdb/webhooks/up05.sql new file mode 100644 index 00000000000..ab263e8705e --- /dev/null +++ b/schema/crdb/webhooks/up05.sql @@ -0,0 +1,5 @@ +CREATE INDEX IF NOT EXISTS lookup_webhook_secrets_by_rx +ON omicron.public.webhook_secret ( + rx_id +) WHERE + time_deleted IS NULL; diff --git a/schema/crdb/webhooks/up06.sql b/schema/crdb/webhooks/up06.sql new file mode 100644 index 00000000000..e00847f3cee --- /dev/null +++ b/schema/crdb/webhooks/up06.sql @@ -0,0 +1,13 @@ +CREATE TYPE IF NOT EXISTS omicron.public.webhook_event_class AS ENUM ( + -- Liveness probes, which are technically not real events, but, you know... + 'probe', + -- Test classes used to test globbing. + -- + -- These are not publicly exposed. + 'test.foo', + 'test.foo.bar', + 'test.foo.baz', + 'test.quux.bar', + 'test.quux.bar.baz' + -- Add new event classes here! +); diff --git a/schema/crdb/webhooks/up07.sql b/schema/crdb/webhooks/up07.sql new file mode 100644 index 00000000000..8aa06b10f56 --- /dev/null +++ b/schema/crdb/webhooks/up07.sql @@ -0,0 +1,17 @@ +CREATE TABLE IF NOT EXISTS omicron.public.webhook_rx_event_glob ( + -- UUID of the webhook receiver (foreign key into + -- `omicron.public.webhook_rx`) + rx_id UUID NOT NULL, + -- An event class glob to which this receiver is subscribed. + glob STRING(512) NOT NULL, + -- Regex used when evaluating this filter against concrete event classes. + regex STRING(512) NOT NULL, + time_created TIMESTAMPTZ NOT NULL, + -- The database schema version at which this glob was last expanded. + -- + -- This is used to detect when a glob must be re-processed to generate exact + -- subscriptions on schema changes. 
+ schema_version STRING(64) NOT NULL, + + PRIMARY KEY (rx_id, glob) +); diff --git a/schema/crdb/webhooks/up08.sql b/schema/crdb/webhooks/up08.sql new file mode 100644 index 00000000000..c8dfef06f4e --- /dev/null +++ b/schema/crdb/webhooks/up08.sql @@ -0,0 +1,4 @@ +CREATE INDEX IF NOT EXISTS lookup_webhook_event_globs_for_rx +ON omicron.public.webhook_rx_event_glob ( + rx_id +); diff --git a/schema/crdb/webhooks/up09.sql b/schema/crdb/webhooks/up09.sql new file mode 100644 index 00000000000..be6363dae71 --- /dev/null +++ b/schema/crdb/webhooks/up09.sql @@ -0,0 +1,2 @@ +CREATE INDEX IF NOT EXISTS lookup_webhook_event_globs_by_schema_version +ON omicron.public.webhook_rx_event_glob (schema_version); diff --git a/schema/crdb/webhooks/up10.sql b/schema/crdb/webhooks/up10.sql new file mode 100644 index 00000000000..ffd6a0e8278 --- /dev/null +++ b/schema/crdb/webhooks/up10.sql @@ -0,0 +1,20 @@ +CREATE TABLE IF NOT EXISTS omicron.public.webhook_rx_subscription ( + -- UUID of the webhook receiver (foreign key into + -- `omicron.public.webhook_receiver`) + rx_id UUID NOT NULL, + -- An event class to which the receiver is subscribed. + event_class omicron.public.webhook_event_class NOT NULL, + -- If this subscription is a concrete instantiation of a glob pattern, the + -- value of the glob that created it (and, a foreign key into + -- `webhook_rx_event_glob`). If the receiver is subscribed to this exact + -- event class, then this is NULL. + -- + -- This is used when deleting a glob subscription, as it is necessary to + -- delete any concrete subscriptions to individual event classes matching + -- that glob. + glob STRING(512), + + time_created TIMESTAMPTZ NOT NULL, + + PRIMARY KEY (rx_id, event_class) +); diff --git a/schema/crdb/webhooks/up11.sql b/schema/crdb/webhooks/up11.sql new file mode 100644 index 00000000000..8f79b29b605 --- /dev/null +++ b/schema/crdb/webhooks/up11.sql @@ -0,0 +1,6 @@ +-- Look up all webhook receivers subscribed to an event class. 
This is used by +-- the dispatcher to determine who is interested in a particular event. +CREATE INDEX IF NOT EXISTS lookup_webhook_rxs_for_event_class +ON omicron.public.webhook_rx_subscription ( + event_class +); diff --git a/schema/crdb/webhooks/up12.sql b/schema/crdb/webhooks/up12.sql new file mode 100644 index 00000000000..0b3b6becd0e --- /dev/null +++ b/schema/crdb/webhooks/up12.sql @@ -0,0 +1,4 @@ +CREATE INDEX IF NOT EXISTS lookup_exact_subscriptions_for_webhook_rx +on omicron.public.webhook_rx_subscription ( + rx_id +) WHERE glob IS NULL; diff --git a/schema/crdb/webhooks/up13.sql b/schema/crdb/webhooks/up13.sql new file mode 100644 index 00000000000..3b2f2450d1f --- /dev/null +++ b/schema/crdb/webhooks/up13.sql @@ -0,0 +1,22 @@ +CREATE TABLE IF NOT EXISTS omicron.public.webhook_event ( + id UUID PRIMARY KEY, + time_created TIMESTAMPTZ NOT NULL, + time_modified TIMESTAMPTZ NOT NULL, + -- The class of event that this is. + event_class omicron.public.webhook_event_class NOT NULL, + -- Actual event data. The structure of this depends on the event class. + event JSONB NOT NULL, + + -- Set when dispatch entries have been created for this event. + time_dispatched TIMESTAMPTZ, + -- The number of receivers that this event was dispatched to. + num_dispatched INT8 NOT NULL, + + CONSTRAINT time_dispatched_set_if_dispatched CHECK ( + (num_dispatched = 0) OR (time_dispatched IS NOT NULL) + ), + + CONSTRAINT num_dispatched_is_positive CHECK ( + (num_dispatched >= 0) + ) +); diff --git a/schema/crdb/webhooks/up14.sql b/schema/crdb/webhooks/up14.sql new file mode 100644 index 00000000000..8184c755660 --- /dev/null +++ b/schema/crdb/webhooks/up14.sql @@ -0,0 +1,21 @@ +-- Singleton probe event +INSERT INTO omicron.public.webhook_event ( + id, + time_created, + time_modified, + event_class, + event, + time_dispatched, + num_dispatched +) VALUES ( + -- NOTE: this UUID is duplicated in nexus_db_model::webhook_event. 
+ '001de000-7768-4000-8000-000000000001', + NOW(), + NOW(), + 'probe', + '{}', + -- Pretend to be dispatched so we won't show up in "list events needing + -- dispatch" queries + NOW(), + 0 +) ON CONFLICT DO NOTHING; diff --git a/schema/crdb/webhooks/up15.sql b/schema/crdb/webhooks/up15.sql new file mode 100644 index 00000000000..81542e30acd --- /dev/null +++ b/schema/crdb/webhooks/up15.sql @@ -0,0 +1,4 @@ +CREATE INDEX IF NOT EXISTS lookup_undispatched_webhook_events +ON omicron.public.webhook_event ( + id, time_created +) WHERE time_dispatched IS NULL; diff --git a/schema/crdb/webhooks/up16.sql b/schema/crdb/webhooks/up16.sql new file mode 100644 index 00000000000..562c6abce9d --- /dev/null +++ b/schema/crdb/webhooks/up16.sql @@ -0,0 +1,9 @@ +CREATE TYPE IF NOT EXISTS omicron.public.webhook_delivery_trigger AS ENUM ( + -- This delivery was triggered by the event being dispatched. + 'event', + -- This delivery was triggered by an explicit call to the webhook event + -- resend API. + 'resend', + -- This delivery is a liveness probe. + 'probe' +); diff --git a/schema/crdb/webhooks/up17.sql b/schema/crdb/webhooks/up17.sql new file mode 100644 index 00000000000..9dbcedca319 --- /dev/null +++ b/schema/crdb/webhooks/up17.sql @@ -0,0 +1,9 @@ +-- Describes the state of a webhook delivery +CREATE TYPE IF NOT EXISTS omicron.public.webhook_delivery_state AS ENUM ( + -- This delivery has not yet completed. + 'pending', + -- This delivery has failed. + 'failed', + -- This delivery has completed successfully. + 'delivered' +); diff --git a/schema/crdb/webhooks/up18.sql b/schema/crdb/webhooks/up18.sql new file mode 100644 index 00000000000..89a2b2d4d3f --- /dev/null +++ b/schema/crdb/webhooks/up18.sql @@ -0,0 +1,38 @@ +CREATE TABLE IF NOT EXISTS omicron.public.webhook_delivery ( + -- UUID of this delivery. + id UUID PRIMARY KEY, + -- UUID of the event (foreign key into `omicron.public.webhook_event`). 
+ event_id UUID NOT NULL, + -- UUID of the webhook receiver (foreign key into + -- `omicron.public.webhook_receiver`) + rx_id UUID NOT NULL, + + triggered_by omicron.public.webhook_delivery_trigger NOT NULL, + + payload JSONB NOT NULL, + + -- Delivery attempt count. Starts at 0. + attempts INT2 NOT NULL, + + time_created TIMESTAMPTZ NOT NULL, + -- If this is set, then this webhook message has either been delivered + -- successfully, or is considered permanently failed. + time_completed TIMESTAMPTZ, + + state omicron.public.webhook_delivery_state NOT NULL, + + -- Deliverator coordination bits + deliverator_id UUID, + time_leased TIMESTAMPTZ, + + CONSTRAINT attempts_is_non_negative CHECK (attempts >= 0), + CONSTRAINT active_deliveries_have_started_timestamps CHECK ( + (deliverator_id IS NULL) OR ( + deliverator_id IS NOT NULL AND time_leased IS NOT NULL + ) + ), + CONSTRAINT time_completed_iff_not_pending CHECK ( + (state = 'pending' AND time_completed IS NULL) OR + (state != 'pending' AND time_completed IS NOT NULL) + ) +); diff --git a/schema/crdb/webhooks/up19.sql b/schema/crdb/webhooks/up19.sql new file mode 100644 index 00000000000..6f6ca73ee4a --- /dev/null +++ b/schema/crdb/webhooks/up19.sql @@ -0,0 +1,6 @@ +CREATE UNIQUE INDEX IF NOT EXISTS one_webhook_event_dispatch_per_rx +ON omicron.public.webhook_delivery ( + event_id, rx_id +) +WHERE + triggered_by = 'event'; diff --git a/schema/crdb/webhooks/up20.sql b/schema/crdb/webhooks/up20.sql new file mode 100644 index 00000000000..e3ff154e746 --- /dev/null +++ b/schema/crdb/webhooks/up20.sql @@ -0,0 +1,4 @@ +CREATE INDEX IF NOT EXISTS lookup_webhook_delivery_dispatched_to_rx +ON omicron.public.webhook_delivery ( + rx_id, event_id +); diff --git a/schema/crdb/webhooks/up21.sql b/schema/crdb/webhooks/up21.sql new file mode 100644 index 00000000000..49709646b92 --- /dev/null +++ b/schema/crdb/webhooks/up21.sql @@ -0,0 +1,4 @@ +CREATE INDEX IF NOT EXISTS lookup_webhook_deliveries_for_event +ON 
omicron.public.webhook_delivery ( + event_id +); diff --git a/schema/crdb/webhooks/up22.sql b/schema/crdb/webhooks/up22.sql new file mode 100644 index 00000000000..ba3c68c7e44 --- /dev/null +++ b/schema/crdb/webhooks/up22.sql @@ -0,0 +1,5 @@ +CREATE INDEX IF NOT EXISTS webhook_deliveries_in_flight +ON omicron.public.webhook_delivery ( + time_created, id +) WHERE + time_completed IS NULL; diff --git a/schema/crdb/webhooks/up23.sql b/schema/crdb/webhooks/up23.sql new file mode 100644 index 00000000000..41e16a4da73 --- /dev/null +++ b/schema/crdb/webhooks/up23.sql @@ -0,0 +1,12 @@ +CREATE TYPE IF NOT EXISTS omicron.public.webhook_delivery_attempt_result as ENUM ( + -- The delivery attempt failed with an HTTP error. + 'failed_http_error', + -- The delivery attempt failed because the receiver endpoint was + -- unreachable. + 'failed_unreachable', + -- The delivery attempt connected successfully but no response was received + -- within the timeout. + 'failed_timeout', + -- The delivery attempt succeeded. + 'succeeded' +); diff --git a/schema/crdb/webhooks/up24.sql b/schema/crdb/webhooks/up24.sql new file mode 100644 index 00000000000..4cc19f7db16 --- /dev/null +++ b/schema/crdb/webhooks/up24.sql @@ -0,0 +1,53 @@ + +CREATE TABLE IF NOT EXISTS omicron.public.webhook_delivery_attempt ( + -- Foreign key into `omicron.public.webhook_delivery`. + delivery_id UUID NOT NULL, + -- attempt number. + attempt INT2 NOT NULL, + + -- UUID of the webhook receiver (foreign key into + -- `omicron.public.webhook_receiver`) + rx_id UUID NOT NULL, + + result omicron.public.webhook_delivery_attempt_result NOT NULL, + -- A status code > 599 would be Very Surprising, so rather than using an + -- INT4 to store a full unsigned 16-bit number in the database, we'll use a + -- signed 16-bit integer with a check constraint that it's unsigned. + response_status INT2, + response_duration INTERVAL, + time_created TIMESTAMPTZ NOT NULL, + -- UUID of the Nexus who did this delivery attempt. 
+ deliverator_id UUID NOT NULL, + + PRIMARY KEY (delivery_id, attempt), + + -- Attempt numbers start at 1 + CONSTRAINT attempts_start_at_1 CHECK (attempt >= 1), + + -- Ensure response status codes are not negative. + -- We could be more prescriptive here, and also check that they're >= 100 + -- and <= 599, but some servers may return weird stuff, and we'd like to be + -- able to record that they did that. + CONSTRAINT response_status_is_unsigned CHECK ( + (response_status IS NOT NULL AND response_status >= 0) OR + (response_status IS NULL) + ), + + CONSTRAINT response_iff_not_unreachable CHECK ( + ( + -- If the result is 'succeeded' or 'failed_http_error', response + -- data must be present. + (result = 'succeeded' OR result = 'failed_http_error') AND ( + response_status IS NOT NULL AND + response_duration IS NOT NULL + ) + ) OR ( + -- If the result is 'failed_unreachable' or 'failed_timeout', no + -- response data is present. + (result = 'failed_unreachable' OR result = 'failed_timeout') AND ( + response_status IS NULL AND + response_duration IS NULL + ) + ) + ) +); diff --git a/schema/crdb/webhooks/up25.sql b/schema/crdb/webhooks/up25.sql new file mode 100644 index 00000000000..cd08b4b316b --- /dev/null +++ b/schema/crdb/webhooks/up25.sql @@ -0,0 +1,4 @@ +CREATE INDEX IF NOT EXISTS lookup_attempts_for_webhook_delivery +ON omicron.public.webhook_delivery_attempt ( + delivery_id +); diff --git a/schema/crdb/webhooks/up26.sql b/schema/crdb/webhooks/up26.sql new file mode 100644 index 00000000000..422ab3a6981 --- /dev/null +++ b/schema/crdb/webhooks/up26.sql @@ -0,0 +1,4 @@ +CREATE INDEX IF NOT EXISTS lookup_webhook_delivery_attempts_to_rx +ON omicron.public.webhook_delivery_attempt ( + rx_id +); diff --git a/smf/nexus/multi-sled/config-partial.toml b/smf/nexus/multi-sled/config-partial.toml index 12f587db9a4..770bee73b1b 100644 --- a/smf/nexus/multi-sled/config-partial.toml +++ b/smf/nexus/multi-sled/config-partial.toml @@ -76,6 +76,10 @@ 
region_snapshot_replacement_finish.period_secs = 30 tuf_artifact_replication.period_secs = 300 tuf_artifact_replication.min_sled_replication = 3 read_only_region_replacement_start.period_secs = 30 +# In general, the webhook dispatcher will be activated when events are queued, +# so we don't need to periodically activate it *that* frequently. +webhook_dispatcher.period_secs = 60 +webhook_deliverator.period_secs = 60 [default_region_allocation_strategy] # by default, allocate across 3 distinct sleds diff --git a/smf/nexus/single-sled/config-partial.toml b/smf/nexus/single-sled/config-partial.toml index 016c5430a47..eefe7554ed6 100644 --- a/smf/nexus/single-sled/config-partial.toml +++ b/smf/nexus/single-sled/config-partial.toml @@ -76,6 +76,10 @@ region_snapshot_replacement_finish.period_secs = 30 tuf_artifact_replication.period_secs = 300 tuf_artifact_replication.min_sled_replication = 1 read_only_region_replacement_start.period_secs = 30 +# In general, the webhook dispatcher will be activated when events are queued, +# so we don't need to periodically activate it *that* frequently. +webhook_dispatcher.period_secs = 60 +webhook_deliverator.period_secs = 60 [default_region_allocation_strategy] # by default, allocate without requirement for distinct sleds. diff --git a/uuid-kinds/src/lib.rs b/uuid-kinds/src/lib.rs index 0d66b230c84..33fbc039946 100644 --- a/uuid-kinds/src/lib.rs +++ b/uuid-kinds/src/lib.rs @@ -78,5 +78,9 @@ impl_typed_uuid_kind! 
{ UpstairsSession => "upstairs_session", Vnic => "vnic", Volume => "volume", + WebhookEvent => "webhook_event", + WebhookReceiver => "webhook_receiver", + WebhookDelivery => "webhook_delivery", + WebhookSecret => "webhook_secret", Zpool => "zpool", } diff --git a/workspace-hack/Cargo.toml b/workspace-hack/Cargo.toml index 75ea7e50216..0393fda5c86 100644 --- a/workspace-hack/Cargo.toml +++ b/workspace-hack/Cargo.toml @@ -105,7 +105,7 @@ reqwest = { version = "0.12.12", features = ["blocking", "cookies", "json", "rus rsa = { version = "0.9.6", features = ["serde", "sha2"] } rustls = { version = "0.23.19", features = ["ring"] } rustls-webpki = { version = "0.102.8", default-features = false, features = ["aws_lc_rs", "ring", "std"] } -schemars = { version = "0.8.22", features = ["bytes", "chrono", "semver", "uuid1"] } +schemars = { version = "0.8.22", features = ["bytes", "chrono", "semver", "url", "uuid1"] } scopeguard = { version = "1.2.0" } semver = { version = "1.0.25", features = ["serde"] } serde = { version = "1.0.218", features = ["alloc", "derive", "rc"] } @@ -128,6 +128,7 @@ toml = { version = "0.7.8" } toml_datetime = { version = "0.6.8", default-features = false, features = ["serde"] } toml_edit-3c51e837cfc5589a = { package = "toml_edit", version = "0.22.24", features = ["serde"] } tracing = { version = "0.1.40", features = ["log"] } +url = { version = "2.5.3", features = ["serde"] } usdt = { version = "0.5.0" } usdt-impl = { version = "0.5.0", default-features = false, features = ["asm", "des"] } uuid = { version = "1.15.1", features = ["serde", "v4"] } @@ -227,7 +228,7 @@ reqwest = { version = "0.12.12", features = ["blocking", "cookies", "json", "rus rsa = { version = "0.9.6", features = ["serde", "sha2"] } rustls = { version = "0.23.19", features = ["ring"] } rustls-webpki = { version = "0.102.8", default-features = false, features = ["aws_lc_rs", "ring", "std"] } -schemars = { version = "0.8.22", features = ["bytes", "chrono", "semver", "uuid1"] } 
+schemars = { version = "0.8.22", features = ["bytes", "chrono", "semver", "url", "uuid1"] } scopeguard = { version = "1.2.0" } semver = { version = "1.0.25", features = ["serde"] } serde = { version = "1.0.218", features = ["alloc", "derive", "rc"] } @@ -253,6 +254,7 @@ toml_datetime = { version = "0.6.8", default-features = false, features = ["serd toml_edit-3c51e837cfc5589a = { package = "toml_edit", version = "0.22.24", features = ["serde"] } tracing = { version = "0.1.40", features = ["log"] } unicode-xid = { version = "0.2.6" } +url = { version = "2.5.3", features = ["serde"] } usdt = { version = "0.5.0" } usdt-impl = { version = "0.5.0", default-features = false, features = ["asm", "des"] } uuid = { version = "1.15.1", features = ["serde", "v4"] }