diff --git a/src/services/backends/kubernetes.rs b/src/services/backends/kubernetes.rs index 10b37e3..dbea29d 100644 --- a/src/services/backends/kubernetes.rs +++ b/src/services/backends/kubernetes.rs @@ -4,5 +4,5 @@ mod tests; pub mod kubeconfig_loader; pub mod kubernetes_resource_manager; pub mod kubernetes_resource_watcher; -pub mod logging_update_handler; pub mod repositories; +pub mod resource_update_handler; diff --git a/src/services/backends/kubernetes/kubernetes_resource_manager.rs b/src/services/backends/kubernetes/kubernetes_resource_manager.rs index d1f9499..11d3c40 100644 --- a/src/services/backends/kubernetes/kubernetes_resource_manager.rs +++ b/src/services/backends/kubernetes/kubernetes_resource_manager.rs @@ -6,9 +6,8 @@ use crate::services::backends::kubernetes::kubernetes_resource_manager::object_o use crate::services::backends::kubernetes::kubernetes_resource_manager::status::Status; use crate::services::backends::kubernetes::kubernetes_resource_manager::status::Status::{Conflict, NotOwned}; use crate::services::backends::kubernetes::kubernetes_resource_manager::status::owner_conflict_details::OwnerConflictDetails; -use crate::services::backends::kubernetes::kubernetes_resource_watcher::{ - KubernetesResourceWatcher, ResourceUpdateHandler, -}; +use crate::services::backends::kubernetes::kubernetes_resource_watcher::KubernetesResourceWatcher; +use crate::services::backends::kubernetes::resource_update_handler::ResourceUpdateHandler; use anyhow::{Error, anyhow}; use async_trait::async_trait; use futures::StreamExt; @@ -217,7 +216,7 @@ where .for_each(move |r| { let update_handler = update_handler.clone(); async move { - update_handler.handle_update(r).await; + update_handler.handle_update(&r).await; } }); diff --git a/src/services/backends/kubernetes/kubernetes_resource_manager/spin_lock.rs b/src/services/backends/kubernetes/kubernetes_resource_manager/spin_lock.rs index 3ff2cad..75a7a3f 100644 --- 
a/src/services/backends/kubernetes/kubernetes_resource_manager/spin_lock.rs +++ b/src/services/backends/kubernetes/kubernetes_resource_manager/spin_lock.rs @@ -3,7 +3,7 @@ mod tests; use super::{KubernetesResourceManager, KubernetesResourceManagerConfig, ResourceUpdateHandler, UpdateLabels}; use crate::services::backends::kubernetes::kubernetes_resource_manager::status::Status; use crate::services::backends::kubernetes::kubernetes_resource_watcher::KubernetesResourceWatcher; -use crate::services::backends::kubernetes::logging_update_handler::LoggingUpdateHandler; +use crate::services::backends::kubernetes::resource_update_handler::logging_update_handler::LoggingUpdateHandler; use anyhow::Error; use kube::runtime::reflector::ObjectRef; use serde::Serialize; diff --git a/src/services/backends/kubernetes/kubernetes_resource_watcher.rs b/src/services/backends/kubernetes/kubernetes_resource_watcher.rs index 9a432af..1c7148a 100644 --- a/src/services/backends/kubernetes/kubernetes_resource_watcher.rs +++ b/src/services/backends/kubernetes/kubernetes_resource_watcher.rs @@ -1,22 +1,14 @@ use crate::services::backends::kubernetes::kubernetes_resource_manager::KubernetesResourceManagerConfig; +use crate::services::backends::kubernetes::resource_update_handler::ResourceUpdateHandler; use async_trait::async_trait; use k8s_openapi::NamespaceResourceScope; use kube::Resource; -use kube::runtime::watcher; use serde::Serialize; use serde::de::DeserializeOwned; use std::fmt::Debug; use std::hash::Hash; use std::sync::Arc; -#[async_trait] -pub trait ResourceUpdateHandler: Send + Sync -where - S: Resource + Send + Sync, -{ - async fn handle_update(&self, result: Result) -> (); -} - #[async_trait] pub trait KubernetesResourceWatcher: Sized where diff --git a/src/services/backends/kubernetes/repositories.rs b/src/services/backends/kubernetes/repositories.rs index 808539b..7786d7d 100644 --- a/src/services/backends/kubernetes/repositories.rs +++ 
b/src/services/backends/kubernetes/repositories.rs @@ -4,9 +4,12 @@ use crate::services::backends::kubernetes::kubernetes_resource_manager::status:: use crate::services::backends::kubernetes::kubernetes_resource_manager::{ KubernetesResourceManagerConfig, UpdateLabels, }; -use crate::services::backends::kubernetes::logging_update_handler::LoggingUpdateHandler; use crate::services::backends::kubernetes::repositories::try_into_object_ref::TryIntoObjectRef; -use crate::services::base::upsert_repository::{CanDelete, ReadOnlyRepository, UpsertRepository}; +use crate::services::backends::kubernetes::resource_update_handler::ResourceUpdateHandler; +use crate::services::backends::kubernetes::resource_update_handler::logging_update_handler::LoggingUpdateHandler; +use crate::services::base::upsert_repository::{ + CanDelete, ReadOnlyRepository, UpsertRepository, UpsertRepositoryWithDelete, +}; use async_trait::async_trait; use k8s_openapi::NamespaceResourceScope; use k8s_openapi::apimachinery::pkg::apis::meta::v1::ObjectMeta; @@ -16,6 +19,7 @@ use serde::Serialize; use serde::de::DeserializeOwned; use std::fmt::Debug; use std::hash::Hash; +use std::marker::PhantomData; use std::sync::Arc; use std::time::Duration; use tokio::time::Instant; @@ -57,26 +61,29 @@ where Self: Sized; } -pub struct KubernetesRepository +pub struct KubernetesRepository where Resource: kube::Resource + SoftDeleteResource + Send + Sync + 'static, Resource::DynamicType: Hash + Eq, { pub resource_manager: SpinLockKubernetesResourceManager, pub operation_timeout: Duration, + _phantom: PhantomData<(Resource, H)>, } -impl KubernetesRepository +impl KubernetesRepository where R: kube::Resource + SoftDeleteResource + UpdateLabels + Send + Sync + 'static, R::DynamicType: Hash + Eq + Clone + Default, + H: ResourceUpdateHandler + Send + Sync + 'static, { - pub async fn start(config: KubernetesResourceManagerConfig) -> anyhow::Result { + pub async fn start(config: KubernetesResourceManagerConfig, update_handler: 
Arc) -> anyhow::Result { let operation_timeout = config.operation_timeout; - let resource_manager = SpinLockKubernetesResourceManager::start(config, Arc::new(LoggingUpdateHandler)).await?; + let resource_manager = SpinLockKubernetesResourceManager::start(config, update_handler).await?; Ok(KubernetesRepository { resource_manager, operation_timeout, + _phantom: PhantomData, }) } @@ -98,12 +105,13 @@ where } #[async_trait] -impl ReadOnlyRepository for KubernetesRepository +impl ReadOnlyRepository for KubernetesRepository where Resource: SoftDeleteResource + UpdateLabels, Resource::DynamicType: Hash + Eq + Clone + Default, Key: TryIntoObjectRef + Send + Sync + 'static, Value: TryFromResource + Send + Sync + 'static, + H: ResourceUpdateHandler + Send + Sync + 'static, { type ReadError = Status; @@ -124,12 +132,13 @@ where } #[async_trait] -impl CanDelete for KubernetesRepository +impl CanDelete for KubernetesRepository where Resource: SoftDeleteResource + UpdateLabels, Resource::DynamicType: Hash + Eq + Clone + Default, Key: TryIntoObjectRef + Send + Sync + Clone + 'static, Value: Send + Sync + 'static, + H: ResourceUpdateHandler + Send + Sync + 'static, { type DeleteError = Status; @@ -164,12 +173,13 @@ where } #[async_trait] -impl UpsertRepository for KubernetesRepository +impl UpsertRepository for KubernetesRepository where Resource: SoftDeleteResource + UpdateLabels, Resource::DynamicType: Hash + Eq + Clone + Default, Key: TryIntoObjectRef + Send + Sync + Clone + 'static, Value: ToResource + TryFromResource + Send + Sync + 'static, + H: ResourceUpdateHandler + Send + Sync + 'static, { type Error = Status; @@ -221,3 +231,13 @@ where Ok(self.resource_manager.get(&object_ref).is_ok()) } } + +impl UpsertRepositoryWithDelete for KubernetesRepository +where + Resource: SoftDeleteResource + UpdateLabels, + Resource::DynamicType: Hash + Eq + Clone + Default, + Key: TryIntoObjectRef + Send + Sync + Clone + 'static, + Value: ToResource + TryFromResource + Send + Sync + 
'static, + H: ResourceUpdateHandler + Send + Sync + 'static, +{ +} diff --git a/src/services/backends/kubernetes/repositories/schema_repository.rs b/src/services/backends/kubernetes/repositories/schema_repository.rs index db38632..933c668 100644 --- a/src/services/backends/kubernetes/repositories/schema_repository.rs +++ b/src/services/backends/kubernetes/repositories/schema_repository.rs @@ -54,7 +54,5 @@ impl ToAuditRecord for SchemaFragment { } } -impl UpsertRepositoryWithDelete for KubernetesRepository {} - pub type SchemaRepository = dyn UpsertRepositoryWithDelete; diff --git a/src/services/backends/kubernetes/repositories/schema_repository/tests.rs b/src/services/backends/kubernetes/repositories/schema_repository/tests.rs index 70e93fe..7e5e01f 100644 --- a/src/services/backends/kubernetes/repositories/schema_repository/tests.rs +++ b/src/services/backends/kubernetes/repositories/schema_repository/tests.rs @@ -5,6 +5,7 @@ use crate::services::backends::kubernetes::kubernetes_resource_manager::status:: use crate::services::backends::kubernetes::repositories::TryIntoObjectRef; use crate::services::backends::kubernetes::repositories::schema_repository::test_reduced_schema::reduced_schema; use crate::services::backends::kubernetes::repositories::schema_repository::test_schema::schema; +use crate::services::backends::kubernetes::resource_update_handler::logging_update_handler::LoggingUpdateHandler; use crate::testing::api_extensions::{WaitForDelete, WaitForResource}; use crate::testing::spin_lock_kubernetes_resource_manager_context::SpinLockKubernetesResourceManagerTestContext; use assert_matches::assert_matches; @@ -13,6 +14,7 @@ use kube::api::PostParams; use kube::runtime::reflector::ObjectRef; use maplit::btreemap; use std::collections::BTreeMap; +use std::marker::PhantomData; use std::time::Duration; use test_context::{AsyncTestContext, test_context}; @@ -29,9 +31,10 @@ impl AsyncTestContext for KubernetesSchemaRepositoryTest { async fn setup() -> 
KubernetesSchemaRepositoryTest { let parent = SpinLockKubernetesResourceManagerTestContext::setup().await; let label = parent.config.owner_mark.get_owner_name().clone(); - let repository = Arc::new(KubernetesRepository { + let repository = Arc::new(KubernetesRepository:: { resource_manager: parent.manager, operation_timeout: parent.config.operation_timeout, + _phantom: PhantomData, }); Self { repository, diff --git a/src/services/backends/kubernetes/resource_update_handler.rs b/src/services/backends/kubernetes/resource_update_handler.rs new file mode 100644 index 0000000..9f90c1b --- /dev/null +++ b/src/services/backends/kubernetes/resource_update_handler.rs @@ -0,0 +1,14 @@ +pub mod composed_update_handler; +pub mod logging_update_handler; + +use async_trait::async_trait; +use kube::Resource; +use kube::runtime::watcher; + +#[async_trait] +pub trait ResourceUpdateHandler: Send + Sync +where + S: Resource + Send + Sync, +{ + async fn handle_update(&self, result: &Result); +} diff --git a/src/services/backends/kubernetes/resource_update_handler/composed_update_handler.rs b/src/services/backends/kubernetes/resource_update_handler/composed_update_handler.rs new file mode 100644 index 0000000..ea94251 --- /dev/null +++ b/src/services/backends/kubernetes/resource_update_handler/composed_update_handler.rs @@ -0,0 +1,34 @@ +#[cfg(test)] +mod tests; + +use crate::services::backends::kubernetes::resource_update_handler::ResourceUpdateHandler; +use async_trait::async_trait; +use kube::Resource; +use std::fmt::Debug; + +pub struct ComposedUpdateHandler { + handlers: Vec>>, +} + +impl ComposedUpdateHandler { + pub fn new() -> Self { + Self { handlers: Vec::new() } + } + + pub fn add_handler(mut self, handler: Box>) -> Self { + self.handlers.push(handler); + self + } +} + +#[async_trait] +impl ResourceUpdateHandler for ComposedUpdateHandler +where + T: Resource + Debug + Send + Sync + 'static, +{ + async fn handle_update(&self, result: &Result) { + for handler in &self.handlers 
{ + handler.handle_update(result).await; + } + } +} diff --git a/src/services/backends/kubernetes/resource_update_handler/composed_update_handler/tests.rs b/src/services/backends/kubernetes/resource_update_handler/composed_update_handler/tests.rs new file mode 100644 index 0000000..ae41a24 --- /dev/null +++ b/src/services/backends/kubernetes/resource_update_handler/composed_update_handler/tests.rs @@ -0,0 +1,87 @@ +use crate::services::backends::kubernetes::resource_update_handler::ResourceUpdateHandler; +use crate::services::backends::kubernetes::resource_update_handler::composed_update_handler::ComposedUpdateHandler; +use anyhow::anyhow; +use async_trait::async_trait; +use k8s_openapi::api::core::v1::ConfigMap; +use kube::runtime::watcher; +use std::sync::Arc; +use std::sync::atomic::AtomicUsize; +use std::sync::atomic::Ordering::SeqCst; + +struct TestResourceUpdateHandler { + successful_calls: Arc, + failed_calls: Arc, +} + +#[async_trait] +impl ResourceUpdateHandler for TestResourceUpdateHandler { + async fn handle_update(&self, result: &Result) { + match result { + Ok(result) => { + result.metadata.name.as_ref().map(|name| { + if name == "bad-configmap" { + println!("Received bad ConfigMap: {}", name); + self.failed_calls.fetch_add(1, SeqCst); + Err(anyhow!("Test error for bad-configmap")) + } else { + println!("Received good ConfigMap: {}", name); + self.successful_calls.fetch_add(1, SeqCst); + Ok("Test success for good ConfigMap") + } + }); + } + Err(err) => panic!("Test should not receive errors: {:?}", err), + } + } +} + +#[tokio::test] +/// Validates that the ComposedUpdateHandler does not stop processing other handlers on error +async fn test_error_handling() { + let successful_calls = Arc::new(AtomicUsize::new(0)); + let failed_calls = Arc::new(AtomicUsize::new(0)); + let handler = ComposedUpdateHandler::new() + .add_handler(Box::new(TestResourceUpdateHandler { + successful_calls: successful_calls.clone(), + failed_calls: failed_calls.clone(), + })) + 
.add_handler(Box::new(TestResourceUpdateHandler { + successful_calls: successful_calls.clone(), + failed_calls: failed_calls.clone(), + })) + .add_handler(Box::new(TestResourceUpdateHandler { + successful_calls: successful_calls.clone(), + failed_calls: failed_calls.clone(), + })); + + let cms = vec![ + ConfigMap { + metadata: kube::api::ObjectMeta { + name: Some("good-configmap".to_string()), + ..Default::default() + }, + ..Default::default() + }, + ConfigMap { + metadata: kube::api::ObjectMeta { + name: Some("bad-configmap".to_string()), + ..Default::default() + }, + ..Default::default() + }, + ConfigMap { + metadata: kube::api::ObjectMeta { + name: Some("another-good-configmap".to_string()), + ..Default::default() + }, + ..Default::default() + }, + ]; + + for cm in cms { + handler.handle_update(&Ok(cm)).await; + } + + assert_eq!(successful_calls.load(SeqCst), 2 * 3); // Each good configmap should be counted by each handler + assert_eq!(failed_calls.load(SeqCst), 1 * 3); // Each bad configmap should be counted by each handler +} diff --git a/src/services/backends/kubernetes/logging_update_handler.rs b/src/services/backends/kubernetes/resource_update_handler/logging_update_handler.rs similarity index 71% rename from src/services/backends/kubernetes/logging_update_handler.rs rename to src/services/backends/kubernetes/resource_update_handler/logging_update_handler.rs index 9cd9efb..76dc691 --- a/src/services/backends/kubernetes/logging_update_handler.rs +++ b/src/services/backends/kubernetes/resource_update_handler/logging_update_handler.rs @@ -1,14 +1,9 @@ -#[cfg(not(test))] -use log::{debug, warn}; - -#[cfg(test)] -use std::{println as warn, println as debug}; - -use crate::services::backends::kubernetes::kubernetes_resource_watcher::ResourceUpdateHandler; +use crate::services::backends::kubernetes::resource_update_handler::ResourceUpdateHandler; use async_trait::async_trait; use k8s_openapi::apimachinery::pkg::apis::meta::v1::ObjectMeta; use 
kube::Resource; use kube::runtime::watcher; +use log::{debug, warn}; use std::fmt::Debug; @@ -19,8 +14,8 @@ impl ResourceUpdateHandler for LoggingUpdateHandler where T: Resource + Debug + Send + Sync + 'static, { - async fn handle_update(&self, event: Result) -> () { - if let Err(e) = event { + async fn handle_update(&self, event: &Result) -> () { + if let Err(e) = event.as_ref() { warn!("Error processing event: {}", e); return; } diff --git a/src/services/backends/memory.rs b/src/services/backends/memory.rs index af8a0d5..c14072a 100644 --- a/src/services/backends/memory.rs +++ b/src/services/backends/memory.rs @@ -1,5 +1,9 @@ +#[cfg(test)] +mod tests; + use crate::services::base::upsert_repository::{ - CanDelete, ReadOnlyRepository, UpsertRepository, UpsertRepositoryWithDelete, + CanDelete, ReadOnlyRepository, ReadOnlyRepositoryWithFactory, UpsertRepository, UpsertRepositoryWithDelete, + ValueFactory, }; use anyhow::bail; use async_trait::async_trait; @@ -8,8 +12,16 @@ use std::fmt::Debug; use std::hash::Hash; use tokio::sync::RwLock; +pub struct InMemoryRepository(RwLock>); + +impl InMemoryRepository { + pub fn new() -> Self { + InMemoryRepository(RwLock::new(HashMap::new())) + } +} + #[async_trait] -impl ReadOnlyRepository for RwLock> +impl ReadOnlyRepository for InMemoryRepository where Entity: Clone + Send + Sync, Key: Debug + Eq + Hash + Send + Sync, @@ -17,7 +29,7 @@ where type ReadError = anyhow::Error; async fn get(&self, key: Key) -> Result { - let read_guard = self.read().await; + let read_guard = self.0.read().await; match (*read_guard).get(&key) { Some(entity) => Ok(entity.clone()), None => bail!("Entity {:?} not found", key), @@ -26,7 +38,7 @@ where } #[async_trait] -impl UpsertRepository for RwLock> +impl UpsertRepository for InMemoryRepository where Entity: Send + Sync + Clone, Key: Send + Sync + Eq + Hash + Debug, @@ -34,19 +46,19 @@ where type Error = anyhow::Error; async fn upsert(&self, key: Key, entity: Entity) -> Result { - let mut 
write_guard = self.write().await; + let mut write_guard = self.0.write().await; (*write_guard).insert(key, entity.clone()); Ok(entity) } async fn exists(&self, key: Key) -> Result { - let read_guard = self.read().await; + let read_guard = self.0.read().await; Ok((*read_guard).get(&key).is_some()) } } #[async_trait] -impl CanDelete for RwLock> +impl CanDelete for InMemoryRepository where Entity: Send + Sync + Clone, Key: Send + Sync + Eq + Hash + Debug, @@ -54,15 +66,49 @@ where type DeleteError = anyhow::Error; async fn delete(&self, key: Key) -> Result<(), Self::DeleteError> { - let mut write_guard = self.write().await; + let mut write_guard = self.0.write().await; (*write_guard).remove(&key); Ok(()) } } -impl UpsertRepositoryWithDelete for RwLock> +impl UpsertRepositoryWithDelete for InMemoryRepository where Entity: Send + Sync + Clone, Key: Send + Sync + Eq + Hash + Debug, { } + +#[async_trait] +impl ReadOnlyRepositoryWithFactory for InMemoryRepository +where + Entity: Send + Sync + Clone, + Key: Send + Sync + Eq + Hash + Debug + Clone, +{ + type ReadError = anyhow::Error; + + async fn get( + &self, + key: Key, + value_factory: &dyn ValueFactory, + ) -> Result { + // First, acquire a read lock to check if the entity exists. + { + let read_guard = self.0.read().await; + if let Some(entity) = (*read_guard).get(&key) { + return Ok(entity.clone()); + } + } + // Release the read lock before calling the factory. + let new_entity = value_factory.create(&key).await?; + // Acquire a write lock to insert the new entity. + let mut write_guard = self.0.write().await; + // Check again in case another thread inserted it while we were creating. 
+ if let Some(entity) = (*write_guard).get(&key) { + Ok(entity.clone()) + } else { + (*write_guard).insert(key, new_entity.clone()); + Ok(new_entity) + } + } +} diff --git a/src/services/backends/memory/tests.rs b/src/services/backends/memory/tests.rs new file mode 100644 index 0000000..2675301 --- /dev/null +++ b/src/services/backends/memory/tests.rs @@ -0,0 +1,44 @@ +use crate::services::base::upsert_repository::{ReadOnlyRepositoryWithFactory, ValueFactory}; +use anyhow::anyhow; +use async_trait::async_trait; + +struct SuccessfulValueFactory; + +#[async_trait] +impl ValueFactory for SuccessfulValueFactory { + type CreateError = anyhow::Error; + + async fn create(&self, key: &String) -> Result { + let f = async { Ok(format!("Value for key: {}", key)) }; + f.await + } +} + +struct FailedValueFactory; + +#[async_trait] +impl ValueFactory for FailedValueFactory { + type CreateError = anyhow::Error; + + async fn create(&self, key: &String) -> Result { + let f = async { Err(anyhow!("Error for key: {}", key)) }; + f.await + } +} +#[tokio::test] +async fn test_create_value_successfully() { + // Arrange + let repo = super::InMemoryRepository::::new(); + let key = "test-key".to_string(); + let result = repo.get(key, &SuccessfulValueFactory).await.unwrap(); + assert_eq!(result, "Value for key: test-key".to_string()); +} + +#[tokio::test] +async fn test_create_value_failure() { + // Arrange + let repo = super::InMemoryRepository::::new(); + let key = "test-key".to_string(); + let result = repo.get(key, &FailedValueFactory).await.unwrap_err(); + assert_eq!(result.to_string(), "Error for key: test-key".to_string()); +} diff --git a/src/services/base/upsert_repository.rs b/src/services/base/upsert_repository.rs index 94dc240..423c7e3 100644 --- a/src/services/base/upsert_repository.rs +++ b/src/services/base/upsert_repository.rs @@ -1,11 +1,11 @@ use async_trait::async_trait; #[async_trait] -/// Represents a repository for policies +/// Represents a repository for Boxer 
entities that can be created or updated pub trait UpsertRepository: ReadOnlyRepository + Send + Sync { type Error; - /// Updates or inserts a policy by id + /// Updates or inserts a Boxer entity by id async fn upsert(&self, key: Key, entity: Entity) -> Result; /// Checks if an object exists @@ -13,7 +13,7 @@ pub trait UpsertRepository: ReadOnlyRepository + Send } #[async_trait] -/// Represents a repository for policies +/// Represents a repository for Boxer entities that can only be read pub trait ReadOnlyRepository: Send + Sync { type ReadError; @@ -22,7 +22,29 @@ pub trait ReadOnlyRepository: Send + Sync { } #[async_trait] -/// Represents a repository for policies +/// Factory trait to create new entities if they do not exist in the repository +pub trait ValueFactory: Send + Sync { + type CreateError; + + async fn create(&self, key: &Key) -> Result; +} + +#[async_trait] +/// Represents a repository for Boxer entities that can be read +/// with the ability to create new entities if they do not exist +pub trait ReadOnlyRepositoryWithFactory: Send + Sync { + type ReadError; + + /// Retrieves or creates a Boxer entity by id + async fn get( + &self, + key: Key, + create_new: &dyn ValueFactory, + ) -> Result; +} + +#[async_trait] +/// Represents a repository for Boxer entities that can be deleted pub trait CanDelete: Send + Sync { type DeleteError; @@ -30,6 +52,7 @@ pub trait CanDelete: Send + Sync { async fn delete(&self, key: Key) -> Result<(), Self::DeleteError>; } +/// Combines UpsertRepository and CanDelete traits pub trait UpsertRepositoryWithDelete: UpsertRepository + CanDelete { // This trait is a marker trait that combines UpsertRepository and CanDelete } diff --git a/src/testing/spin_lock_kubernetes_resource_manager_context.rs b/src/testing/spin_lock_kubernetes_resource_manager_context.rs index c14fb4f..3de5d42 100644 --- a/src/testing/spin_lock_kubernetes_resource_manager_context.rs +++ b/src/testing/spin_lock_kubernetes_resource_manager_context.rs @@ -3,7 
+3,7 @@ use crate::services::backends::kubernetes::kubernetes_resource_manager::spin_loc use crate::services::backends::kubernetes::kubernetes_resource_manager::{ KubernetesResourceManagerConfig, UpdateLabels, }; -use crate::services::backends::kubernetes::logging_update_handler::LoggingUpdateHandler; +use crate::services::backends::kubernetes::resource_update_handler::logging_update_handler::LoggingUpdateHandler; use crate::testing::api_client_context::ApiClientContext; use serde::Serialize; use serde::de::DeserializeOwned;