From 6c221cdd882738f6a9d34c0993ff62978b958b8b Mon Sep 17 00:00:00 2001
From: Issac Kelly
Date: Fri, 15 Sep 2023 14:56:20 -0700
Subject: [PATCH] Reduce log noise

---
 src/fs_store.rs           | 26 ++++++++++----------------
 src/network_connect.rs    |  2 +-
 src/repo.rs               | 15 ++-------------
 tests/document_changed.rs | 40 +++++++++++++++++++++++-----------------
 tests/fs_storage/main.rs  |  1 +
 5 files changed, 37 insertions(+), 47 deletions(-)

diff --git a/src/fs_store.rs b/src/fs_store.rs
index ce161ae5..4a453d14 100644
--- a/src/fs_store.rs
+++ b/src/fs_store.rs
@@ -106,7 +106,7 @@ impl FsStore {
                 .map_err(|e| Error(ErrorKind::ErrReadingLevel1Path(entry.path(), e)))?
                 .is_file()
             {
-                tracing::warn!(
+                tracing::trace!(
                     non_dir_path=%entry.path().display(),
                     "unexpected non-directory at level1 of database"
                 );
@@ -123,14 +123,14 @@ impl FsStore {
                 .metadata()
                 .map_err(|e| Error(ErrorKind::ErrReadingLevel2Path(entry.path(), e)))?;
             if !metadata.is_dir() {
-                tracing::warn!(
+                tracing::trace!(
                     non_file_path=%entry.path().display(),
                     "unexpected non-directory at level2 of database"
                 );
                 continue;
             }
             let Some(doc_paths) = DocIdPaths::parse(entry.path()) else {
-                tracing::warn!(
+                tracing::trace!(
                     non_doc_path=%entry.path().display(),
                     "unexpected non-document path at level2 of database"
                 );
                 continue;
@@ -157,7 +157,6 @@ impl FsStore {
         // Load all the data we have into a doc
         match Chunks::load(&self.root, id) {
             Ok(Some(chunks)) => {
-                println!("hmm...");
                 let doc = chunks
                     .to_doc()
                     .map_err(|e| Error(ErrorKind::LoadDocToCompact(e)))?;
@@ -165,23 +164,20 @@ impl FsStore {
                 // Write the snapshot
                 let output_chunk_name = SavedChunkName::new_snapshot(doc.get_heads());
                 let chunk = doc.save();
-                println!("Going to write: {:#?}", output_chunk_name);
                 write_chunk(&self.root, &paths, &chunk, output_chunk_name.clone())?;

                 // Remove all the old data
                 for incremental in chunks.incrementals.keys() {
                     let path = paths.chunk_path(&self.root, incremental);
-                    println!("Removing {:?}", path);
                     std::fs::remove_file(&path)
                         .map_err(|e| Error(ErrorKind::DeleteChunk(path, e)))?;
                 }
                 let just_wrote = paths.chunk_path(&self.root, &output_chunk_name);
                 for snapshot in chunks.snapshots.keys() {
                     let path = paths.chunk_path(&self.root, snapshot);
-                    println!("Removing Snap {:?}", path);
                     if path == just_wrote {
-                        tracing::error!("Somehow trying to delete the same path we just wrote to. Not today Satan");
+                        tracing::trace!("Somehow trying to delete the same path we just wrote to. Not today Satan");
Not today Satan"); continue; } @@ -190,7 +186,6 @@ impl FsStore { } } Ok(None) => { - println!("No existing files,and compaction requested first"); let output_chunk_name = SavedChunkName { hash: uuid::Uuid::new_v4().as_bytes().to_vec(), chunk_type: ChunkType::Snapshot, @@ -199,7 +194,6 @@ impl FsStore { write_chunk(&self.root, &paths, full_doc, output_chunk_name)?; } Err(e) => { - println!("Error loading chunks for {:?} {}", self.root, id); tracing::error!(e=%e, "Error loading chunks"); } } @@ -233,8 +227,8 @@ fn write_chunk( // with a name based on the hash of the heads of the document let output_path = paths.chunk_path(root, &name); - tracing::warn!("Renaming: {:?}", temp_save); - tracing::warn!("To: {:?}", output_path); + tracing::trace!("Renaming: {:?}", temp_save); + tracing::trace!("To: {:?}", output_path); std::fs::rename(&temp_save_path, &output_path) .map_err(|e| Error(ErrorKind::RenameTempFile(temp_save_path, output_path, e)))?; @@ -372,7 +366,7 @@ impl Chunks { fn load(root: &Path, doc_id: &DocumentId) -> Result, Error> { let doc_id_hash = DocIdPaths::from(doc_id); let level2_path = doc_id_hash.level2_path(root); - tracing::warn!( + tracing::trace!( root=%root.display(), doc_id=?doc_id, doc_path=%level2_path.display(), @@ -408,12 +402,12 @@ impl Chunks { .map_err(|e| Error(ErrorKind::ErrReadingChunkFileMetadata(path.clone(), e)))? .is_file() { - tracing::warn!(bad_file=%path.display(), "unexpected non-file in level2 path"); + tracing::trace!(bad_file=%path.display(), "unexpected non-file in level2 path"); continue; } let Some(chunk_name) = entry.file_name().to_str().and_then(SavedChunkName::parse) else { - tracing::warn!(bad_file=%path.display(), "unexpected non-chunk file in level2 path"); + tracing::trace!(bad_file=%path.display(), "unexpected non-chunk file in level2 path"); continue; }; tracing::debug!(chunk_path=%path.display(), "reading chunk file"); @@ -423,7 +417,7 @@ impl Chunks { match e.kind() { std::io::ErrorKind::NotFound => { // Could be a concurrent process compacting, not an error - tracing::warn!( + tracing::trace!( missing_chunk_path=%path.display(), "chunk file disappeared while reading chunks", ); diff --git a/src/network_connect.rs b/src/network_connect.rs index 4bf15be6..61006ea8 100644 --- a/src/network_connect.rs +++ b/src/network_connect.rs @@ -35,7 +35,7 @@ impl RepoHandle { Ok(repo_msg) } Ok(m) => { - tracing::warn!(?m, repo_id=?repo_id, "Received non-repo message"); + tracing::trace!(?m, repo_id=?repo_id, "Received non-repo message"); Err(NetworkError::Error) } Err(e) => { diff --git a/src/repo.rs b/src/repo.rs index d22ec9fb..3b9acae5 100644 --- a/src/repo.rs +++ b/src/repo.rs @@ -704,7 +704,7 @@ impl DocumentInfo { let count = { let doc = self.document.read(); let changes = doc.automerge.get_changes(&self.last_heads); - println!( + tracing::trace!( "last: {:?}, current: {:?}", self.last_heads, doc.automerge.get_heads() @@ -713,7 +713,6 @@ impl DocumentInfo { changes.len() }; let has_patches = count > 0; - println!("Has patches: {:?}", has_patches); self.patches_since_last_compact = self .patches_since_last_compact .checked_add(count) @@ -733,24 +732,19 @@ impl DocumentInfo { storage: &dyn Storage, wake_sender: &Sender, ) { - println!("We decided to save the document"); if !self.state.should_save() { - println!("No"); return; } let should_compact = self.patches_since_last_compact > self.allowable_changes_until_compaction; let (storage_fut, new_heads) = if should_compact { - println!("We decided to Compact the document"); let (to_save, new_heads) = { 
                 let doc = self.document.read();
                 (doc.automerge.save(), doc.automerge.get_heads())
             };
             self.patches_since_last_compact = 0;
-            println!("Since compact is zero");
             (storage.compact(document_id.clone(), to_save), new_heads)
         } else {
-            println!("We decided to incremental the document");
             let (to_save, new_heads) = {
                 let doc = self.document.read();
                 (
@@ -759,10 +753,6 @@ impl DocumentInfo {
                 )
             };
             self.patches_since_last_compact.checked_add(1).unwrap_or(0);
-            println!(
-                "Saves since last compact {}",
-                self.patches_since_last_compact
-            );
             (storage.append(document_id.clone(), to_save), new_heads)
         };
         match self.state {
@@ -1261,7 +1251,6 @@ impl Repo {
                     self.sinks_to_poll.insert(to_repo_id);
                 }
                 if is_first_edit {
-                    println!("First edit");
                     // Send a sync message to all other repos we are connected with.
                     for repo_id in self.remote_repos.keys() {
                         if let Some(message) = info.generate_first_sync_message(repo_id.clone())
@@ -1355,7 +1344,7 @@ impl Repo {
                     let state = info.document.read();
                     state.automerge.get_heads()
                 };
-                println!("Change observer: {:?} {:?}", current_heads, change_hash);
+                tracing::trace!("Change observer: {:?} {:?}", current_heads, change_hash);
                 if current_heads == change_hash {
                     info.change_observers.push(observer);
                 } else {
diff --git a/tests/document_changed.rs b/tests/document_changed.rs
index 796e8c76..7eb74a55 100644
--- a/tests/document_changed.rs
+++ b/tests/document_changed.rs
@@ -40,12 +40,16 @@ fn test_document_changed_over_sync() {
     );
     peers.insert(repo_handle_2.get_repo_id().clone(), network_1);
     peers.insert(repo_handle_1.get_repo_id().clone(), network_2);
-    
+
     // Edit the document.
     document_handle_1.with_doc_mut(|doc| {
         let mut tx = doc.transaction();
-        tx.put(automerge::ROOT, "repo_id", format!("{}", repo_handle_1.get_repo_id().clone()))
-            .expect("Failed to change the document.");
+        tx.put(
+            automerge::ROOT,
+            "repo_id",
+            format!("{}", repo_handle_1.get_repo_id().clone()),
+        )
+        .expect("Failed to change the document.");
         tx.commit();
     });
@@ -63,18 +67,20 @@ fn test_document_changed_over_sync() {
         doc_handle.with_doc_mut(|doc| {
             println!("Heads when 2 makes edit: {:?}", doc.get_heads());
             let id = doc
-            .get(automerge::ROOT, "repo_id")
-            .expect("Failed to read the document.")
-            .unwrap();
-            println!("Id when two makes edit: {:?}", id);
-            {let mut tx = doc.transaction();
-            tx.put(
-                automerge::ROOT,
-                "repo_id",
-                format!("{}", repo_handle_2.get_repo_id()),
-            )
-            .expect("Failed to change the document.");
-            tx.commit();}
+                .get(automerge::ROOT, "repo_id")
+                .expect("Failed to read the document.")
+                .unwrap();
+            println!("Id when two makes edit: {:?}", id);
+            {
+                let mut tx = doc.transaction();
+                tx.put(
+                    automerge::ROOT,
+                    "repo_id",
+                    format!("{}", repo_handle_2.get_repo_id()),
+                )
+                .expect("Failed to change the document.");
+                tx.commit();
+            }
             println!("Heads after 2 makes edit: {:?}", doc.get_heads());
         });
     });
@@ -84,13 +90,13 @@ fn test_document_changed_over_sync() {
     let repo_id = repo_handle_1.get_repo_id().clone();
     rt.spawn(async move {
         loop {
-            // Await changes until the edit comes through over sync. 
+            // Await changes until the edit comes through over sync.
             let equals = document_handle_1.with_doc(|doc| {
                 let val = doc
                     .get(automerge::ROOT, "repo_id")
                     .expect("Failed to read the document.")
                     .unwrap();
-            println!("Val: {:?}", val);
+                println!("Val: {:?}", val);
                 val.0.to_str().unwrap() == format!("{}", expected_repo_id)
             });
             if equals {
diff --git a/tests/fs_storage/main.rs b/tests/fs_storage/main.rs
index 82613918..74a6d78e 100644
--- a/tests/fs_storage/main.rs
+++ b/tests/fs_storage/main.rs
@@ -1,6 +1,7 @@
 use automerge::transaction::Transactable;
 use automerge_repo::fs_store;
 use itertools::Itertools;
+use uuid::Uuid;

 /// Asserts that the &[u8] in `data` is some permutation of the chunks of Vec<&[u8]> in `expected`
 macro_rules! assert_permutation_of {
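
Note: with these messages demoted from warn!/println! to trace!, they are silent
under a default subscriber. A minimal sketch of how a consumer of the crate could
surface them again while debugging; this assumes the tracing-subscriber crate
with its env-filter feature, which is not part of this patch:

    use tracing_subscriber::EnvFilter;

    fn main() {
        // Equivalent to running with RUST_LOG=automerge_repo=trace.
        tracing_subscriber::fmt()
            .with_env_filter(EnvFilter::new("automerge_repo=trace"))
            .init();

        // ... build the Repo / FsStore and run as usual; the trace-level
        // messages touched by this patch are now emitted ...
    }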