Reduce logs noise
issackelly committed Sep 15, 2023
1 parent 84ad6cf commit 6c221cd
Showing 5 changed files with 37 additions and 47 deletions.
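This commit's theme: noisy `tracing::warn!` calls drop to `tracing::trace!`, and ad-hoc `println!` debugging is deleted or converted to `tracing::trace!`. The messages are still emitted at trace level, so a consumer can opt back in with a log filter. A minimal sketch, assuming the `tracing-subscriber` crate with its `env-filter` feature (not part of this repository):

use tracing_subscriber::EnvFilter;

fn main() {
    tracing_subscriber::fmt()
        // Honors e.g. RUST_LOG=automerge_repo=trace, so the logs this
        // commit downgrades remain reachable without a rebuild.
        .with_env_filter(EnvFilter::from_default_env())
        .init();
}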
26 changes: 10 additions & 16 deletions src/fs_store.rs
@@ -106,7 +106,7 @@ impl FsStore {
.map_err(|e| Error(ErrorKind::ErrReadingLevel1Path(entry.path(), e)))?
.is_file()
{
- tracing::warn!(
+ tracing::trace!(
non_dir_path=%entry.path().display(),
"unexpected non-directory at level1 of database"
);
@@ -123,14 +123,14 @@ impl FsStore {
.metadata()
.map_err(|e| Error(ErrorKind::ErrReadingLevel2Path(entry.path(), e)))?;
if !metadata.is_dir() {
- tracing::warn!(
+ tracing::trace!(
non_file_path=%entry.path().display(),
"unexpected non-directory at level2 of database"
);
continue;
}
let Some(doc_paths) = DocIdPaths::parse(entry.path()) else {
- tracing::warn!(
+ tracing::trace!(
non_doc_path=%entry.path().display(),
"unexpected non-document path at level2 of database"
);
@@ -157,31 +157,27 @@ impl FsStore {
// Load all the data we have into a doc
match Chunks::load(&self.root, id) {
Ok(Some(chunks)) => {
println!("hmm...");
let doc = chunks
.to_doc()
.map_err(|e| Error(ErrorKind::LoadDocToCompact(e)))?;

// Write the snapshot
let output_chunk_name = SavedChunkName::new_snapshot(doc.get_heads());
let chunk = doc.save();
println!("Going to write: {:#?}", output_chunk_name);
write_chunk(&self.root, &paths, &chunk, output_chunk_name.clone())?;

// Remove all the old data
for incremental in chunks.incrementals.keys() {
let path = paths.chunk_path(&self.root, incremental);
println!("Removing {:?}", path);
std::fs::remove_file(&path)
.map_err(|e| Error(ErrorKind::DeleteChunk(path, e)))?;
}
let just_wrote = paths.chunk_path(&self.root, &output_chunk_name);
for snapshot in chunks.snapshots.keys() {
let path = paths.chunk_path(&self.root, snapshot);
println!("Removing Snap {:?}", path);

if path == just_wrote {
tracing::error!("Somehow trying to delete the same path we just wrote to. Not today Satan");
tracing::trace!("Somehow trying to delete the same path we just wrote to. Not today Satan");
continue;
}

@@ -190,7 +186,6 @@ impl FsStore {
}
}
Ok(None) => {
println!("No existing files,and compaction requested first");
let output_chunk_name = SavedChunkName {
hash: uuid::Uuid::new_v4().as_bytes().to_vec(),
chunk_type: ChunkType::Snapshot,
@@ -199,7 +194,6 @@ impl FsStore {
write_chunk(&self.root, &paths, full_doc, output_chunk_name)?;
}
Err(e) => {
println!("Error loading chunks for {:?} {}", self.root, id);
tracing::error!(e=%e, "Error loading chunks");
}
}
@@ -233,8 +227,8 @@ fn write_chunk(
// with a name based on the hash of the heads of the document
let output_path = paths.chunk_path(root, &name);

tracing::warn!("Renaming: {:?}", temp_save);
tracing::warn!("To: {:?}", output_path);
tracing::trace!("Renaming: {:?}", temp_save);
tracing::trace!("To: {:?}", output_path);

std::fs::rename(&temp_save_path, &output_path)
.map_err(|e| Error(ErrorKind::RenameTempFile(temp_save_path, output_path, e)))?;
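For context, `write_chunk` saves to a temporary file and then renames it over the final path, which is why these rename logs exist at all. A sketch of that pattern in isolation, with hypothetical names; rename within one filesystem is atomic, so readers never observe a half-written chunk:

use std::{fs, io::Write, path::Path};

fn write_atomically(dir: &Path, name: &str, bytes: &[u8]) -> std::io::Result<()> {
    let tmp = dir.join(format!("{name}.tmp"));
    let out = dir.join(name);
    let mut f = fs::File::create(&tmp)?;
    f.write_all(bytes)?;
    f.sync_all()?; // flush data before the rename makes the file visible
    fs::rename(&tmp, &out) // readers see the old chunk or the whole new one
}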
@@ -372,7 +366,7 @@ impl Chunks {
fn load(root: &Path, doc_id: &DocumentId) -> Result<Option<Self>, Error> {
let doc_id_hash = DocIdPaths::from(doc_id);
let level2_path = doc_id_hash.level2_path(root);
- tracing::warn!(
+ tracing::trace!(
root=%root.display(),
doc_id=?doc_id,
doc_path=%level2_path.display(),
@@ -408,12 +402,12 @@ impl Chunks {
.map_err(|e| Error(ErrorKind::ErrReadingChunkFileMetadata(path.clone(), e)))?
.is_file()
{
- tracing::warn!(bad_file=%path.display(), "unexpected non-file in level2 path");
+ tracing::trace!(bad_file=%path.display(), "unexpected non-file in level2 path");
continue;
}
let Some(chunk_name) = entry.file_name().to_str().and_then(SavedChunkName::parse)
else {
- tracing::warn!(bad_file=%path.display(), "unexpected non-chunk file in level2 path");
+ tracing::trace!(bad_file=%path.display(), "unexpected non-chunk file in level2 path");
continue;
};
tracing::debug!(chunk_path=%path.display(), "reading chunk file");
@@ -423,7 +417,7 @@ impl Chunks {
match e.kind() {
std::io::ErrorKind::NotFound => {
// Could be a concurrent process compacting, not an error
- tracing::warn!(
+ tracing::trace!(
missing_chunk_path=%path.display(),
"chunk file disappeared while reading chunks",
);
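The `NotFound` arm above is why this log is trace-level: a concurrent compactor may legitimately delete a chunk between the directory listing and the read. A standalone sketch of that tolerance (helper name hypothetical):

use std::{fs, io, path::Path};

fn read_chunk_if_present(path: &Path) -> io::Result<Option<Vec<u8>>> {
    match fs::read(path) {
        Ok(bytes) => Ok(Some(bytes)),
        // Vanished after listing: likely compacted away, not an error.
        Err(e) if e.kind() == io::ErrorKind::NotFound => Ok(None),
        Err(e) => Err(e),
    }
}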
2 changes: 1 addition & 1 deletion src/network_connect.rs
@@ -35,7 +35,7 @@ impl RepoHandle {
Ok(repo_msg)
}
Ok(m) => {
- tracing::warn!(?m, repo_id=?repo_id, "Received non-repo message");
+ tracing::trace!(?m, repo_id=?repo_id, "Received non-repo message");
Err(NetworkError::Error)
}
Err(e) => {
15 changes: 2 additions & 13 deletions src/repo.rs
@@ -704,7 +704,7 @@ impl DocumentInfo {
let count = {
let doc = self.document.read();
let changes = doc.automerge.get_changes(&self.last_heads);
- println!(
+ tracing::trace!(
"last: {:?}, current: {:?}",
self.last_heads,
doc.automerge.get_heads()
@@ -713,7 +713,6 @@ impl DocumentInfo {
changes.len()
};
let has_patches = count > 0;
println!("Has patches: {:?}", has_patches);
self.patches_since_last_compact = self
.patches_since_last_compact
.checked_add(count)
@@ -733,24 +732,19 @@ impl DocumentInfo {
storage: &dyn Storage,
wake_sender: &Sender<WakeSignal>,
) {
println!("We decided to save the document");
if !self.state.should_save() {
println!("No");
return;
}
let should_compact =
self.patches_since_last_compact > self.allowable_changes_until_compaction;
let (storage_fut, new_heads) = if should_compact {
println!("We decided to Compact the document");
let (to_save, new_heads) = {
let doc = self.document.read();
(doc.automerge.save(), doc.automerge.get_heads())
};
self.patches_since_last_compact = 0;
println!("Since compact is zero");
(storage.compact(document_id.clone(), to_save), new_heads)
} else {
println!("We decided to incremental the document");
let (to_save, new_heads) = {
let doc = self.document.read();
(
@@ -759,10 +753,6 @@ impl DocumentInfo {
)
};
self.patches_since_last_compact.checked_add(1).unwrap_or(0);
- println!(
-     "Saves since last compact {}",
-     self.patches_since_last_compact
- );
(storage.append(document_id.clone(), to_save), new_heads)
};
match self.state {
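The logic above picks a full snapshot once enough changes accumulate, otherwise an incremental append. A reduced sketch of that decision; the field names mirror the diff, but the struct itself is illustrative, not the crate's internals:

struct SaveTracker {
    patches_since_last_compact: usize,
    allowable_changes_until_compaction: usize,
}

impl SaveTracker {
    fn should_compact(&self) -> bool {
        self.patches_since_last_compact > self.allowable_changes_until_compaction
    }

    fn record_save(&mut self, compacted: bool) {
        if compacted {
            // A full snapshot subsumes prior increments, so reset.
            self.patches_since_last_compact = 0;
        } else {
            self.patches_since_last_compact =
                self.patches_since_last_compact.saturating_add(1);
        }
    }
}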
@@ -1261,7 +1251,6 @@ impl Repo {
self.sinks_to_poll.insert(to_repo_id);
}
if is_first_edit {
println!("First edit");
// Send a sync message to all other repos we are connected with.
for repo_id in self.remote_repos.keys() {
if let Some(message) = info.generate_first_sync_message(repo_id.clone())
@@ -1355,7 +1344,7 @@ impl Repo {
let state = info.document.read();
state.automerge.get_heads()
};
println!("Change observer: {:?} {:?}", current_heads, change_hash);
tracing::trace!("Change observer: {:?} {:?}", current_heads, change_hash);
if current_heads == change_hash {
info.change_observers.push(observer);
} else {
40 changes: 23 additions & 17 deletions tests/document_changed.rs
@@ -40,12 +40,16 @@ fn test_document_changed_over_sync() {
);
peers.insert(repo_handle_2.get_repo_id().clone(), network_1);
peers.insert(repo_handle_1.get_repo_id().clone(), network_2);

// Edit the document.
document_handle_1.with_doc_mut(|doc| {
let mut tx = doc.transaction();
- tx.put(automerge::ROOT, "repo_id", format!("{}", repo_handle_1.get_repo_id().clone()))
-     .expect("Failed to change the document.");
+ tx.put(
+     automerge::ROOT,
+     "repo_id",
+     format!("{}", repo_handle_1.get_repo_id().clone()),
+ )
+ .expect("Failed to change the document.");
tx.commit();
});

@@ -63,18 +67,20 @@ fn test_document_changed_over_sync() {
doc_handle.with_doc_mut(|doc| {
println!("Heads when 2 makes edit: {:?}", doc.get_heads());
let id = doc
-     .get(automerge::ROOT, "repo_id")
-     .expect("Failed to read the document.")
-     .unwrap();
- println!("Id when two makes edit: {:?}", id);
- {let mut tx = doc.transaction();
- tx.put(
-     automerge::ROOT,
-     "repo_id",
-     format!("{}", repo_handle_2.get_repo_id()),
- )
- .expect("Failed to change the document.");
- tx.commit();}
+     .get(automerge::ROOT, "repo_id")
+     .expect("Failed to read the document.")
+     .unwrap();
+ println!("Id when two makes edit: {:?}", id);
+ {
+     let mut tx = doc.transaction();
+     tx.put(
+         automerge::ROOT,
+         "repo_id",
+         format!("{}", repo_handle_2.get_repo_id()),
+     )
+     .expect("Failed to change the document.");
+     tx.commit();
+ }
println!("Heads after 2 makes edit: {:?}", doc.get_heads());
});
});
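The edits above all go through Automerge's transaction API. A self-contained sketch of that pattern, independent of the repo plumbing and assuming a recent `automerge` crate:

use automerge::{transaction::Transactable, Automerge, ReadDoc, ROOT};

fn main() {
    let mut doc = Automerge::new();
    let mut tx = doc.transaction();
    tx.put(ROOT, "repo_id", "example-repo-id")
        .expect("Failed to change the document.");
    tx.commit();
    // Reads go through the same document once the transaction commits.
    let val = doc.get(ROOT, "repo_id").expect("read failed").unwrap();
    assert_eq!(val.0.to_str(), Some("example-repo-id"));
}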
@@ -84,13 +90,13 @@ fn test_document_changed_over_sync() {
let repo_id = repo_handle_1.get_repo_id().clone();
rt.spawn(async move {
loop {
- // Await changes until the edit comes through over sync.
+     // Await changes until the edit comes through over sync.
let equals = document_handle_1.with_doc(|doc| {
let val = doc
.get(automerge::ROOT, "repo_id")
.expect("Failed to read the document.")
.unwrap();
println!("Val: {:?}", val);
println!("Val: {:?}", val);
val.0.to_str().unwrap() == format!("{}", expected_repo_id)
});
if equals {
1 change: 1 addition & 0 deletions tests/fs_storage/main.rs
@@ -1,6 +1,7 @@
use automerge::transaction::Transactable;
use automerge_repo::fs_store;
use itertools::Itertools;
+ use uuid::Uuid;

/// Asserts that the &[u8] in `data` is some permutation of the chunks of Vec<&[u8]> in `expected`
macro_rules! assert_permutation_of {
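The truncated macro asserts that `data` equals the concatenation of some permutation of the expected chunks. A hypothetical function form of the same check, using the `Itertools` import above (brute force is fine for the handful of chunks these tests produce):

use itertools::Itertools;

fn is_permutation_of(data: &[u8], expected: &[&[u8]]) -> bool {
    expected
        .iter()
        .copied()
        .permutations(expected.len())
        .any(|perm| perm.concat() == data)
}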
