
Commit 6c221cd

Reduce logs noise
1 parent 84ad6cf commit 6c221cd

File tree

5 files changed: +37 -47 lines changed
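Every change below either deletes a stray println! or demotes a tracing::warn!/tracing::error! call to tracing::trace!, so this housekeeping output disappears from default log levels. As a minimal sketch of how a consumer could still opt back in (assuming the application installs tracing-subscriber with its env-filter feature; the filter string and target name are illustrative):

    use tracing_subscriber::EnvFilter;

    fn main() {
        tracing_subscriber::fmt()
            // Keep everything else at `info`, but surface the trace-level
            // messages this commit demoted from `warn!`/`error!`.
            .with_env_filter(EnvFilter::new("info,automerge_repo=trace"))
            .init();
    }

Using EnvFilter::from_default_env() instead, setting RUST_LOG=automerge_repo=trace at launch achieves the same effect.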

src/fs_store.rs

Lines changed: 10 additions & 16 deletions

@@ -106,7 +106,7 @@ impl FsStore {
                 .map_err(|e| Error(ErrorKind::ErrReadingLevel1Path(entry.path(), e)))?
                 .is_file()
             {
-                tracing::warn!(
+                tracing::trace!(
                     non_dir_path=%entry.path().display(),
                     "unexpected non-directory at level1 of database"
                 );
@@ -123,14 +123,14 @@ impl FsStore {
                 .metadata()
                 .map_err(|e| Error(ErrorKind::ErrReadingLevel2Path(entry.path(), e)))?;
             if !metadata.is_dir() {
-                tracing::warn!(
+                tracing::trace!(
                     non_file_path=%entry.path().display(),
                     "unexpected non-directory at level2 of database"
                 );
                 continue;
             }
             let Some(doc_paths) = DocIdPaths::parse(entry.path()) else {
-                tracing::warn!(
+                tracing::trace!(
                     non_doc_path=%entry.path().display(),
                     "unexpected non-document path at level2 of database"
                 );
@@ -157,31 +157,27 @@ impl FsStore {
         // Load all the data we have into a doc
         match Chunks::load(&self.root, id) {
             Ok(Some(chunks)) => {
-                println!("hmm...");
                 let doc = chunks
                     .to_doc()
                     .map_err(|e| Error(ErrorKind::LoadDocToCompact(e)))?;

                 // Write the snapshot
                 let output_chunk_name = SavedChunkName::new_snapshot(doc.get_heads());
                 let chunk = doc.save();
-                println!("Going to write: {:#?}", output_chunk_name);
                 write_chunk(&self.root, &paths, &chunk, output_chunk_name.clone())?;

                 // Remove all the old data
                 for incremental in chunks.incrementals.keys() {
                     let path = paths.chunk_path(&self.root, incremental);
-                    println!("Removing {:?}", path);
                     std::fs::remove_file(&path)
                         .map_err(|e| Error(ErrorKind::DeleteChunk(path, e)))?;
                 }
                 let just_wrote = paths.chunk_path(&self.root, &output_chunk_name);
                 for snapshot in chunks.snapshots.keys() {
                     let path = paths.chunk_path(&self.root, snapshot);
-                    println!("Removing Snap {:?}", path);

                     if path == just_wrote {
-                        tracing::error!("Somehow trying to delete the same path we just wrote to. Not today Satan");
+                        tracing::trace!("Somehow trying to delete the same path we just wrote to. Not today Satan");
                         continue;
                     }

@@ -190,7 +186,6 @@ impl FsStore {
                 }
             }
             Ok(None) => {
-                println!("No existing files,and compaction requested first");
                 let output_chunk_name = SavedChunkName {
                     hash: uuid::Uuid::new_v4().as_bytes().to_vec(),
                     chunk_type: ChunkType::Snapshot,
@@ -199,7 +194,6 @@ impl FsStore {
                 write_chunk(&self.root, &paths, full_doc, output_chunk_name)?;
             }
             Err(e) => {
-                println!("Error loading chunks for {:?} {}", self.root, id);
                 tracing::error!(e=%e, "Error loading chunks");
             }
         }
@@ -233,8 +227,8 @@ fn write_chunk(
     // with a name based on the hash of the heads of the document
     let output_path = paths.chunk_path(root, &name);

-    tracing::warn!("Renaming: {:?}", temp_save);
-    tracing::warn!("To: {:?}", output_path);
+    tracing::trace!("Renaming: {:?}", temp_save);
+    tracing::trace!("To: {:?}", output_path);

     std::fs::rename(&temp_save_path, &output_path)
         .map_err(|e| Error(ErrorKind::RenameTempFile(temp_save_path, output_path, e)))?;
@@ -372,7 +366,7 @@ impl Chunks {
     fn load(root: &Path, doc_id: &DocumentId) -> Result<Option<Self>, Error> {
         let doc_id_hash = DocIdPaths::from(doc_id);
         let level2_path = doc_id_hash.level2_path(root);
-        tracing::warn!(
+        tracing::trace!(
             root=%root.display(),
             doc_id=?doc_id,
             doc_path=%level2_path.display(),
@@ -408,12 +402,12 @@ impl Chunks {
                 .map_err(|e| Error(ErrorKind::ErrReadingChunkFileMetadata(path.clone(), e)))?
                 .is_file()
             {
-                tracing::warn!(bad_file=%path.display(), "unexpected non-file in level2 path");
+                tracing::trace!(bad_file=%path.display(), "unexpected non-file in level2 path");
                 continue;
            }
            let Some(chunk_name) = entry.file_name().to_str().and_then(SavedChunkName::parse)
            else {
-                tracing::warn!(bad_file=%path.display(), "unexpected non-chunk file in level2 path");
+                tracing::trace!(bad_file=%path.display(), "unexpected non-chunk file in level2 path");
                continue;
            };
            tracing::debug!(chunk_path=%path.display(), "reading chunk file");
@@ -423,7 +417,7 @@ impl Chunks {
                 match e.kind() {
                     std::io::ErrorKind::NotFound => {
                         // Could be a concurrent process compacting, not an error
-                        tracing::warn!(
+                        tracing::trace!(
                             missing_chunk_path=%path.display(),
                             "chunk file disappeared while reading chunks",
                         );
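The write_chunk hunk above logs a rename from a temporary file to the final chunk path: the standard write-then-rename pattern that keeps concurrent readers from ever observing a half-written chunk. A minimal sketch of that pattern, assuming a POSIX filesystem where rename within one filesystem is atomic (the helper name write_atomically is hypothetical, not the crate's API):

    use std::fs;
    use std::io::Write;
    use std::path::Path;

    // Illustrative only: persist `bytes` at `output_path` by writing a
    // sibling temp file first, flushing it, then renaming it into place.
    fn write_atomically(output_path: &Path, bytes: &[u8]) -> std::io::Result<()> {
        let temp_path = output_path.with_extension("tmp");
        let mut temp_file = fs::File::create(&temp_path)?;
        temp_file.write_all(bytes)?;
        temp_file.sync_all()?; // reach disk before the rename makes it visible
        fs::rename(&temp_path, output_path)
    }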

src/network_connect.rs

Lines changed: 1 addition & 1 deletion

@@ -35,7 +35,7 @@ impl RepoHandle {
                 Ok(repo_msg)
             }
             Ok(m) => {
-                tracing::warn!(?m, repo_id=?repo_id, "Received non-repo message");
+                tracing::trace!(?m, repo_id=?repo_id, "Received non-repo message");
                 Err(NetworkError::Error)
             }
             Err(e) => {
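The macro call above uses tracing's field shorthand. A small self-contained sketch of the two equivalent spellings (the values are made up):

    fn main() {
        let repo_id = "repo-a";
        let m = vec![1u8, 2, 3];
        // `?x` records the field with its Debug representation (`%x` would
        // use Display); named fields precede the human-readable message.
        tracing::trace!(?m, repo_id = ?repo_id, "Received non-repo message");
        // The shorthand `?m` is equivalent to naming the field explicitly:
        tracing::trace!(m = ?m, repo_id = ?repo_id, "Received non-repo message");
    }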

src/repo.rs

Lines changed: 2 additions & 13 deletions

@@ -704,7 +704,7 @@ impl DocumentInfo {
         let count = {
             let doc = self.document.read();
             let changes = doc.automerge.get_changes(&self.last_heads);
-            println!(
+            tracing::trace!(
                 "last: {:?}, current: {:?}",
                 self.last_heads,
                 doc.automerge.get_heads()
@@ -713,7 +713,6 @@ impl DocumentInfo {
             changes.len()
         };
         let has_patches = count > 0;
-        println!("Has patches: {:?}", has_patches);
         self.patches_since_last_compact = self
             .patches_since_last_compact
             .checked_add(count)
@@ -733,24 +732,19 @@ impl DocumentInfo {
         storage: &dyn Storage,
         wake_sender: &Sender<WakeSignal>,
     ) {
-        println!("We decided to save the document");
         if !self.state.should_save() {
-            println!("No");
             return;
         }
         let should_compact =
             self.patches_since_last_compact > self.allowable_changes_until_compaction;
         let (storage_fut, new_heads) = if should_compact {
-            println!("We decided to Compact the document");
             let (to_save, new_heads) = {
                 let doc = self.document.read();
                 (doc.automerge.save(), doc.automerge.get_heads())
             };
             self.patches_since_last_compact = 0;
-            println!("Since compact is zero");
             (storage.compact(document_id.clone(), to_save), new_heads)
         } else {
-            println!("We decided to incremental the document");
             let (to_save, new_heads) = {
                 let doc = self.document.read();
                 (
@@ -759,10 +753,6 @@ impl DocumentInfo {
                 )
             };
             self.patches_since_last_compact.checked_add(1).unwrap_or(0);
-            println!(
-                "Saves since last compact {}",
-                self.patches_since_last_compact
-            );
             (storage.append(document_id.clone(), to_save), new_heads)
         };
         match self.state {
@@ -1261,7 +1251,6 @@ impl Repo {
             self.sinks_to_poll.insert(to_repo_id);
         }
         if is_first_edit {
-            println!("First edit");
             // Send a sync message to all other repos we are connected with.
             for repo_id in self.remote_repos.keys() {
                 if let Some(message) = info.generate_first_sync_message(repo_id.clone())
@@ -1355,7 +1344,7 @@ impl Repo {
             let state = info.document.read();
             state.automerge.get_heads()
         };
-        println!("Change observer: {:?} {:?}", current_heads, change_hash);
+        tracing::trace!("Change observer: {:?} {:?}", current_heads, change_hash);
         if current_heads == change_hash {
             info.change_observers.push(observer);
         } else {
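The save hunk above chooses between a full compaction and an incremental append by comparing a change counter against a threshold, resetting the counter after each compaction. A condensed sketch of that decision, with paraphrased names rather than the crate's exact types:

    // Paraphrased from DocumentInfo's save path in the diff above.
    struct SaveState {
        patches_since_last_compact: usize,
        allowable_changes_until_compaction: usize,
    }

    impl SaveState {
        fn should_compact(&self) -> bool {
            self.patches_since_last_compact > self.allowable_changes_until_compaction
        }

        fn record_save(&mut self) {
            if self.should_compact() {
                // A full snapshot was written; start counting afresh.
                self.patches_since_last_compact = 0;
            } else {
                // Saturate rather than panic if the counter ever overflows.
                self.patches_since_last_compact =
                    self.patches_since_last_compact.saturating_add(1);
            }
        }
    }

    fn main() {
        let mut state = SaveState {
            patches_since_last_compact: 11,
            allowable_changes_until_compaction: 10,
        };
        assert!(state.should_compact());
        state.record_save();
        assert_eq!(state.patches_since_last_compact, 0);
    }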

tests/document_changed.rs

Lines changed: 23 additions & 17 deletions

@@ -40,12 +40,16 @@ fn test_document_changed_over_sync() {
     );
     peers.insert(repo_handle_2.get_repo_id().clone(), network_1);
     peers.insert(repo_handle_1.get_repo_id().clone(), network_2);
-
+
     // Edit the document.
     document_handle_1.with_doc_mut(|doc| {
         let mut tx = doc.transaction();
-        tx.put(automerge::ROOT, "repo_id", format!("{}", repo_handle_1.get_repo_id().clone()))
-            .expect("Failed to change the document.");
+        tx.put(
+            automerge::ROOT,
+            "repo_id",
+            format!("{}", repo_handle_1.get_repo_id().clone()),
+        )
+        .expect("Failed to change the document.");
         tx.commit();
     });

@@ -63,18 +67,20 @@ fn test_document_changed_over_sync() {
         doc_handle.with_doc_mut(|doc| {
             println!("Heads when 2 makes edit: {:?}", doc.get_heads());
             let id = doc
-            .get(automerge::ROOT, "repo_id")
-            .expect("Failed to read the document.")
-            .unwrap();
-            println!("Id when two makes edit: {:?}", id);
-            {let mut tx = doc.transaction();
-            tx.put(
-                automerge::ROOT,
-                "repo_id",
-                format!("{}", repo_handle_2.get_repo_id()),
-            )
-            .expect("Failed to change the document.");
-            tx.commit();}
+                .get(automerge::ROOT, "repo_id")
+                .expect("Failed to read the document.")
+                .unwrap();
+            println!("Id when two makes edit: {:?}", id);
+            {
+                let mut tx = doc.transaction();
+                tx.put(
+                    automerge::ROOT,
+                    "repo_id",
+                    format!("{}", repo_handle_2.get_repo_id()),
+                )
+                .expect("Failed to change the document.");
+                tx.commit();
+            }
             println!("Heads after 2 makes edit: {:?}", doc.get_heads());
         });
     });
@@ -84,13 +90,13 @@ fn test_document_changed_over_sync() {
     let repo_id = repo_handle_1.get_repo_id().clone();
     rt.spawn(async move {
         loop {
-        // Await changes until the edit comes through over sync.
+            // Await changes until the edit comes through over sync.
             let equals = document_handle_1.with_doc(|doc| {
                 let val = doc
                     .get(automerge::ROOT, "repo_id")
                     .expect("Failed to read the document.")
                     .unwrap();
-            println!("Val: {:?}", val);
+                println!("Val: {:?}", val);
                 val.0.to_str().unwrap() == format!("{}", expected_repo_id)
             });
             if equals {
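The hunks above are mostly rustfmt reflow around one repeated edit pattern: open a transaction, put a value under the root map, commit. Distilled into a standalone sketch (set_repo_id is a hypothetical helper; the calls mirror those in the test):

    use automerge::{transaction::Transactable, Automerge, ROOT};

    // Write a string under the root map and commit the transaction.
    fn set_repo_id(doc: &mut Automerge, repo_id: &str) {
        let mut tx = doc.transaction();
        tx.put(ROOT, "repo_id", repo_id.to_string())
            .expect("Failed to change the document.");
        tx.commit();
    }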

tests/fs_storage/main.rs

Lines changed: 1 addition & 0 deletions

@@ -1,6 +1,7 @@
 use automerge::transaction::Transactable;
 use automerge_repo::fs_store;
 use itertools::Itertools;
+use uuid::Uuid;

 /// Asserts that the &[u8] in `data` is some permutation of the chunks of Vec<&[u8]> in `expected`
 macro_rules! assert_permutation_of {
