# Reference snapshot of the crow-tools repository (https://github.com/CrowCall/crow-tools). Used for development context — not part of the Orpheus codebase.
==========================================
CROW-TOOLS REPOSITORY SNAPSHOT
Generated: 2025-12-05 10:35:38
==========================================
<FULL TREE>
.
|-- classifier
| |-- models
| | +-- best_model.ckpt
| |-- classify.py
| |-- cluster.py
| |-- dataset.py
| |-- evaluate.py
| |-- model.py
| |-- optuna_tuning.py
| |-- preview_auto_labels.py
| |-- remove_dupes.py
| |-- RESULTS.txt
| +-- train.py
|-- denoiser
| +-- denoise_crows.py
|-- detector
| |-- detect.py
| |-- detect_all.py
| +-- detect_all_birdnet.py
|-- docs
| |-- images
| | |-- crow-animation.gif
| | |-- crow-emote.png
| | |-- crow-tools-presentation-cover.png
| | |-- denoiser.png
| | |-- detector-timeline.png
| | |-- embeddings.png
| | |-- labeler.png
| | |-- separator.png
| | +-- transcriber.png
| +-- videos
| +-- embeddings.gif
|-- downloader
| |-- download_backgrounds.py
| |-- download_crows_ebird.py
| +-- download_crows_xeno.py
|-- embedder
| |-- ispa
| | |-- models
| | | |-- aves-base-bio.torchaudio.model_config.json
| | | |-- aves-base-bio.torchaudio.pt
| | | |-- c2p.aves.json
| | | |-- c2p.mfcc.json
| | | |-- kmeans.aves.pkl
| | | +-- kmeans.mfcc.pkl
| | |-- __init__.py
| | |-- acoustics.py
| | |-- features.py
| | +-- utils.py
| |-- __init__.py
| |-- analyze.py
| |-- embed.py
| +-- embed_all.py
|-- labeler
| |-- js
| | |-- components
| | | |-- filterComponent.js
| | | |-- menu.js
| | | |-- pagination.js
| | | +-- segmentCard.js
| | +-- app.js
| |-- embeddings.html
| |-- index.html
| |-- package-lock.json
| |-- package.json
| |-- server.js
| +-- transcriptions.html
|-- separator
| |-- models
| | +-- best_model_epoch=94.ckpt
| |-- samples
| | |-- non-overlapping-multiple-crows.mp3
| | |-- overlapping-crows-1.wav
| | |-- overlapping-crows-2.wav
| | |-- overlapping-crows-3.wav
| | +-- softsong.mp3
| |-- dataset.py
| |-- migrate.py
| |-- mix.py
| |-- separate.py
| +-- train.py
|-- .gitattributes
|-- CITATION.cff
|-- condense_for_llm.sh
|-- crow-tools-condensed-for-llm.out
|-- get-data.py
|-- LICENSE.md
|-- README.md
+-- requirements.txt
<FILE SUMMARY>
Total files included in this condensed view:
Files: 50
<FILES BY DIRECTORY>
(root): 6 files
classifier: 10 files
denoiser: 1 files
detector: 3 files
downloader: 3 files
embedder/ispa/models: 3 files
embedder/ispa: 4 files
embedder: 4 files
labeler/js/components: 4 files
labeler/js: 1 files
labeler: 6 files
separator: 5 files
<FILE CONTENTS>
==========================================
<CITATION.cff>
Size: 541 bytes | Lines: 19
==========================================
cff-version: 1.2.0
message: "If crow-tools was useful for your research, we'd love a citation!"
title: "crow-tools: open-source tools for analyzing crow vocalizations"
authors:
- family-names: Thomas
given-names: Jonathan
email: crows@openshot.org
- family-names: Thomas
given-names: Madeline
email: crows@owlmaddie.com
date-released: 2025-03-22
version: "1.0.0"
repository-code: https://github.com/CrowCall/crow-tools
keywords:
- crows
- bioacoustics
- machine learning
- audio analysis
- animal communication
==========================================
</CITATION.cff>
==========================================
==========================================
<LICENSE.md>
Size: 1092 bytes | Lines: 21
==========================================
MIT License
Copyright (c) 2025 OpenShot Studios LLC, owlmaddie LLC
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
==========================================
</LICENSE.md>
==========================================
==========================================
<README.md>
Size: 8532 bytes | Lines: 160
==========================================
# Decoding Crow Communication with AI
## Crows are among the most intelligent and vocal birds on the planet. But what exactly are they saying?
<img src="docs/images/crow-animation.gif" align="left" width="50%" title="Original animation by @owlmaddie">
This project aims to **explore** and **decode** the rich and mysterious world of **crow communication** using cutting-edge
machine learning and audio analysis.
This repository is your toolkit for working with thousands of **American crow** ([Corvus brachyrhynchos](https://www.allaboutbirds.org/guide/American_Crow/overview)) **calls**—from downloading and cleaning audio, to
detecting individual calls, generating embeddings, classifying vocalizations, and even manually labeling them
through a web interface.
Whether you're a bird enthusiast, a researcher, or just someone fascinated by animal intelligence, this project
offers a glimpse into the complex vocal language of these remarkable creatures—and the possibility of better
understanding them through technology.
## Install Dependencies
This project was built on **Ubuntu 24.04** with **Python 3.8+**; however, it should be compatible with most Linux
and macOS systems. **Git LFS** is required to correctly clone, pull, and inflate all models. **FFmpeg** is required
to run the denoiser.
```
sudo apt install ffmpeg git-lfs portaudio19-dev python3-tk
pip install -r requirements.txt
```
## Download and Prepare Data
Run this script to download, denoise, embed, and auto-label all crow audio files.
NOTE: This will download more than **30 GB** of data into a local `.cache` directory.
```
python get-data.py
```
## Introduction
[![crow-tools presentation](docs/images/crow-tools-presentation-cover.png)](https://docs.google.com/presentation/d/15q6MwZquIA3LaXqK29C7NRGKmcn5BTSwK5TFsem-QGk/present)
## Module Overview
### Downloader
The downloader module retrieves a large collection of crow vocalizations (13+ GB) from multiple public repositories.
It handles the complexities of connecting to each source, downloading the audio files, and storing relevant metadata
for proper attribution. Credits and licensing info for all files are saved in the `.cache/csv/` directory.
Crow-tools relies on openly available datasets for research and development. We gratefully acknowledge the following sources:
- [Macaulay Library – American Crow (*Corvus brachyrhynchos*)](https://search.macaulaylibrary.org/catalog?taxonCode=amecro&mediaType=audio)
© Cornell Lab of Ornithology. A comprehensive archive of wildlife recordings, used in accordance with licensing terms for non-commercial research.
- [Xeno-Canto – American Crow (*Corvus brachyrhynchos*)](https://xeno-canto.org/species/corvus-brachyrhynchos)
A global, community-powered collection of bird calls, shared under various Creative Commons licenses. Many thanks to the recordists who make this work possible.
### Denoiser
The denoiser module cleans the crow audio files by removing unwanted background noise. This process improves the
quality of the audio for subsequent processing steps by focusing on the relevant crow sounds. It also enables
the creation of mixes (overlapping crow sounds) to train our separator model. This module utilizes the [biodenoising](https://github.com/earthspecies/biodenoising-inference)
module created by [Earth Species Project](https://earthspecies.org/).
![Denoiser](docs/images/denoiser.png)
### Classifier
The classifier module analyzes crow call embeddings and categorizes (i.e., auto-labels) them into various types such as
alert, number of calls, age indicators, rattles, soft songs, and quality of audio. It processes the embedded data and
applies machine learning techniques to identify and label crow vocalizations.
```python
{
"crowCount": int in [1,2,3,4], # 1 = single, 2 = two crows, 3 = unused, 4 = crowd
"crowAge": int in [1,2], # 1 = adult, 2 = juvenile
"alert": bool, # alert calls
"begging": bool, # food related calls
"softSong": bool, # sub songs | soft sounds
"rattle": bool, # rattle sounds
"mob": bool, # anger calls | mob | attack
"quality": int in [1,2,3], # 1 = bad, 2 = good, 3 = unused
}
```
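In code, auto-labeling a saved embedding looks roughly like the sketch below (the `.npy` path and the second index are placeholders; `predict_embedding` lives in `classifier/classify.py`):
```python
# Rough sketch: auto-label one second of a recording from its saved AVES embedding.
# The embedding path and the index 13 are placeholders, not part of the project docs.
import numpy as np
from classifier.classify import predict_embedding

embeddings = np.load(".cache/embeddings/365208991.npy")  # shape: [num_seconds, 768]
label = predict_embedding(embeddings[13])                 # label the 13s-14s window
print(label)  # e.g. {"crowCount": 1, "crowAge": 1, "alert": False, ..., "quality": 2}
```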
### Detector
The detector module leverages our custom-trained crow classifier to quickly find all crow sounds across an audio file.
By isolating these segments, the module enables more focused analysis and processing of individual crow calls and vocalizations.
We also include an interactive crow timeline app to review and listen to the detections:
![Detector timeline](docs/images/detector-timeline.png)
```json
[
{
"start_time": 37.0,
"end_time": 38.0,
"crowCount": 2,
"crowAge": 1,
"alert": false,
"begging": false,
"softSong": false,
"rattle": true,
"mob": false,
"quality": 2
}
]
```
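Conceptually, a detection pass can be sketched as a 1-second sliding window that embeds and classifies each window; the actual `detector/detect.py` may differ in windowing, silence filtering, and output fields:
```python
# Hedged sketch of the detection idea: embed and classify each 1-second window.
# `embed_fn` is a placeholder for an AVES embedding function returning a [768] numpy array.
import torchaudio
from classifier.classify import predict_embedding

def detect_crow_segments(wav_path, embed_fn, sample_rate=16000):
    audio, sr = torchaudio.load(wav_path)
    audio = torchaudio.functional.resample(audio.mean(dim=0, keepdim=True), sr, sample_rate)
    segments = []
    for start in range(audio.shape[1] // sample_rate):
        window = audio[:, start * sample_rate:(start + 1) * sample_rate]
        label = predict_embedding(embed_fn(window))
        segments.append({"start_time": float(start), "end_time": float(start + 1), **label})
    return segments
```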
### Embedder
The embedder module transforms each crow call into a 768-dimensional vector using the [AVES](https://github.com/earthspecies/aves?tab=readme-ov-file#birdaves) embedding model. This
transformation creates a numerical representation of the audio, which is essential for further analysis and machine
learning applications.
![Embeddings](docs/images/embeddings.png)
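The sketch below shows one way to wrap the bundled AVES `torchaudio` checkpoint (see `embedder/ispa/models/`) to produce clip-level vectors; it follows the loading pattern from the AVES repository, and averaging the final-layer features over time is an assumption rather than the exact behavior of `embedder/embed.py`:
```python
# Minimal sketch (assumptions noted above): wrap an AVES torchaudio checkpoint and
# average its final-layer features over time into one 768-d vector per clip.
import json
import torch
import torchaudio
from torchaudio.models import wav2vec2_model

class AvesEmbedder(torch.nn.Module):
    def __init__(self, config_path, weights_path):
        super().__init__()
        with open(config_path) as f:
            config = json.load(f)
        self.model = wav2vec2_model(**config, aux_num_out=None)
        self.model.load_state_dict(torch.load(weights_path, map_location="cpu"))
        self.model.eval()

    @torch.no_grad()
    def forward(self, waveform):            # waveform: [batch, samples] at 16 kHz
        features = self.model.extract_features(waveform)[0]
        return features[-1].mean(dim=1)     # -> [batch, 768]

embedder = AvesEmbedder(
    "embedder/ispa/models/aves-base-bio.torchaudio.model_config.json",
    "embedder/ispa/models/aves-base-bio.torchaudio.pt",
)
audio, sr = torchaudio.load("call.wav")                    # placeholder input file
audio = torchaudio.functional.resample(audio, sr, 16000)
print(embedder(audio.mean(dim=0, keepdim=True)).shape)     # torch.Size([1, 768])
```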
### Labeler
The labeler module provides a web interface for manual labeling of crow calls. This interface is designed for
human labeling and review, ensuring that the training data for the classifier is accurate and reliable. It also
provides a 3D interactive embedding feature. Built with Vue v3 and Node.js.
![Labeler](docs/images/labeler.png)
### Transcriber
The transcriber module provides a web interface for segmenting crow audio and labeling calls with a custom notation
system and limited vocabulary tokenizer. It supports waveform and spectrogram views, audio playback, and SRT export
for training crow-to-text models. Built with Vue v3 and Node.js.
![Transcriber](docs/images/transcriber.png)
### Separator
The separator module is responsible for separating overlapping crow calls into distinct audio files. This process
enables clearer analysis by isolating individual calls that may be mixed together in the original recordings.
![Separator](docs/images/separator.png)
## Directory Structure
```
crow-tools/
├── classifier/ # classify types of crow calls (alert, count, age, rattle, soft/sub song, quality)
├── denoiser/ # denoise crow audio files (remove background noises with biodenoising model)
├── detector/ # detect crow audio segments (1 second each, uses classifier model)
├── downloader/ # download library of crow audio files
├── embedder/ # embed crow calls into 768 dimensions (AVES embedding model)
├── labeler/ # human labeling web app (for training classifier, viewing embeddings, and transcribing crow language)
├── separator/ # separate overlapping crow calls into separate audio files (train and inference)
└── .cache/ # all downloaded and generated files (30+ GB)
```
## Authors
This project is a collaborative effort driven by a shared curiosity and love for crows — with the ultimate goal of better understanding their complex and intelligent vocal language.
- **[Jonathan Thomas](mailto:crows@openshot.org)** is the creator of [OpenShot Video Editor](https://www.openshot.org/) and brings deep experience in software development and artificial intelligence. Through his company, **OpenShot Studios LLC**, he leads the technical development of tools for large-scale audio analysis, machine learning, and crow call classification.
- **[Madeline Thomas](mailto:crows@owlmaddie.com) (@owlmaddie)** is a professional [artist / animator](https://www.owlmaddie.com/) and crow enthusiast. Through **owlmaddie LLC**, she contributes her creative talents to the project — labeling thousands of crow calls, designing expressive crow animations, and helping to communicate the science through visual storytelling.
Together, we're building a **state-of-the-art toolkit for decoding and exploring crow communication**, blending AI, design, and a shared passion for one of nature’s most fascinating birds.
## Citations
Give our crows a shout-out — cite `crow-tools` in your work! GitHub provides a citation file — just click the **Cite this repository** button in the sidebar at the top of this page.
==========================================
</README.md>
==========================================
==========================================
<classifier/RESULTS.txt>
Size: 1779 bytes | Lines: 85
==========================================
/home/jonathan/apps/earthspecies/crow-tools/.venv/bin/python /home/jonathan/apps/earthspecies/crow-tools/classifier/evaluate.py
Seed set to 18202
=== Label Statistics ===
Total labels loaded: 3894
crowCount distribution:
Class 0: 1245
Class 1: 2048
Class 2: 345
Class 4: 256
crowAge distribution:
Class 1: 3550
Class 2: 344
quality distribution:
Class 1: 1347
Class 2: 2547
alert count: 593
begging count: 307
softSong count: 612
rattle count: 543
mob count: 753
Evaluating on 468 VALIDATE samples
=== Overall Accuracy ===
crowCount : 82.91% (388/468)
crowAge : 96.15% (450/468)
quality : 91.67% (429/468)
alert : 90.81% (425/468)
begging : 96.15% (450/468)
softSong : 94.02% (440/468)
rattle : 95.51% (447/468)
mob : 88.68% (415/468)
=== crowCount Breakdown ===
Class 0: 89.21% (124/139)
Class 1: 87.69% (228/260)
Class 2: 40.48% (17/42)
Class 3: 0.00% (0/0)
Class 4: 70.37% (19/27)
=== crowAge Breakdown ===
Class 1: 98.56% (410/416)
Class 2: 76.92% (40/52)
=== quality Breakdown ===
Class 1: 85.91% (128/149)
Class 2: 94.36% (301/319)
=== alert Breakdown ===
Value 0: 93.37% (366/392)
Value 1: 77.63% (59/76)
=== begging Breakdown ===
Value 0: 98.11% (415/423)
Value 1: 77.78% (35/45)
=== softSong Breakdown ===
Value 0: 97.18% (379/390)
Value 1: 78.21% (61/78)
=== rattle Breakdown ===
Value 0: 96.94% (380/392)
Value 1: 88.16% (67/76)
=== mob Breakdown ===
Value 0: 95.00% (361/380)
Value 1: 61.36% (54/88)
=== Composite Score ===
Overall composite score: 84.39%
Individual task scores:
crowCount : 71.94%
crowAge : 87.74%
quality : 90.13%
alert : 85.50%
begging : 87.94%
softSong : 87.69%
rattle : 92.55%
mob : 78.18%
==========================================
</classifier/RESULTS.txt>
==========================================
==========================================
<classifier/classify.py>
Size: 2101 bytes | Lines: 64
==========================================
import os
import torch
import numpy as np
from classifier.model import CrowClassifier
# Set up paths.
PATH = os.path.dirname(__file__)
checkpoint_path = os.path.join(PATH, "models", "best_model.ckpt")
# Determine device.
device = "cuda" if torch.cuda.is_available() else "cpu"
# Load the model only once.
if os.path.exists(checkpoint_path):
_model = CrowClassifier.load_from_checkpoint(checkpoint_path)
else:
print(f"Model checkpoint not found: {checkpoint_path}")
_model = CrowClassifier()
_model.to(device)
_model.eval()
def predict_embedding(embedding, device=device):
"""
Given an embedding (numpy array of shape [768]), perform inference using the
pre-loaded model and return a predicted label dictionary in the new format:
{
"crowCount": int in [1,2,3,4],
"crowAge": int in [1,2],
"alert": bool,
"begging": bool,
"softSong": bool,
"rattle": bool,
"mob": bool,
"quality": int in [1,2,3],
}
"""
# Ensure the embedding is a float32 numpy array and add a batch dimension.
embedding_tensor = torch.from_numpy(embedding.astype(np.float32)).unsqueeze(0).to(device)
# Perform inference.
with torch.no_grad():
outputs = _model(embedding_tensor)
new_pred = {}
new_pred["crowCount"] = torch.argmax(outputs["crowCount"], dim=1).item()
new_pred["crowAge"] = torch.argmax(outputs["crowAge"], dim=1).item() + 1
new_pred["quality"] = torch.argmax(outputs["quality"], dim=1).item() + 1
new_pred["alert"] = (outputs["alert"].squeeze() > 0).item()
new_pred["begging"] = (outputs["begging"].squeeze() > 0).item()
new_pred["softSong"] = (outputs["softSong"].squeeze() > 0).item()
new_pred["rattle"] = (outputs["rattle"].squeeze() > 0).item()
new_pred["mob"] = (outputs["mob"].squeeze() > 0).item()
return new_pred
if __name__ == "__main__":
# Example: Replace with your actual numpy array of 768 numbers.
dummy_embedding = np.random.randn(768)
predicted_label = predict_embedding(dummy_embedding)
print("Predicted Label:", predicted_label)
==========================================
</classifier/classify.py>
==========================================
==========================================
<classifier/cluster.py>
Size: 17340 bytes | Lines: 428
==========================================
import os
import glob
import json
import random
import numpy as np
import librosa
import sounddevice as sd
from sklearn.decomposition import PCA
import faiss
from collections import defaultdict
# Paths
BASE_PATH = os.path.dirname(__file__)
EMBEDDINGS_DIR = os.path.join(BASE_PATH, "..", ".cache", "embeddings-denoised")
VOLUMES_DIR = os.path.join(BASE_PATH, "..", ".cache", "embeddings-denoised-volumes")
OUTPUT_SEGMENTS = os.path.join(BASE_PATH, "..", ".cache", "cluster_segments.json")
OUTPUT_LABELS = os.path.join(BASE_PATH, "..", ".cache", "cluster_labels.json")
AUDIO_DIR = os.path.join(BASE_PATH, "..", ".cache", "library-denoised")
INDEX_PATH = os.path.join(BASE_PATH, "..", ".cache", "faiss_index.index")
LABEL_TEMPLATE_FILE = os.path.join(BASE_PATH, "..", ".cache", "cluster_segments_labels.json")
# Parameters
STARTING_CLUSTER_ID = 65
VOLUME_THRESHOLD = 0.0002
SUBSAMPLE_FACTOR = 1.0
PCA_COMPONENTS = 75
MAX_CLUSTER_SIZE = 500 # Maximum leaf size before splitting stops.
MERGE_THRESHOLD = 0.15 # Merge leaves if cosine distance < 0.15 (single pass)
PREVIEW_SEEDS = False
PREVIEW_CLUSTERS = False
ONLY_OUTPUT_SEEDS = True
PREVIEW_PER_CLUSTER = 10
NUM_REPRESENTATIVE = 10 # Number of segments per merged cluster for labeling
# Seed examples for new clusters.
SEED_EXAMPLES = [
# Rattles
#{"file_id": "365208991", "start": 13.0, "end": 14.0}, # Good, 9 similar
#{"file_id": "227497211", "start": 48.0, "end": 49.0}, # Good, 7 similar
#{"file_id": "124568031", "start": 7.0, "end": 8.0}, # Good 10+ similar, (had to lower volume to 0.0002)
#{"file_id": "431165421", "start": 12.0, "end": 13.0}, # Okay, 3 similar
#{"file_id": "58460", "start": 12.0, "end": 13.0}, # Good, 8 similar
#{"file_id": "504976401", "start": 4.0, "end": 5.0}, # Great, 9 similar
#{"file_id": "122364731", "start": 3.0, "end": 4.0}, # Okay, 3 similar
#{"file_id": "163637", "start": 1.0, "end": 2.0}, # Okay, 3-4 similar
#{"file_id": "496356", "start": 9.0, "end": 10.0}, # Good, 5 similar
#{"file_id": "156527", "start": 30.0, "end": 31.0}, # Okay, 3 or 4 similar
{"file_id": "227497211", "start": 48.0, "end": 49.0},
{"file_id": "365208991", "start": 27.0, "end": 28.0},
{"file_id": "361178511", "start": 27.0, "end": 28.0},
{"file_id": "361178511", "start": 13.0, "end": 14.0},
{"file_id": "156527", "start": 6.0, "end": 7.0},
{"file_id": "92055", "start": 15.0, "end": 16.0},
{"file_id": "619206184", "start": 17.0, "end": 18.0},
# Sub/Soft Song
{"file_id": "229159", "start": 74.0, "end": 75.0},
{"file_id": "542024451", "start": 7.0, "end": 8.0},
{"file_id": "539550101", "start": 12.0, "end": 13.0},
{"file_id": "535466271", "start": 1.0, "end": 2.0},
{"file_id": "408950861", "start": 21.0, "end": 22.0},
{"file_id": "319547721", "start": 100.0, "end": 101.0},
{"file_id": "167792", "start": 20.0, "end": 21.0},
#{"file_id": "408950861", "start": 20.0, "end": 21.0},
#{"file_id": "13123", "start": 34.0, "end": 35.0},
#{"file_id": "984442", "start": 33.0, "end": 34.0},
#{"file_id": "984442", "start": 6.0, "end": 7.0},
# Juvenile begging
#{"file_id": "32684421", "start": 39.0, "end": 40.0},
#{"file_id": "32684421", "start": 22.0, "end": 23.0},
#{"file_id": "32684421", "start": 10.0, "end": 11.0},
#{"file_id": "32684421", "start": 3.0, "end": 4.0}
]
def play_audio_preview(audio_file, start_time, duration):
try:
y, sr = librosa.load(audio_file, sr=None, offset=start_time, duration=duration)
sd.play(y, sr)
sd.wait()
except Exception as e:
print(f"Error playing {audio_file}: {e}")
def load_non_silent_embeddings(emb_dir, vol_dir, thresh):
emb_list, ids, total = [], [], 0
for path in sorted(glob.glob(os.path.join(emb_dir, "*.npy"))):
file_id = os.path.splitext(os.path.basename(path))[0]
vol_path = os.path.join(vol_dir, f"{file_id}.npy")
if not os.path.exists(vol_path):
continue
try:
emb_data = np.load(path)
vol_data = np.load(vol_path)
if vol_data.ndim > 1:
vol_data = vol_data.squeeze(-1)
except Exception:
continue
total += emb_data.shape[0]
for i in range(emb_data.shape[0]):
if vol_data[i] > thresh:
emb_list.append(emb_data[i])
ids.append((file_id, i))
return np.array(emb_list, dtype=np.float32), ids, total
def random_subsample(embeddings, ids, factor):
seed_file_ids = {seed["file_id"] for seed in SEED_EXAMPLES}
seed_indices = [i for i, (file_id, _) in enumerate(ids) if file_id in seed_file_ids]
other_indices = [i for i, (file_id, _) in enumerate(ids) if file_id not in seed_file_ids]
if factor >= 1.0:
selected_other = other_indices
else:
n_keep = int(len(other_indices) * factor)
selected_other = sorted(random.sample(other_indices, n_keep))
selected_indices = sorted(seed_indices + selected_other)
return embeddings[selected_indices], [ids[i] for i in selected_indices]
def reduce_dim_pca(embeddings, n_components=64):
pca = PCA(n_components=n_components, random_state=42)
reduced = pca.fit_transform(embeddings)
print("Explained variance (first 5):", pca.explained_variance_ratio_[:5])
return reduced, pca
def normalize_embeddings(embeddings):
norms = np.linalg.norm(embeddings, axis=1, keepdims=True)
norms = np.clip(norms, 1e-8, None)
return embeddings / norms
def hierarchical_split(embeddings, indices, max_size=500, level=0):
if len(indices) <= max_size:
return {"indices": indices, "level": level, "size": len(indices)}
k = 2
d = embeddings.shape[1]
kmeans = faiss.Kmeans(d, k, niter=20, verbose=False, seed=42)
subset = embeddings[indices]
kmeans.train(subset)
D, I = kmeans.index.search(subset, 1)
assignments = I.flatten()
clusters = {i: [] for i in range(k)}
for j, assign in enumerate(assignments):
clusters[assign].append(indices[j])
result = {"level": level, "size": len(indices), "children": {}}
for i in range(k):
result["children"][i] = hierarchical_split(embeddings, clusters[i], max_size, level + 1)
return result
def collect_leaves(hierarchy):
leaves = []
if "children" not in hierarchy:
leaves.append(hierarchy)
else:
for child in hierarchy["children"].values():
leaves.extend(collect_leaves(child))
return leaves
def compute_leaf_center(norm_emb, indices):
center = np.mean(norm_emb[indices], axis=0)
center /= np.linalg.norm(center)
return center
def merge_leaves_once(leaves, merges):
merged = set()
new_leaves = []
for (i, j, dist) in merges:
if i not in merged and j not in merged:
union_indices = leaves[i]["indices"] + leaves[j]["indices"]
new_leaf = {"indices": union_indices,
"level": min(leaves[i]["level"], leaves[j]["level"]),
"size": len(union_indices)}
new_leaves.append(new_leaf)
merged.add(i)
merged.add(j)
for i in range(len(leaves)):
if i not in merged:
new_leaves.append(leaves[i])
return new_leaves
def process_seed_examples(norm_emb, ids, faiss_index, max_per_file=4):
# Process each seed example individually.
seed_clusters = {}
default_template = {
"crowCount": 1, "crowAge": 1,
"alert": False, "begging": False,
"softSong": False, "rattle": False, "mob": False,
"quality": 2, "reviewed": False
}
for i, seed in enumerate(SEED_EXAMPLES):
target_index = None
for idx, (file_id, sec) in enumerate(ids):
if file_id == seed["file_id"] and int(sec) == int(seed["start"]):
target_index = idx
break
if target_index is None:
print(f"Seed example {seed} not found.")
continue
vec = norm_emb[target_index].reshape(1, norm_emb.shape[1])
D, I = faiss_index.search(vec, NUM_REPRESENTATIVE)
results = []
file_counts = defaultdict(int)
for file_id, sec in [ids[idx] for idx in I[0]]:
if file_counts[file_id] < max_per_file:
results.append({"file_id": file_id, "start_time": float(sec), "end_time": float(sec + 1)})
file_counts[file_id] += 1
if results:
seed_clusters[str(i)] = results
return seed_clusters
def build_and_save_clusters(merged_leaves, seed_clusters, ids, norm_emb):
"""
Build segments and labels from both merged clusters and seed clusters.
Existing segments and labels are loaded (if available) and new records are
appended only if they are not already present.
- For merged clusters, segments are sorted in similarity order for preview,
then the top representative segments are chosen.
- For seed clusters, segments are added if not already present.
- Segments in each file are sorted by start_time before saving.
- Duplicate segment keys (file, start, end) are avoided.
"""
from collections import defaultdict
# Load existing segments and labels if the files exist.
if os.path.exists(OUTPUT_SEGMENTS):
with open(OUTPUT_SEGMENTS, "r") as f:
existing_segments = json.load(f)
else:
existing_segments = {}
if os.path.exists(OUTPUT_LABELS):
with open(OUTPUT_LABELS, "r") as f:
existing_labels = json.load(f)
else:
existing_labels = {}
# New segments and labels to add.
new_segments = defaultdict(list)
new_labels = {}
if os.path.exists(LABEL_TEMPLATE_FILE):
with open(LABEL_TEMPLATE_FILE, "r") as f:
label_templates = json.load(f)
else:
label_templates = {}
default_template = {
"crowCount": 1, "crowAge": 1,
"alert": False, "begging": False,
"softSong": False, "rattle": False, "mob": False,
"quality": 2, "reviewed": False
}
cluster_id = STARTING_CLUSTER_ID
# Process merged clusters.
for leaf in merged_leaves:
center = compute_leaf_center(norm_emb, leaf["indices"])
sorted_indices = sorted(leaf["indices"],
key=lambda idx: np.dot(norm_emb[idx], center),
reverse=True)
if PREVIEW_CLUSTERS:
while True:
print(f"\nMerged Cluster {cluster_id} (Size {leaf['size']}):")
for idx in sorted_indices[:PREVIEW_PER_CLUSTER]:
file_id, sec = ids[idx]
sim = np.dot(norm_emb[idx], center)
print(f" File: {file_id}, Start: {sec}, Similarity: {sim:.4f}")
audio_file = os.path.join(AUDIO_DIR, f"{file_id}.wav")
play_audio_preview(audio_file, float(sec), 1.0)
user_input = input("Press 'R' to repeat or Enter to continue: ")
if user_input.strip().lower() != 'r':
break
for idx in sorted_indices[:NUM_REPRESENTATIVE]:
file_id, sec = ids[idx]
start_time = float(sec)
end_time = start_time + 1.0
seg_key = f"{file_id}-{int(start_time)}-{int(end_time)}"
# Avoid duplicate label keys in new labels.
if seg_key in new_labels:
continue
seg = {"common_name": "American Crow",
"scientific_name": "Corvus brachyrhynchos",
"start_time": start_time,
"end_time": end_time,
"confidence": 0.0,
"cluster": cluster_id}
new_segments[file_id].append(seg)
if str(cluster_id) in label_templates:
new_labels[seg_key] = label_templates[str(cluster_id)]
else:
new_labels[seg_key] = default_template.copy()
new_labels[seg_key]["cluster"] = cluster_id
cluster_id += 1
# Process seed clusters.
for seed_id, seg_list in seed_clusters.items():
current_cluster_id = cluster_id
for seg in seg_list:
file_id = seg["file_id"]
start_time = float(seg["start_time"])
end_time = float(seg["end_time"])
seg_key = f"{file_id}-{int(start_time)}-{int(end_time)}"
if seg_key in new_labels:
continue
seg_entry = {"common_name": "American Crow",
"scientific_name": "Corvus brachyrhynchos",
"start_time": start_time,
"end_time": end_time,
"confidence": 0.0,
"cluster": current_cluster_id}
new_segments[file_id].append(seg_entry)
if str(cluster_id) in label_templates:
new_labels[seg_key] = label_templates[str(cluster_id)]
else:
new_labels[seg_key] = default_template.copy()
new_labels[seg_key]["cluster"] = current_cluster_id
if PREVIEW_SEEDS:
while True:
print(f"\nSeed Cluster {current_cluster_id}:")
for seg in seg_list[:PREVIEW_PER_CLUSTER]:
print(f" File: {seg['file_id']}, Start: {seg['start_time']}, End: {seg['end_time']}")
audio_file = os.path.join(AUDIO_DIR, f"{seg['file_id']}.wav")
play_audio_preview(audio_file, seg["start_time"], 1.0)
user_input = input("Press 'R' to repeat or Enter to continue: ")
if user_input.strip().lower() != 'r':
break
cluster_id += 1
# Merge new segments with existing segments.
for file_id, seg_list in new_segments.items():
if file_id not in existing_segments:
existing_segments[file_id] = seg_list
else:
# For each new segment, check if a segment with the same start and end exists.
existing_keys = {(seg["start_time"], seg["end_time"]) for seg in existing_segments[file_id]}
for seg in seg_list:
key = (seg["start_time"], seg["end_time"])
if key not in existing_keys:
existing_segments[file_id].append(seg)
else:
print(f"Skipping duplicate segment for file {file_id} at {key}")
# Merge new labels with existing labels.
for key, label in new_labels.items():
if key in existing_labels:
print(f"Skipping duplicate label: {key}")
else:
existing_labels[key] = label
# Ensure segments in each file are sorted by start_time.
for file_id in existing_segments:
existing_segments[file_id] = sorted(existing_segments[file_id], key=lambda x: x["start_time"])
# Save updated JSON outputs.
with open(OUTPUT_SEGMENTS, "w") as f:
json.dump(existing_segments, f, indent=2)
with open(OUTPUT_LABELS, "w") as f:
json.dump(existing_labels, f, indent=2)
print(f"\nSaved segments to {OUTPUT_SEGMENTS}")
print(f"Saved cluster labels to {OUTPUT_LABELS}")
return existing_segments, existing_labels
def main():
random.seed(42)
np.random.seed(42)
# Load embeddings.
embeddings, ids, total = load_non_silent_embeddings(EMBEDDINGS_DIR, VOLUMES_DIR, VOLUME_THRESHOLD)
print(f"Processed {total} seconds. Non-silent segments: {embeddings.shape[0]}")
embeddings, ids = random_subsample(embeddings, ids, SUBSAMPLE_FACTOR)
print(f"After subsampling: {embeddings.shape[0]} segments")
# Dimensionality reduction.
reduced, _ = reduce_dim_pca(embeddings, PCA_COMPONENTS)
# Normalize embeddings.
norm_emb = normalize_embeddings(reduced)
# Build a Faiss index for similarity search.
d = norm_emb.shape[1]
faiss_index = faiss.IndexFlatIP(d)
faiss_index.add(norm_emb)
# Hierarchical clustering via recursive k-means splitting.
if not ONLY_OUTPUT_SEEDS:
all_indices = list(range(len(norm_emb)))
hierarchy = hierarchical_split(norm_emb, all_indices, max_size=MAX_CLUSTER_SIZE)
leaves = collect_leaves(hierarchy)
print(f"\nTotal leaf clusters: {len(leaves)}")
# Compute leaf centers and determine merge candidates.
centers = {i: compute_leaf_center(norm_emb, leaf["indices"]) for i, leaf in enumerate(leaves)}
num_leaves = len(centers)
merges = []
for i in range(num_leaves):
for j in range(i + 1, num_leaves):
cosine_distance = 1 - np.dot(centers[i], centers[j])
if cosine_distance < MERGE_THRESHOLD:
merges.append((i, j, cosine_distance))
merged_leaves = merge_leaves_once(leaves, merges)
print(f"After merging, total clusters: {len(merged_leaves)}")
else:
merged_leaves = []
# Process seed examples.
seed_clusters = process_seed_examples(norm_emb, ids, faiss_index, max_per_file=4)
if seed_clusters:
print(f"\nProcessed {len(seed_clusters)} seed clusters.")
# Combine merged and seed clusters, preview as needed, sort and save JSON.
build_and_save_clusters(merged_leaves, seed_clusters, ids, norm_emb)
if __name__ == "__main__":
main()
==========================================
</classifier/cluster.py>
==========================================
==========================================
<classifier/dataset.py>
Size: 3969 bytes | Lines: 111
==========================================
import os
import json
import torch
from torch.utils.data import Dataset
import numpy as np
PATH = os.path.dirname(__file__)
embeddings_dir = os.path.join(PATH, "..", ".cache", "embeddings")
labels_file = os.path.join(PATH, "..", ".cache", "cluster_labels.json")
class CrowDataset(Dataset):
def __init__(self):
# Load the labels from the JSON file.
with open(labels_file, 'r') as f:
self.raw_labels = json.load(f)
self.labels = { key: label for key, label in self.raw_labels.items() if "reviewed" in label and label["reviewed"] }
self.keys = list(self.labels.keys())
self.print_label_stats()
def print_label_stats(self):
total_labels = len(self.labels)
counts = {
"crowCount": {},
"crowAge": {},
"quality": {},
"alert": 0,
"begging": 0,
"softSong": 0,
"rattle": 0,
"mob": 0
}
for key, label in self.labels.items():
cc = label.get("crowCount", 1)
counts["crowCount"][cc] = counts["crowCount"].get(cc, 0) + 1
ca = label.get("crowAge", 1)
counts["crowAge"][ca] = counts["crowAge"].get(ca, 0) + 1
q = label.get("quality", 2)
if q == 3:
q = 2 # Force value 3 (HQ) to be a 2 (for training)
counts["quality"][q] = counts["quality"].get(q, 0) + 1