forked from speaches-ai/speaches
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path: docker-entrypoint.sh
More file actions
51 lines (41 loc) · 1.57 KB
/
docker-entrypoint.sh
File metadata and controls
51 lines (41 loc) · 1.57 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
#!/bin/bash
# docker-entrypoint.sh — ensures required Speaches models are present in the
# Hugging Face cache, then hands control to the container CMD.
#
# Environment:
#   PRELOAD_MODELS      - comma-separated model ids to pre-download
#   SKIP_MODEL_DOWNLOAD - "true" to skip the download step entirely
#
# Strict mode: exit on error (-e), error on unset variables (-u), and fail a
# pipeline if any stage fails (-o pipefail). All reads below use ${:-} defaults,
# so -u is safe.
set -euo pipefail

echo "=========================================="
echo "Speaches Container Startup"
echo "=========================================="

# Configuration - can be overridden by environment variables
PRELOAD_MODELS="${PRELOAD_MODELS:-speaches-ai/Kokoro-82M-v1.0-ONNX-fp16,deepdml/faster-whisper-large-v3-turbo-ct2}"
SKIP_MODEL_DOWNLOAD="${SKIP_MODEL_DOWNLOAD:-false}"
#######################################
# Check whether a Hugging Face model is already in the local hub cache.
# The HF hub stores each model under "models--<org>--<name>" (every "/" in the
# model id becomes "--").
# Globals:
#   HF_HUB_CACHE - hub cache root; defaults to $HOME/.cache/huggingface/hub
# Arguments:
#   $1 - model id in "org/name" form
# Returns:
#   0 if the model's cache directory exists, 1 otherwise.
#######################################
model_is_cached() {
  local model_id="$1"
  local cache_path="${HF_HUB_CACHE:-$HOME/.cache/huggingface/hub}"
  # BUG FIX: original used ${model_id//\/\/--} which *deletes* "//--" instead
  # of replacing "/" with "--", so cached models were never detected.
  local model_dir="models--${model_id//\//--}"
  if [ -d "$cache_path/$model_dir" ]; then
    return 0 # Model exists
  else
    return 1 # Model doesn't exist
  fi
}
# Pre-fetch each configured model that is not already cached, unless the
# operator opted out via SKIP_MODEL_DOWNLOAD=true. A failed download is only
# a warning so the server can still start with whatever models it has.
if [ "$SKIP_MODEL_DOWNLOAD" != "true" ]; then
  echo "Checking for required models..."
  # Split the comma-separated model list into an array.
  IFS=',' read -ra MODELS <<< "$PRELOAD_MODELS"
  for model in "${MODELS[@]}"; do
    model=$(echo "$model" | xargs) # Trim surrounding whitespace
    if ! model_is_cached "$model"; then
      echo "↓ Downloading model: $model"
      python model_downloader.py "$model" || echo "⚠ Warning: Failed to download $model"
    else
      echo "✓ Model already cached: $model"
    fi
  done
  echo "Model preparation complete!"
else
  echo "Skipping model download (SKIP_MODEL_DOWNLOAD=true)"
fi
# Announce startup, then replace this shell with the container CMD so signals
# (SIGTERM on `docker stop`) reach the server process directly.
printf '%s\n' \
  "==========================================" \
  "Starting Speaches Server..." \
  "=========================================="
exec "$@"