4 changes: 2 additions & 2 deletions docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/run
@@ -54,8 +54,8 @@ function setup_homekit_config() {
    local config_path="$1"

    if [[ ! -f "${config_path}" ]]; then
-        echo "[INFO] Creating empty HomeKit config file..."
-        echo 'homekit: {}' > "${config_path}"
+        echo "[INFO] Creating empty config file for HomeKit..."
+        echo '{}' > "${config_path}"
    fi

    # Convert YAML to JSON for jq processing
24 changes: 22 additions & 2 deletions docker/main/rootfs/usr/local/go2rtc/create_config.py
@@ -23,8 +23,28 @@
yaml = YAML()

# Check if arbitrary exec sources are allowed (defaults to False for security)
-ALLOW_ARBITRARY_EXEC = os.environ.get(
-    "GO2RTC_ALLOW_ARBITRARY_EXEC", "false"
+allow_arbitrary_exec = None
+if "GO2RTC_ALLOW_ARBITRARY_EXEC" in os.environ:
+    allow_arbitrary_exec = os.environ.get("GO2RTC_ALLOW_ARBITRARY_EXEC")
+elif (
+    os.path.isdir("/run/secrets")
+    and os.access("/run/secrets", os.R_OK)
+    and "GO2RTC_ALLOW_ARBITRARY_EXEC" in os.listdir("/run/secrets")
+):
+    allow_arbitrary_exec = (
+        Path(os.path.join("/run/secrets", "GO2RTC_ALLOW_ARBITRARY_EXEC"))
+        .read_text()
+        .strip()
+    )
+# check for the add-on options file
+elif os.path.isfile("/data/options.json"):
+    with open("/data/options.json") as f:
+        raw_options = f.read()
+    options = json.loads(raw_options)
+    allow_arbitrary_exec = options.get("go2rtc_allow_arbitrary_exec")
+
+ALLOW_ARBITRARY_EXEC = allow_arbitrary_exec is not None and str(
+    allow_arbitrary_exec
).lower() in ("true", "1", "yes")

FRIGATE_ENV_VARS = {k: v for k, v in os.environ.items() if k.startswith("FRIGATE_")}
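As a quick illustration of the opt-in behavior introduced above (not part of the diff itself), the flag only evaluates to `True` when an explicit truthy value is supplied via the environment variable, a `/run/secrets` file, or the add-on's `options.json`; anything else, including an unset value, keeps arbitrary `exec` sources disabled:

```python
# Illustrative sketch of the truthy parsing above; the input values are hypothetical.
def parse_allow_arbitrary_exec(raw: str | None) -> bool:
    # None means no source provided the setting -> keep the secure default (disabled).
    return raw is not None and str(raw).lower() in ("true", "1", "yes")

for raw in ("true", "1", "YES", "false", "0", None):
    print(f"{raw!r} -> {parse_allow_arbitrary_exec(raw)}")
# 'true', '1', and 'YES' enable it; everything else stays disabled.
```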
33 changes: 25 additions & 8 deletions docs/docs/configuration/genai/config.md
@@ -41,12 +41,12 @@ If you are trying to use a single model for Frigate and HomeAssistant, it will n

The following models are recommended:

-| Model             | Notes                                                                 |
-| ----------------- | --------------------------------------------------------------------- |
-| `qwen3-vl`        | Strong visual and situational understanding, higher vram requirement |
-| `Intern3.5VL`     | Relatively fast with good vision comprehension                        |
-| `gemma3`          | Strong frame-to-frame understanding, slower inference times           |
-| `qwen2.5-vl`      | Fast but capable model with good vision comprehension                 |
+| Model         | Notes                                                                 |
+| ------------- | --------------------------------------------------------------------- |
+| `qwen3-vl`    | Strong visual and situational understanding, higher vram requirement |
+| `Intern3.5VL` | Relatively fast with good vision comprehension                        |
+| `gemma3`      | Strong frame-to-frame understanding, slower inference times           |
+| `qwen2.5-vl`  | Fast but capable model with good vision comprehension                 |

:::note

@@ -61,10 +61,10 @@ genai:
  provider: ollama
  base_url: http://localhost:11434
  model: minicpm-v:8b
-  provider_options: # other Ollama client options can be defined
+  provider_options: # other Ollama client options can be defined
    keep_alive: -1
    options:
-      num_ctx: 8192 # make sure the context matches other services that are using ollama
+      num_ctx: 8192 # make sure the context matches other services that are using ollama
```
## Google Gemini
@@ -120,6 +120,23 @@ To use a different OpenAI-compatible API endpoint, set the `OPENAI_BASE_URL` env

:::

+:::tip
+
+For OpenAI-compatible servers (such as llama.cpp) that don't expose the configured context size in the API response, you can manually specify the context size in `provider_options`:
+
+```yaml
+genai:
+  provider: openai
+  base_url: http://your-llama-server
+  model: your-model-name
+  provider_options:
+    context_size: 8192 # Specify the configured context size
+```
+
+This ensures Frigate uses the correct context window size when generating prompts.
+
+:::

## Azure OpenAI

Microsoft offers several vision models through Azure OpenAI. A subscription is required.
3 changes: 3 additions & 0 deletions docs/docs/configuration/reference.md
@@ -696,6 +696,9 @@ genai:
  # Optional additional args to pass to the GenAI Provider (default: None)
  provider_options:
    keep_alive: -1
+  # Optional: Options to pass during inference calls (default: {})
+  runtime_options:
+    temperature: 0.7

# Optional: Configuration for audio transcription
# NOTE: only the enabled option can be overridden at the camera level
2 changes: 1 addition & 1 deletion frigate/api/defs/query/review_query_parameters.py
@@ -10,7 +10,7 @@ class ReviewQueryParams(BaseModel):
    cameras: str = "all"
    labels: str = "all"
    zones: str = "all"
-    reviewed: int = 0
+    reviewed: Union[int, SkipJsonSchema[None]] = None
    limit: Union[int, SkipJsonSchema[None]] = None
    severity: Union[SeverityEnum, SkipJsonSchema[None]] = None
    before: Union[float, SkipJsonSchema[None]] = None
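A small illustration (not part of the PR) of what making `reviewed` optional means for callers: omitting the query parameter now yields `None`, which the API can treat as "no reviewed filter", while an explicit value still filters as before.

```python
from typing import Union

from pydantic import BaseModel
from pydantic.json_schema import SkipJsonSchema


class ReviewQueryParams(BaseModel):
    # Mirrors only the changed field above; the other query fields are omitted for brevity.
    reviewed: Union[int, SkipJsonSchema[None]] = None


print(ReviewQueryParams().reviewed)            # None -> return both reviewed and unreviewed
print(ReviewQueryParams(reviewed=0).reviewed)  # 0 -> filter to unreviewed only (assumed semantics)
```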
3 changes: 3 additions & 0 deletions frigate/config/camera/genai.py
@@ -26,3 +26,6 @@ class GenAIConfig(FrigateBaseModel):
    provider_options: dict[str, Any] = Field(
        default={}, title="GenAI Provider extra options."
    )
+    runtime_options: dict[str, Any] = Field(
+        default={}, title="Options to pass during inference calls."
+    )
1 change: 1 addition & 0 deletions frigate/genai/azure-openai.py
@@ -64,6 +64,7 @@ def _send(self, prompt: str, images: list[bytes]) -> Optional[str]:
                    },
                ],
                timeout=self.timeout,
+                **self.genai_config.runtime_options,
            )
        except Exception as e:
            logger.warning("Azure OpenAI returned an error: %s", str(e))
6 changes: 5 additions & 1 deletion frigate/genai/gemini.py
@@ -35,10 +35,14 @@ def _send(self, prompt: str, images: list[bytes]) -> Optional[str]:
            for img in images
        ] + [prompt]
        try:
+            # Merge runtime_options into generation_config if provided
+            generation_config_dict = {"candidate_count": 1}
+            generation_config_dict.update(self.genai_config.runtime_options)
+
            response = self.provider.generate_content(
                data,
                generation_config=genai.types.GenerationConfig(
-                    candidate_count=1,
+                    **generation_config_dict
                ),
                request_options=genai.types.RequestOptions(
                    timeout=self.timeout,
6 changes: 5 additions & 1 deletion frigate/genai/ollama.py
@@ -58,11 +58,15 @@ def _send(self, prompt: str, images: list[bytes]) -> Optional[str]:
            )
            return None
        try:
+            ollama_options = {
+                **self.provider_options,
+                **self.genai_config.runtime_options,
+            }
            result = self.provider.generate(
                self.genai_config.model,
                prompt,
                images=images if images else None,
-                **self.provider_options,
+                **ollama_options,
            )
            logger.debug(
                f"Ollama tokens used: eval_count={result.get('eval_count')}, prompt_eval_count={result.get('prompt_eval_count')}"
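The merge above means inference-time options win over client defaults when both define the same key, since the later unpacking overwrites earlier keys. A tiny illustration with made-up values (not from the PR):

```python
# Illustrative merge semantics: later ** unpacking wins on key conflicts,
# and nested dicts are replaced wholesale rather than deep-merged.
provider_options = {"keep_alive": -1, "options": {"num_ctx": 8192}}
runtime_options = {"options": {"num_ctx": 4096}, "temperature": 0.7}

ollama_options = {**provider_options, **runtime_options}
print(ollama_options)
# {'keep_alive': -1, 'options': {'num_ctx': 4096}, 'temperature': 0.7}
```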
22 changes: 19 additions & 3 deletions frigate/genai/openai.py
@@ -22,9 +22,14 @@ class OpenAIClient(GenAIClient):

    def _init_provider(self):
        """Initialize the client."""
-        return OpenAI(
-            api_key=self.genai_config.api_key, **self.genai_config.provider_options
-        )
+        # Extract context_size from provider_options as it's not a valid OpenAI client parameter
+        # It will be used in get_context_size() instead
+        provider_opts = {
+            k: v
+            for k, v in self.genai_config.provider_options.items()
+            if k != "context_size"
+        }
+        return OpenAI(api_key=self.genai_config.api_key, **provider_opts)

    def _send(self, prompt: str, images: list[bytes]) -> Optional[str]:
        """Submit a request to OpenAI."""
@@ -56,6 +61,7 @@ def _send(self, prompt: str, images: list[bytes]) -> Optional[str]:
                    },
                ],
                timeout=self.timeout,
+                **self.genai_config.runtime_options,
            )
            if (
                result is not None
@@ -73,6 +79,16 @@ def get_context_size(self) -> int:
        if self.context_size is not None:
            return self.context_size

+        # First check provider_options for manually specified context size
+        # This is necessary for llama.cpp and other OpenAI-compatible servers
+        # that don't expose the configured runtime context size in the API response
+        if "context_size" in self.genai_config.provider_options:
+            self.context_size = self.genai_config.provider_options["context_size"]
+            logger.debug(
+                f"Using context size {self.context_size} from provider_options for model {self.genai_config.model}"
+            )
+            return self.context_size
+
        try:
            models = self.provider.models.list()
            for model in models.data:
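To summarize the lookup order implemented above, here is a compact, illustrative sketch; the final fallback value of 4096 is an assumption for the example only, not taken from the diff:

```python
# Illustrative only: condenses the resolution order shown in the diff above.
def resolve_context_size(provider_options: dict, api_reported: int | None) -> int:
    # 1. An explicit override in provider_options always wins (needed for llama.cpp-style servers).
    if "context_size" in provider_options:
        return provider_options["context_size"]
    # 2. Otherwise fall back to whatever the models API reported, if anything.
    if api_reported is not None:
        return api_reported
    # 3. Hypothetical safety default for this sketch.
    return 4096

print(resolve_context_size({"context_size": 8192}, api_reported=32768))  # 8192
print(resolve_context_size({}, api_reported=32768))                      # 32768
```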
7 changes: 7 additions & 0 deletions frigate/util/classification.py
@@ -43,6 +43,7 @@ def write_training_metadata(model_name: str, image_count: int) -> None:
        model_name: Name of the classification model
        image_count: Number of images used in training
    """
+    model_name = model_name.strip()
    clips_model_dir = os.path.join(CLIPS_DIR, model_name)
    os.makedirs(clips_model_dir, exist_ok=True)

@@ -70,6 +71,7 @@ def read_training_metadata(model_name: str) -> dict[str, any] | None:
    Returns:
        Dictionary with last_training_date and last_training_image_count, or None if not found
    """
+    model_name = model_name.strip()
    clips_model_dir = os.path.join(CLIPS_DIR, model_name)
    metadata_path = os.path.join(clips_model_dir, TRAINING_METADATA_FILE)

@@ -95,6 +97,7 @@ def get_dataset_image_count(model_name: str) -> int:
    Returns:
        Total count of images across all categories
    """
+    model_name = model_name.strip()
    dataset_dir = os.path.join(CLIPS_DIR, model_name, "dataset")

    if not os.path.exists(dataset_dir):
@@ -126,6 +129,7 @@ def __init__(self, model_name: str) -> None:
            "TF_KERAS_MOBILENET_V2_WEIGHTS_URL",
            "",
        )
+        model_name = model_name.strip()
        super().__init__(
            stop_event=None,
            priority=PROCESS_PRIORITY_LOW,
@@ -292,6 +296,7 @@ def __train_classification_model(self) -> bool:
def kickoff_model_training(
    embeddingRequestor: EmbeddingsRequestor, model_name: str
) -> None:
+    model_name = model_name.strip()
    requestor = InterProcessRequestor()
    requestor.send_data(
        UPDATE_MODEL_STATE,
@@ -359,6 +364,7 @@ def collect_state_classification_examples(
        model_name: Name of the classification model
        cameras: Dict mapping camera names to normalized crop coordinates [x1, y1, x2, y2] (0-1)
    """
+    model_name = model_name.strip()
    dataset_dir = os.path.join(CLIPS_DIR, model_name, "dataset")

    # Step 1: Get review items for the cameras
@@ -714,6 +720,7 @@ def collect_object_classification_examples(
        model_name: Name of the classification model
        label: Object label to collect (e.g., "person", "car")
    """
+    model_name = model_name.strip()
    dataset_dir = os.path.join(CLIPS_DIR, model_name, "dataset")
    temp_dir = os.path.join(dataset_dir, "temp")
    os.makedirs(temp_dir, exist_ok=True)
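A brief illustration (not from the PR itself) of why the added `strip()` calls matter: a model name carrying stray whitespace would otherwise resolve to a different clips directory than the one the trained model actually lives in. The clips root and model name below are hypothetical, used only for this sketch.

```python
import os

CLIPS_DIR = "/media/frigate/clips"  # assumed path for illustration
raw_name = "front_door_model "      # note the trailing space

print(repr(os.path.join(CLIPS_DIR, raw_name)))          # '/media/frigate/clips/front_door_model '
print(repr(os.path.join(CLIPS_DIR, raw_name.strip())))  # '/media/frigate/clips/front_door_model'
```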
5 changes: 4 additions & 1 deletion web/public/locales/en/config/genai.json
@@ -15,6 +15,9 @@
    },
    "provider_options": {
      "label": "GenAI Provider extra options."
-    }
+    },
+    "runtime_options": {
+      "label": "Options to pass during inference calls."
+    }
  }
}
2 changes: 1 addition & 1 deletion web/src/pages/Events.tsx
@@ -205,7 +205,7 @@ export default function Events() {
      cameras: reviewSearchParams["cameras"],
      labels: reviewSearchParams["labels"],
      zones: reviewSearchParams["zones"],
-      reviewed: 1,
+      reviewed: null, // We want both reviewed and unreviewed items as we filter in the UI
      before: reviewSearchParams["before"] || last24Hours.before,
      after: reviewSearchParams["after"] || last24Hours.after,
    };
1 change: 1 addition & 0 deletions web/src/views/live/LiveDashboardView.tsx
@@ -114,6 +114,7 @@ export default function LiveDashboardView({
    {
      limit: 10,
      severity: "alert",
+      reviewed: 0,
      cameras: alertCameras,
    },
  ]);